manila-2.0.0/0000775000567000056710000000000012701407265014117 5ustar jenkinsjenkins00000000000000manila-2.0.0/devstack/0000775000567000056710000000000012701407265015723 5ustar jenkinsjenkins00000000000000manila-2.0.0/devstack/settings0000664000567000056710000002064612701407107017511 0ustar jenkinsjenkins00000000000000# Setting configuration file for manila services # ---------------------------------------------- # 1) It is possible to set any custom opt to any config group using following: # $ export MANILA_OPTGROUP_foo_bar=value # where 'foo' is name of config group and 'bar' is name of option. # # 2) 'MANILA_CONFIGURE_GROUPS' contains list of config group names used to create # config groups, but 'MANILA_ENABLED_BACKENDS' is used to set config groups as # Manila share back ends. Both can be set like following: # $ export MANILA_ENABLED_BACKENDS=foo,bar # where 'foo' and 'bar' are names of config groups with opts for some share # drivers. By default they are equal. Also be attentive, if you modify both, # make sure 'MANILA_CONFIGURE_GROUPS' contains all values from # 'MANILA_ENABLED_BACKENDS'. # DEFAULT group is always defined, no need to specify it within 'MANILA_CONFIGURE_GROUPS'. # # 3) Two default backends are used for compatibility with previous approach. # They have same configuration except name of backend. Both use generic driver. # They can be enabled by adding values of following env vars: # 'MANILA_BACKEND1_CONFIG_GROUP_NAME' and 'MANILA_BACKEND2_CONFIG_GROUP_NAME' # to the env var 'MANILA_ENABLED_BACKENDS' or will be enabled # if 'MANILA_ENABLED_BACKENDS' is empty. # # 4) 'CINDER_OVERSUBSCRIPTION_RATIO' - manila devstack-plugin env var that is # useful for all share drivers that use Cinder. If it is set, then it will be # applied for two Cinder options: 'max_over_subscription_ratio' and # 'lvm_max_over_subscription_ratio'. Should be float. Example: # CINDER_OVERSUBSCRIPTION_RATIO=20.0 # Defaults # -------- MANILA_GIT_BASE=${MANILA_GIT_BASE:-https://github.com} MANILA_REPO_ROOT=${MANILA_REPO_ROOT:-openstack} MANILACLIENT_REPO=${MANILA_GIT_BASE}/${MANILA_REPO_ROOT}/python-manilaclient.git MANILACLIENT_BRANCH=${MANILACLIENT_BRANCH:-master} MANILA_UI_REPO=${MANILA_GIT_BASE}/${MANILA_REPO_ROOT}/manila-ui.git MANILA_UI_BRANCH=${MANILA_UI_BRANCH:-$MANILACLIENT_BRANCH} MANILA_UI_ENABLED=$(trueorfalse True MANILA_UI_ENABLED) # Set up default directories MANILA_DIR=${MANILA_DIR:=$DEST/manila} MANILA_LOCK_PATH=${MANILA_LOCK_PATH:=$OSLO_LOCK_PATH} MANILA_LOCK_PATH=${MANILA_LOCK_PATH:=$MANILA_DIR/manila_locks} MANILACLIENT_DIR=${MANILACLIENT_DIR:=$DEST/python-manilaclient} MANILA_UI_DIR=${MANILA_UI_DIR:=$DEST/manila-ui} MANILA_STATE_PATH=${MANILA_STATE_PATH:=$DATA_DIR/manila} MANILA_AUTH_CACHE_DIR=${MANILA_AUTH_CACHE_DIR:-/var/cache/manila} MANILA_CONF_DIR=${MANILA_CONF_DIR:-/etc/manila} MANILA_CONF=$MANILA_CONF_DIR/manila.conf MANILA_API_PASTE_INI=$MANILA_CONF_DIR/api-paste.ini MANILA_DEFAULT_SHARE_TYPE=${MANILA_DEFAULT_SHARE_TYPE:-default} # MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS is expected to contain extra specs key-value pairs, # that should be assigned to default share type. Both - qualified and unqualified extra specs are supported. # Pairs are separated by spaces, value is assigned to key using sign of equality. 
Examples: # MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS='foo=bar' # MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS='foo=bar quuz=xyzzy' # MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS='foo=bar quuz=xyzzy fakeprefix:baz=waldo' MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS=${MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS:-''} # Public facing bits MANILA_SERVICE_HOST=${MANILA_SERVICE_HOST:-$SERVICE_HOST} MANILA_SERVICE_PORT=${MANILA_SERVICE_PORT:-8786} MANILA_SERVICE_PORT_INT=${MANILA_SERVICE_PORT_INT:-18776} MANILA_SERVICE_PROTOCOL=${MANILA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} # Support entry points installation of console scripts if [[ -d $MANILA_DIR/bin ]]; then MANILA_BIN_DIR=$MANILA_DIR/bin else MANILA_BIN_DIR=$(get_python_exec_prefix) fi # Common opts SHARE_NAME_PREFIX=${SHARE_NAME_PREFIX:-share-} MANILA_ENABLED_SHARE_PROTOCOLS=${ENABLED_SHARE_PROTOCOLS:-"NFS,CIFS"} MANILA_SCHEDULER_DRIVER=${MANILA_SCHEDULER_DRIVER:-manila.scheduler.filter_scheduler.FilterScheduler} MANILA_SERVICE_SECGROUP="manila-service" # Following env var defines whether to apply downgrade migrations setting up DB or not. # If it is set to False, then only 'upgrade' migrations will be applied. # If it is set to True, then will be applied 'upgrade', 'downgrade' and 'upgrade' # migrations again. MANILA_USE_DOWNGRADE_MIGRATIONS=${MANILA_USE_DOWNGRADE_MIGRATIONS:-"False"} # Common info for Generic driver(s) SHARE_DRIVER=${SHARE_DRIVER:-manila.share.drivers.generic.GenericShareDriver} eval USER_HOME=~ MANILA_PATH_TO_PUBLIC_KEY=${MANILA_PATH_TO_PUBLIC_KEY:-"$USER_HOME/.ssh/id_rsa.pub"} MANILA_PATH_TO_PRIVATE_KEY=${MANILA_PATH_TO_PRIVATE_KEY:-"$USER_HOME/.ssh/id_rsa"} MANILA_SERVICE_KEYPAIR_NAME=${MANILA_SERVICE_KEYPAIR_NAME:-"manila-service"} MANILA_SERVICE_INSTANCE_USER=${MANILA_SERVICE_INSTANCE_USER:-"manila"} MANILA_SERVICE_IMAGE_URL=${MANILA_SERVICE_IMAGE_URL:-"http://tarballs.openstack.org/manila-image-elements/images/manila-service-image-master.qcow2"} MANILA_SERVICE_IMAGE_NAME=${MANILA_SERVICE_IMAGE_NAME:-"manila-service-image-master"} # Third party CI Vendors should set this to false to skip the service image download MANILA_SERVICE_IMAGE_ENABLED=$(trueorfalse True MANILA_SERVICE_IMAGE_ENABLED) MANILA_USE_SERVICE_INSTANCE_PASSWORD=${MANILA_USE_SERVICE_INSTANCE_PASSWORD:-"False"} MANILA_SERVICE_INSTANCE_PASSWORD=${MANILA_SERVICE_INSTANCE_PASSWORD:-"manila"} MANILA_SERVICE_VM_FLAVOR_REF=${MANILA_SERVICE_VM_FLAVOR_REF:-100} MANILA_SERVICE_VM_FLAVOR_NAME=${MANILA_SERVICE_VM_FLAVOR_NAME:-"manila-service-flavor"} MANILA_SERVICE_VM_FLAVOR_RAM=${MANILA_SERVICE_VM_FLAVOR_RAM:-128} MANILA_SERVICE_VM_FLAVOR_DISK=${MANILA_SERVICE_VM_FLAVOR_DISK:-0} MANILA_SERVICE_VM_FLAVOR_VCPUS=${MANILA_SERVICE_VM_FLAVOR_VCPUS:-1} # Support for multi backend configuration (default is no support) MANILA_MULTI_BACKEND=$(trueorfalse False MANILA_MULTI_BACKEND) DEPRECATED_TEXT="$DEPRECATED_TEXT\n'MANILA_MULTI_BACKEND' is deprecated, it makes influence only when is set to True and 'MANILA_ENABLED_BACKENDS' is not set. Use 'MANILA_ENABLED_BACKENDS' instead if you want to use custom setting. 
Set there a list of back end names to be enabled.\n To configure custom back ends use (any opt in any group can be set in this way) following: MANILA_OPTGROUP_foo_bar=value where 'foo' is name of config group and 'bar' is name of option.\n" # First share backend data, that will be used in any installation MANILA_BACKEND1_CONFIG_GROUP_NAME=${MANILA_BACKEND1_CONFIG_GROUP_NAME:-generic1} # deprecated MANILA_SHARE_BACKEND1_NAME=${MANILA_SHARE_BACKEND1_NAME:-GENERIC1} # deprecated # Second share backend data, that will be used only with MANILA_MULTI_BACKEND=True MANILA_BACKEND2_CONFIG_GROUP_NAME=${MANILA_BACKEND2_CONFIG_GROUP_NAME:-generic2} # deprecated MANILA_SHARE_BACKEND2_NAME=${MANILA_SHARE_BACKEND2_NAME:-GENERIC2} # deprecated # Options for configuration of LVM share driver SHARE_BACKING_FILE_SIZE=${SHARE_BACKING_FILE_SIZE:-8400M} SHARE_GROUP=${SHARE_GROUP:-lvm-shares} MANILA_MNT_DIR=${MANILA_MNT_DIR:=$MANILA_STATE_PATH/mnt} SMB_CONF=${SMB_CONF:-/etc/samba/smb.conf} SMB_PRIVATE_DIR=${SMB_PRIVATE_DIR:-/var/lib/samba/private} CONFIGURE_BACKING_FILE=${CONFIGURE_BACKING_FILE:-"True"} # Options for replication MANILA_REPLICA_STATE_UPDATE_INTERVAL=${MANILA_REPLICA_STATE_UPDATE_INTERVAL:-300} # Options for configuration of ZFSonLinux driver # 'MANILA_ZFSONLINUX_ZPOOL_SIZE' defines size of each zpool. That value # will be used for creation of sparse files. MANILA_ZFSONLINUX_ZPOOL_SIZE=${MANILA_ZFSONLINUX_ZPOOL_SIZE:-"30G"} MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR=${MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR:-"/opt/stack/data/manila/zfsonlinux"} MANILA_ZFSONLINUX_SHARE_EXPORT_IP=${MANILA_ZFSONLINUX_SHARE_EXPORT_IP:-"127.0.0.1"} MANILA_ZFSONLINUX_SERVICE_IP=${MANILA_ZFSONLINUX_SERVICE_IP:-"127.0.0.1"} MANILA_ZFSONLINUX_DATASET_CREATION_OPTIONS=${MANILA_ZFSONLINUX_DATASET_CREATION_OPTIONS:-"compression=gzip"} MANILA_ZFSONLINUX_USE_SSH=${MANILA_ZFSONLINUX_USE_SSH:-"False"} MANILA_ZFSONLINUX_SSH_USERNAME=${MANILA_ZFSONLINUX_SSH_USERNAME:-$STACK_USER} # If MANILA_ZFSONLINUX_REPLICATION_DOMAIN is set to empty value then # Manila will consider replication feature as disabled for ZFSonLinux share driver. MANILA_ZFSONLINUX_REPLICATION_DOMAIN=${MANILA_ZFSONLINUX_REPLICATION_DOMAIN:-"ZFSonLinux"} # Enable manila services # ---------------------- # We have to add Manila to enabled services for screen_it to work # It consists of 4 parts: m-api (API), m-shr (Share), m-sch (Scheduler) # and m-dat (Data). 
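# For reference, a hedged local.conf sketch that pulls this plugin in (and,
# with it, the enable_service calls below); the repository URL matches the one
# shown in devstack/README.rst:
#   [[local|localrc]]
#   enable_plugin manila https://github.com/openstack/manila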
enable_service manila enable_service m-api enable_service m-shr enable_service m-sch enable_service m-dat manila-2.0.0/devstack/plugin.sh0000775000567000056710000007671112701407112017563 0ustar jenkinsjenkins00000000000000# Plugin file for enabling manila services # ---------------------------------------- # Save trace setting XTRACE=$(set +o | grep xtrace) set -o xtrace # Entry Points # ------------ function _clean_share_group { local vg=$1 local vg_prefix=$2 # Clean out existing shares for lv in `sudo lvs --noheadings -o lv_name $vg`; do # vg_prefix prefixes the LVs we want if [[ "${lv#$vg_prefix}" != "$lv" ]]; then sudo umount -f $MANILA_MNT_DIR/$lv sudo lvremove -f $vg/$lv sudo rm -rf $MANILA_MNT_DIR/$lv fi done } function _clean_manila_lvm_backing_file { local vg=$1 # if there is no logical volume left, it's safe to attempt a cleanup # of the backing file if [ -z "`sudo lvs --noheadings -o lv_name $vg`" ]; then # if the backing physical device is a loop device, it was probably setup by devstack VG_DEV=$(sudo losetup -j $DATA_DIR/${vg}-backing-file | awk -F':' '/backing-file/ { print $1 }') if [[ -n "$VG_DEV" ]]; then sudo losetup -d $VG_DEV rm -f $DATA_DIR/${vg}-backing-file fi fi } function _clean_zfsonlinux_data { for filename in "$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR"/*; do if [[ $(sudo zpool list | grep $filename) ]]; then echo "Destroying zpool named $filename" sudo zpool destroy -f $filename file="$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR$filename" echo "Destroying file named $file" rm -f $file fi done } # cleanup_manila - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_manila { # All stuff, that are created by share drivers will be cleaned up by other services. _clean_share_group $SHARE_GROUP $SHARE_NAME_PREFIX _clean_manila_lvm_backing_file $SHARE_GROUP _clean_zfsonlinux_data } # configure_default_backends - configures default Manila backends with generic driver. function configure_default_backends { # Configure two default backends with generic drivers onboard for group_name in $MANILA_BACKEND1_CONFIG_GROUP_NAME $MANILA_BACKEND2_CONFIG_GROUP_NAME; do iniset $MANILA_CONF $group_name share_driver $SHARE_DRIVER if [ "$MANILA_BACKEND1_CONFIG_GROUP_NAME" == "$group_name" ]; then iniset $MANILA_CONF $group_name share_backend_name $MANILA_SHARE_BACKEND1_NAME else iniset $MANILA_CONF $group_name share_backend_name $MANILA_SHARE_BACKEND2_NAME fi iniset $MANILA_CONF $group_name path_to_public_key $MANILA_PATH_TO_PUBLIC_KEY iniset $MANILA_CONF $group_name path_to_private_key $MANILA_PATH_TO_PRIVATE_KEY iniset $MANILA_CONF $group_name service_image_name $MANILA_SERVICE_IMAGE_NAME iniset $MANILA_CONF $group_name service_instance_user $MANILA_SERVICE_INSTANCE_USER iniset $MANILA_CONF $group_name driver_handles_share_servers True if [ $(trueorfalse False MANILA_USE_SERVICE_INSTANCE_PASSWORD) == True ]; then iniset $MANILA_CONF $group_name service_instance_password $MANILA_SERVICE_INSTANCE_PASSWORD fi done } # set_config_opts - this allows to set any config opt to any config group, # parsing env vars by prefix special 'MANILA_OPTGROUP_'. 
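# A minimal sketch of the mapping this function performs. The group name
# 'generic1' and the option below are taken from defaults elsewhere in this
# plugin and are illustrative only:
#   export MANILA_OPTGROUP_generic1_driver_handles_share_servers=False
# ends up as the equivalent of:
#   iniset $MANILA_CONF generic1 driver_handles_share_servers False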
function set_config_opts { # expects only one param - name of config group(s) as list separated by commas GROUP_NAMES=$1 if [[ -n "$GROUP_NAMES" ]]; then for be in ${GROUP_NAMES//,/ }; do # get backend_specific opt values prefix=MANILA_OPTGROUP_$be\_ ( set -o posix ; set ) | grep ^$prefix | while read -r line ; do # parse it to opt names and values opt=${line#$prefix} opt_name=${opt%%=*} opt_value=${opt##*=} iniset $MANILA_CONF $be $opt_name $opt_value done done fi } # set_cinder_quotas - Sets Cinder quotas, that is useful for generic driver, # which uses Cinder volumes and snapshots. function set_cinder_quotas { # Update Cinder configuration to make sure default quotas are enough # for Manila using Generic driver with parallel testing. if is_service_enabled cinder; then if [[ ! "$CINDER_CONF" ]]; then CINDER_CONF=/etc/cinder/cinder.conf fi iniset $CINDER_CONF DEFAULT quota_volumes 50 iniset $CINDER_CONF DEFAULT quota_snapshots 50 iniset $CINDER_CONF DEFAULT quota_gigabytes 1000 fi } # configure_manila - Set config files, create data dirs, etc function configure_manila { setup_develop $MANILA_DIR setup_develop $MANILACLIENT_DIR if [[ ! -d $MANILA_CONF_DIR ]]; then sudo mkdir -p $MANILA_CONF_DIR fi sudo chown $STACK_USER $MANILA_CONF_DIR cp -p $MANILA_DIR/etc/manila/policy.json $MANILA_CONF_DIR # Set the paths of certain binaries MANILA_ROOTWRAP=$(get_rootwrap_location manila) # If Manila ships the new rootwrap filters files, deploy them # (owned by root) and add a parameter to $MANILA_ROOTWRAP ROOTWRAP_MANILA_SUDOER_CMD="$MANILA_ROOTWRAP" if [[ -d $MANILA_DIR/etc/manila/rootwrap.d ]]; then # Wipe any existing rootwrap.d files first if [[ -d $MANILA_CONF_DIR/rootwrap.d ]]; then sudo rm -rf $MANILA_CONF_DIR/rootwrap.d fi # Deploy filters to /etc/manila/rootwrap.d sudo mkdir -m 755 $MANILA_CONF_DIR/rootwrap.d sudo cp $MANILA_DIR/etc/manila/rootwrap.d/*.filters $MANILA_CONF_DIR/rootwrap.d sudo chown -R root:root $MANILA_CONF_DIR/rootwrap.d sudo chmod 644 $MANILA_CONF_DIR/rootwrap.d/* # Set up rootwrap.conf, pointing to /etc/manila/rootwrap.d sudo cp $MANILA_DIR/etc/manila/rootwrap.conf $MANILA_CONF_DIR/ sudo sed -e "s:^filters_path=.*$:filters_path=$MANILA_CONF_DIR/rootwrap.d:" -i $MANILA_CONF_DIR/rootwrap.conf sudo chown root:root $MANILA_CONF_DIR/rootwrap.conf sudo chmod 0644 $MANILA_CONF_DIR/rootwrap.conf # Specify rootwrap.conf as first parameter to manila-rootwrap MANILA_ROOTWRAP="$MANILA_ROOTWRAP $MANILA_CONF_DIR/rootwrap.conf" ROOTWRAP_MANILA_SUDOER_CMD="$MANILA_ROOTWRAP *" fi TEMPFILE=`mktemp` echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_MANILA_SUDOER_CMD" >$TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/manila-rootwrap cp $MANILA_DIR/etc/manila/api-paste.ini $MANILA_API_PASTE_INI # Remove old conf file if exists rm -f $MANILA_CONF configure_auth_token_middleware $MANILA_CONF manila $MANILA_AUTH_CACHE_DIR iniset $MANILA_CONF DEFAULT auth_strategy keystone iniset $MANILA_CONF DEFAULT debug True iniset $MANILA_CONF DEFAULT scheduler_driver $MANILA_SCHEDULER_DRIVER iniset $MANILA_CONF DEFAULT share_name_template ${SHARE_NAME_PREFIX}%s iniset $MANILA_CONF DATABASE connection `database_connection_url manila` iniset $MANILA_CONF DATABASE max_pool_size 40 iniset $MANILA_CONF DEFAULT api_paste_config $MANILA_API_PASTE_INI iniset $MANILA_CONF DEFAULT rootwrap_config $MANILA_CONF_DIR/rootwrap.conf iniset $MANILA_CONF DEFAULT osapi_share_extension manila.api.contrib.standard_extensions iniset $MANILA_CONF DEFAULT state_path $MANILA_STATE_PATH iniset 
$MANILA_CONF DEFAULT default_share_type $MANILA_DEFAULT_SHARE_TYPE iniset $MANILA_CONF DEFAULT enabled_share_protocols $MANILA_ENABLED_SHARE_PROTOCOLS iniset $MANILA_CONF oslo_concurrency lock_path $MANILA_LOCK_PATH iniset $MANILA_CONF DEFAULT wsgi_keep_alive False iniset $MANILA_CONF DEFAULT lvm_share_volume_group $SHARE_GROUP # Set the replica_state_update_interval iniset $MANILA_CONF DEFAULT replica_state_update_interval $MANILA_REPLICA_STATE_UPDATE_INTERVAL if is_service_enabled neutron; then configure_auth_token_middleware $MANILA_CONF neutron $MANILA_AUTH_CACHE_DIR neutron fi if is_service_enabled nova; then configure_auth_token_middleware $MANILA_CONF nova $MANILA_AUTH_CACHE_DIR nova fi if is_service_enabled cinder; then configure_auth_token_middleware $MANILA_CONF cinder $MANILA_AUTH_CACHE_DIR cinder fi # Note: set up config group does not mean that this backend will be enabled. # To enable it, specify its name explicitly using "enabled_share_backends" opt. configure_default_backends default_backends=$MANILA_BACKEND1_CONFIG_GROUP_NAME if [ "$MANILA_MULTI_BACKEND" = "True" ]; then default_backends+=,$MANILA_BACKEND2_CONFIG_GROUP_NAME fi if [ ! $MANILA_ENABLED_BACKENDS ]; then # If $MANILA_ENABLED_BACKENDS is not set, use configured backends by default export MANILA_ENABLED_BACKENDS=$default_backends fi iniset $MANILA_CONF DEFAULT enabled_share_backends $MANILA_ENABLED_BACKENDS if [ ! -f $MANILA_PATH_TO_PRIVATE_KEY ]; then ssh-keygen -N "" -t rsa -f $MANILA_PATH_TO_PRIVATE_KEY; fi iniset $MANILA_CONF DEFAULT manila_service_keypair_name $MANILA_SERVICE_KEYPAIR_NAME if is_service_enabled tls-proxy; then # Set the service port for a proxy to take the original iniset $MANILA_CONF DEFAULT osapi_share_listen_port $MANILA_SERVICE_PORT_INT fi if [ "$SYSLOG" != "False" ]; then iniset $MANILA_CONF DEFAULT use_syslog True fi iniset_rpc_backend manila $MANILA_CONF DEFAULT if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then # Add color to logging output iniset $MANILA_CONF DEFAULT logging_context_format_string \ "%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s%(color)s] %(instance)s%(color)s%(message)s" iniset $MANILA_CONF DEFAULT logging_default_format_string \ "%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" iniset $MANILA_CONF DEFAULT logging_debug_format_suffix \ "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" iniset $MANILA_CONF DEFAULT logging_exception_prefix \ "%(color)s%(asctime)s.%(msecs)d TRACE %(name)s %(instance)s" fi MANILA_CONFIGURE_GROUPS=${MANILA_CONFIGURE_GROUPS:-"$MANILA_ENABLED_BACKENDS"} set_config_opts $MANILA_CONFIGURE_GROUPS set_config_opts DEFAULT } function configure_manila_ui { if is_service_enabled horizon && [ "$MANILA_UI_ENABLED" = "True" ]; then # NOTE(vponomaryov): workaround for devstack bug: 1540328 # where devstack install 'test-requirements' but should not do it # for manila-ui project as it installs Horizon from url. # Remove following two 'mv' commands when mentioned bug is fixed. 
mv $MANILA_UI_DIR/test-requirements.txt $MANILA_UI_DIR/_test-requirements.txt setup_develop $MANILA_UI_DIR cp $MANILA_UI_DIR/manila_ui/enabled/_90_manila_*.py $HORIZON_DIR/openstack_dashboard/local/enabled mv $MANILA_UI_DIR/_test-requirements.txt $MANILA_UI_DIR/test-requirements.txt fi } function create_manila_service_keypair { nova keypair-add $MANILA_SERVICE_KEYPAIR_NAME --pub-key $MANILA_PATH_TO_PUBLIC_KEY } # create_service_share_servers - creates service Nova VMs, one per generic # driver, and only if it is configured to mode without handling of share servers. function create_service_share_servers { private_net_id=$(nova net-list | grep ' private ' | get_field 1) created_admin_network=false for BE in ${MANILA_ENABLED_BACKENDS//,/ }; do driver_handles_share_servers=$(iniget $MANILA_CONF $BE driver_handles_share_servers) share_driver=$(iniget $MANILA_CONF $BE share_driver) generic_driver='manila.share.drivers.generic.GenericShareDriver' if [[ $share_driver == $generic_driver ]]; then if [[ $(trueorfalse False driver_handles_share_servers) == False ]]; then vm_name='manila_service_share_server_'$BE nova boot $vm_name \ --flavor $MANILA_SERVICE_VM_FLAVOR_NAME \ --image $MANILA_SERVICE_IMAGE_NAME \ --nic net-id=$private_net_id \ --security-groups $MANILA_SERVICE_SECGROUP \ --key-name $MANILA_SERVICE_KEYPAIR_NAME vm_id=$(nova show $vm_name | grep ' id ' | get_field 2) iniset $MANILA_CONF $BE service_instance_name_or_id $vm_id iniset $MANILA_CONF $BE service_net_name_or_ip private iniset $MANILA_CONF $BE tenant_net_name_or_ip private else if is_service_enabled neutron; then if [ $created_admin_network == false ]; then admin_net_id=$(neutron net-create --tenant-id $TENANT_ID admin_net | grep ' id ' | get_field 2) admin_subnet_id=$(neutron subnet-create --tenant-id $TENANT_ID --ip_version 4 --no-gateway --name admin_subnet --subnetpool None $admin_net_id $FIXED_RANGE | grep ' id ' | get_field 2) created_admin_network=true fi iniset $MANILA_CONF $BE admin_network_id $admin_net_id iniset $MANILA_CONF $BE admin_subnet_id $admin_subnet_id fi fi fi done configure_data_service_generic_driver } function configure_data_service_generic_driver { enabled_backends=(${MANILA_ENABLED_BACKENDS//,/ }) share_driver=$(iniget $MANILA_CONF ${enabled_backends[0]} share_driver) generic_driver='manila.share.drivers.generic.GenericShareDriver' if [[ $share_driver == $generic_driver ]]; then driver_handles_share_servers=$(iniget $MANILA_CONF ${enabled_backends[0]} driver_handles_share_servers) if [[ $(trueorfalse False driver_handles_share_servers) == False ]]; then iniset $MANILA_CONF DEFAULT data_node_access_ip $PUBLIC_NETWORK_GATEWAY else iniset $MANILA_CONF DEFAULT data_node_access_ip $FIXED_RANGE fi fi } # create_manila_service_flavor - creates flavor, that will be used by backends # with configured generic driver to boot Nova VMs with. function create_manila_service_flavor { # Create flavor for Manila's service VM nova flavor-create \ $MANILA_SERVICE_VM_FLAVOR_NAME \ $MANILA_SERVICE_VM_FLAVOR_REF \ $MANILA_SERVICE_VM_FLAVOR_RAM \ $MANILA_SERVICE_VM_FLAVOR_DISK \ $MANILA_SERVICE_VM_FLAVOR_VCPUS } # create_manila_service_image - creates image, that will be used by backends # with configured generic driver to boot Nova VMs from. 
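# A hedged note for third-party CI and offline setups, using variables defined
# in devstack/settings: the image download can be skipped entirely with
#   MANILA_SERVICE_IMAGE_ENABLED=False
# or redirected to a local mirror (the URL below is illustrative only):
#   MANILA_SERVICE_IMAGE_URL=http://mirror.example.org/manila-service-image.qcow2
#   MANILA_SERVICE_IMAGE_NAME=manila-service-image-custom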
function create_manila_service_image { TOKEN=$(openstack token issue -c id -f value) # Download Manila's image if is_service_enabled g-reg; then upload_image $MANILA_SERVICE_IMAGE_URL $TOKEN fi } # create_manila_service_secgroup - creates security group that is used by # Nova VMs when generic driver is configured. function create_manila_service_secgroup { # Create a secgroup if ! nova secgroup-list | grep -q $MANILA_SERVICE_SECGROUP; then nova secgroup-create $MANILA_SERVICE_SECGROUP "$MANILA_SERVICE_SECGROUP description" if ! timeout 30 sh -c "while ! nova secgroup-list | grep -q $MANILA_SERVICE_SECGROUP; do sleep 1; done"; then echo "Security group not created" exit 1 fi fi # Configure Security Group Rules if ! nova secgroup-list-rules $MANILA_SERVICE_SECGROUP | grep -q icmp; then nova secgroup-add-rule $MANILA_SERVICE_SECGROUP icmp -1 -1 0.0.0.0/0 fi if ! nova secgroup-list-rules $MANILA_SERVICE_SECGROUP | grep -q " tcp .* 22 "; then nova secgroup-add-rule $MANILA_SERVICE_SECGROUP tcp 22 22 0.0.0.0/0 fi if ! nova secgroup-list-rules $MANILA_SERVICE_SECGROUP | grep -q " tcp .* 2049 "; then nova secgroup-add-rule $MANILA_SERVICE_SECGROUP tcp 2049 2049 0.0.0.0/0 fi if ! nova secgroup-list-rules $MANILA_SERVICE_SECGROUP | grep -q " udp .* 2049 "; then nova secgroup-add-rule $MANILA_SERVICE_SECGROUP udp 2049 2049 0.0.0.0/0 fi if ! nova secgroup-list-rules $MANILA_SERVICE_SECGROUP | grep -q " udp .* 445 "; then nova secgroup-add-rule $MANILA_SERVICE_SECGROUP udp 445 445 0.0.0.0/0 fi if ! nova secgroup-list-rules $MANILA_SERVICE_SECGROUP | grep -q " tcp .* 445 "; then nova secgroup-add-rule $MANILA_SERVICE_SECGROUP tcp 445 445 0.0.0.0/0 fi if ! nova secgroup-list-rules $MANILA_SERVICE_SECGROUP | grep -q " tcp .* 139 "; then nova secgroup-add-rule $MANILA_SERVICE_SECGROUP tcp 137 139 0.0.0.0/0 fi if ! nova secgroup-list-rules $MANILA_SERVICE_SECGROUP | grep -q " udp .* 139 "; then nova secgroup-add-rule $MANILA_SERVICE_SECGROUP udp 137 139 0.0.0.0/0 fi # List secgroup rules nova secgroup-list-rules $MANILA_SERVICE_SECGROUP } # create_manila_accounts - Set up common required manila accounts function create_manila_accounts { create_service_user "manila" if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then # Set up Manila v1 service and endpoint get_or_create_service "manila" "share" "Manila Shared Filesystem Service" get_or_create_endpoint "share" "$REGION_NAME" \ "$MANILA_SERVICE_PROTOCOL://$MANILA_SERVICE_HOST:$MANILA_SERVICE_PORT/v1/\$(tenant_id)s" \ "$MANILA_SERVICE_PROTOCOL://$MANILA_SERVICE_HOST:$MANILA_SERVICE_PORT/v1/\$(tenant_id)s" \ "$MANILA_SERVICE_PROTOCOL://$MANILA_SERVICE_HOST:$MANILA_SERVICE_PORT/v1/\$(tenant_id)s" # Set up Manila v2 service and endpoint get_or_create_service "manilav2" "sharev2" "Manila Shared Filesystem Service V2" get_or_create_endpoint "sharev2" "$REGION_NAME" \ "$MANILA_SERVICE_PROTOCOL://$MANILA_SERVICE_HOST:$MANILA_SERVICE_PORT/v2/\$(tenant_id)s" \ "$MANILA_SERVICE_PROTOCOL://$MANILA_SERVICE_HOST:$MANILA_SERVICE_PORT/v2/\$(tenant_id)s" \ "$MANILA_SERVICE_PROTOCOL://$MANILA_SERVICE_HOST:$MANILA_SERVICE_PORT/v2/\$(tenant_id)s" fi } # create_default_share_type - create share type that will be set as default. function create_default_share_type { echo "Waiting for Manila API to start..." if ! 
wait_for_service 60 $MANILA_SERVICE_PROTOCOL://$MANILA_SERVICE_HOST:$MANILA_SERVICE_PORT; then die $LINENO "Manila did not start" fi enabled_backends=(${MANILA_ENABLED_BACKENDS//,/ }) driver_handles_share_servers=$(iniget $MANILA_CONF ${enabled_backends[0]} driver_handles_share_servers) manila type-create $MANILA_DEFAULT_SHARE_TYPE $driver_handles_share_servers if [[ $MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS ]]; then manila type-key $MANILA_DEFAULT_SHARE_TYPE set $MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS fi } # init_manila - Initializes database and creates manila dir if absent function init_manila { if is_service_enabled $DATABASE_BACKENDS; then # (re)create manila database recreate_database manila $MANILA_BIN_DIR/manila-manage db sync if [[ $(trueorfalse False MANILA_USE_DOWNGRADE_MIGRATIONS) == True ]]; then # Use both - upgrade and downgrade migrations to verify that # downgrade migrations do not break structure of Manila database. $MANILA_BIN_DIR/manila-manage db downgrade $MANILA_BIN_DIR/manila-manage db sync fi # Display version as debug-action (see bug/1473400) $MANILA_BIN_DIR/manila-manage db version fi if [ "$SHARE_DRIVER" == "manila.share.drivers.lvm.LVMShareDriver" ]; then if is_service_enabled m-shr; then # Configure a default volume group called '`lvm-shares`' for the share # service if it does not yet exist. If you don't wish to use a file backed # volume group, create your own volume group called ``stack-volumes`` before # invoking ``stack.sh``. # # By default, the backing file is 8G in size, and is stored in ``/opt/stack/data``. if ! sudo vgs $SHARE_GROUP; then if [ "$CONFIGURE_BACKING_FILE" = "True" ]; then SHARE_BACKING_FILE=${SHARE_BACKING_FILE:-$DATA_DIR/${SHARE_GROUP}-backing-file} # Only create if the file doesn't already exists [[ -f $SHARE_BACKING_FILE ]] || truncate -s $SHARE_BACKING_FILE_SIZE $SHARE_BACKING_FILE DEV=`sudo losetup -f --show $SHARE_BACKING_FILE` else DEV=$SHARE_BACKING_FILE fi # Only create if the loopback device doesn't contain $SHARE_GROUP if ! sudo vgs $SHARE_GROUP; then sudo vgcreate $SHARE_GROUP $DEV; fi fi mkdir -p $MANILA_STATE_PATH/shares fi elif [ "$SHARE_DRIVER" == "manila.share.drivers.zfsonlinux.driver.ZFSonLinuxShareDriver" ]; then if is_service_enabled m-shr; then mkdir -p $MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR file_counter=0 for BE in ${MANILA_ENABLED_BACKENDS//,/ }; do if [[ $file_counter == 0 ]]; then # NOTE(vponomaryov): create two pools for first ZFS backend # to cover different use cases that are supported by driver: # - Support of more than one zpool for share backend. # - Support of nested datasets. 
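# Sketch of the resulting option for this first backend (it matches the iniset
# call below and is shown only to make the intent of the two pools explicit):
#   zfs_zpool_list = alpha,betta/subdir
# i.e. one plain zpool plus a nested dataset inside a second zpool.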
local first_file="$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR"/alpha local second_file="$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR"/betta truncate -s $MANILA_ZFSONLINUX_ZPOOL_SIZE $first_file truncate -s $MANILA_ZFSONLINUX_ZPOOL_SIZE $second_file sudo zpool create alpha $first_file sudo zpool create betta $second_file # Create subdir (nested dataset) for second pool sudo zfs create betta/subdir iniset $MANILA_CONF $BE zfs_zpool_list alpha,betta/subdir elif [[ $file_counter == 1 ]]; then local file="$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR"/gamma truncate -s $MANILA_ZFSONLINUX_ZPOOL_SIZE $file sudo zpool create gamma $file iniset $MANILA_CONF $BE zfs_zpool_list gamma else local filename=file"$file_counter" local file="$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR"/"$filename" truncate -s $MANILA_ZFSONLINUX_ZPOOL_SIZE $file sudo zpool create $filename $file iniset $MANILA_CONF $BE zfs_zpool_list $filename fi iniset $MANILA_CONF $BE zfs_share_export_ip $MANILA_ZFSONLINUX_SHARE_EXPORT_IP iniset $MANILA_CONF $BE zfs_service_ip $MANILA_ZFSONLINUX_SERVICE_IP iniset $MANILA_CONF $BE zfs_dataset_creation_options $MANILA_ZFSONLINUX_DATASET_CREATION_OPTIONS iniset $MANILA_CONF $BE zfs_use_ssh $MANILA_ZFSONLINUX_USE_SSH iniset $MANILA_CONF $BE zfs_ssh_username $MANILA_ZFSONLINUX_SSH_USERNAME iniset $MANILA_CONF $BE replication_domain $MANILA_ZFSONLINUX_REPLICATION_DOMAIN let "file_counter=file_counter+1" done # Install the server's SSH key in our known_hosts file eval STACK_HOME=~$STACK_USER ssh-keyscan ${MANILA_ZFSONLINUX_SERVICE_IP} >> $STACK_HOME/.ssh/known_hosts # If the server is this machine, setup trust for ourselves (otherwise you're on your own) if [ "$MANILA_ZFSONLINUX_SERVICE_IP" = "127.0.0.1" ] || [ "$MANILA_ZFSONLINUX_SERVICE_IP" = "localhost" ] ; then # Trust our own SSH keys eval SSH_USER_HOME=~$MANILA_ZFSONLINUX_SSH_USERNAME cat $STACK_HOME/.ssh/*.pub >> $SSH_USER_HOME/.ssh/authorized_keys # Give ssh user sudo access echo "$MANILA_ZFSONLINUX_SSH_USERNAME ALL=(ALL) NOPASSWD: ALL" | sudo tee -a /etc/sudoers > /dev/null fi fi fi # Create cache dir sudo mkdir -p $MANILA_AUTH_CACHE_DIR sudo chown $STACK_USER $MANILA_AUTH_CACHE_DIR rm -f $MANILA_AUTH_CACHE_DIR/* } # install_manila - Collect source and prepare function install_manila { git_clone $MANILACLIENT_REPO $MANILACLIENT_DIR $MANILACLIENT_BRANCH if [ "$SHARE_DRIVER" == "manila.share.drivers.lvm.LVMShareDriver" ]; then if is_service_enabled m-shr; then if is_ubuntu; then sudo apt-get install -y nfs-kernel-server nfs-common samba elif is_fedora; then sudo yum install -y nfs-utils nfs-utils-lib samba fi fi elif [ "$SHARE_DRIVER" == "manila.share.drivers.zfsonlinux.driver.ZFSonLinuxShareDriver" ]; then if is_service_enabled m-shr; then if is_ubuntu; then sudo apt-get install -y nfs-kernel-server nfs-common samba # NOTE(vponomaryov): following installation is valid for Ubuntu 'trusty'. sudo apt-get install -y software-properties-common sudo apt-add-repository --yes ppa:zfs-native/stable sudo apt-get -y -q update && sudo apt-get -y -q upgrade sudo apt-get install -y linux-headers-generic sudo apt-get install -y build-essential sudo apt-get install -y ubuntu-zfs sudo modprobe zfs else echo "Manila Devstack plugin does not support installation "\ "of ZFS packages for non-'Ubuntu-trusty' distros. "\ "Please, install it first by other means or add its support "\ "for your distro." 
exit 1 fi fi fi # install manila-ui if horizon is enabled if is_service_enabled horizon && [ "$MANILA_UI_ENABLED" = "True" ]; then git_clone $MANILA_UI_REPO $MANILA_UI_DIR $MANILA_UI_BRANCH fi } #configure_samba - Configure node as Samba server function configure_samba { if [ "$SHARE_DRIVER" == "manila.share.drivers.lvm.LVMShareDriver" ]; then # TODO(vponomaryov): add here condition for ZFSonLinux driver too # when it starts to support SAMBA samba_daemon_name=smbd if is_service_enabled m-shr; then if is_fedora; then samba_daemon_name=smb fi sudo service $samba_daemon_name restart || echo "Couldn't restart '$samba_daemon_name' service" fi sudo cp /usr/share/samba/smb.conf $SMB_CONF sudo chown $STACK_USER -R /etc/samba iniset $SMB_CONF global include registry iniset $SMB_CONF global security user if [ ! -d "$SMB_PRIVATE_DIR" ]; then sudo mkdir $SMB_PRIVATE_DIR sudo touch $SMB_PRIVATE_DIR/secrets.tdb fi for backend_name in ${MANILA_ENABLED_BACKENDS//,/ }; do iniset $MANILA_CONF $backend_name driver_handles_share_servers False iniset $MANILA_CONF $backend_name lvm_share_export_ip $HOST_IP done iniset $MANILA_CONF DEFAULT data_node_access_ip $HOST_IP fi } # start_manila - Start running processes, including screen function start_manila { # restart apache to reload running horizon if manila-ui is enabled if is_service_enabled horizon && [ "$MANILA_UI_ENABLED" = "True" ]; then restart_apache_server sleep 3 # Wait for 3 sec to ensure that apache is running fi screen_it m-api "cd $MANILA_DIR && $MANILA_BIN_DIR/manila-api --config-file $MANILA_CONF" screen_it m-shr "cd $MANILA_DIR && $MANILA_BIN_DIR/manila-share --config-file $MANILA_CONF" screen_it m-sch "cd $MANILA_DIR && $MANILA_BIN_DIR/manila-scheduler --config-file $MANILA_CONF" screen_it m-dat "cd $MANILA_DIR && $MANILA_BIN_DIR/manila-data --config-file $MANILA_CONF" # Start proxies if enabled if is_service_enabled tls-proxy; then start_tls_proxy '*' $MANILA_SERVICE_PORT $MANILA_SERVICE_HOST $MANILA_SERVICE_PORT_INT & fi } # stop_manila - Stop running processes function stop_manila { # Kill the manila screen windows for serv in m-api m-sch m-shr m-dat; do screen -S $SCREEN_NAME -p $serv -X kill done } # update_tempest - Function used for updating Tempest config if Tempest service enabled function update_tempest { if is_service_enabled tempest; then if [ $(trueorfalse False MANILA_USE_SERVICE_INSTANCE_PASSWORD) == True ]; then iniset $TEMPEST_DIR/etc/tempest.conf share image_password $MANILA_SERVICE_INSTANCE_PASSWORD fi iniset $TEMPEST_DIR/etc/tempest.conf share image_with_share_tools $MANILA_SERVICE_IMAGE_NAME fi } function install_libraries { if [ $(trueorfalse False MANILA_MULTI_BACKEND) == True ]; then if [ $(trueorfalse True RUN_MANILA_MIGRATION_TESTS) == True ]; then if is_ubuntu; then install_package nfs-common else install_package nfs-utils fi fi fi } # Main dispatcher if [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing Manila" install_manila set_cinder_quotas elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then echo_summary "Configuring Manila" configure_manila echo_summary "Initializing Manila" init_manila echo_summary "Installing extra libraries" install_libraries # Neutron config update if is_service_enabled neutron; then iniset $Q_DHCP_CONF_FILE DEFAULT dnsmasq_local_resolv False fi # Cinder config update if is_service_enabled cinder && [[ -n "$CINDER_OVERSUBSCRIPTION_RATIO" ]]; then CINDER_CONF=${CINDER_CONF:-/etc/cinder/cinder.conf} CINDER_ENABLED_BACKENDS=$(iniget $CINDER_CONF DEFAULT 
enabled_backends) for BN in ${CINDER_ENABLED_BACKENDS//,/ }; do iniset $CINDER_CONF $BN lvm_max_over_subscription_ratio $CINDER_OVERSUBSCRIPTION_RATIO done iniset $CINDER_CONF DEFAULT max_over_subscription_ratio $CINDER_OVERSUBSCRIPTION_RATIO fi elif [[ "$1" == "stack" && "$2" == "extra" ]]; then echo_summary "Creating Manila entities for auth service" create_manila_accounts if is_service_enabled nova; then echo_summary "Creating Manila service flavor" create_manila_service_flavor echo_summary "Creating Manila service security group" create_manila_service_secgroup fi # Skip image downloads when disabled. # This way vendor Manila driver CI tests can skip # this potentially long and unnecessary download. if [ "$MANILA_SERVICE_IMAGE_ENABLED" = "True" ]; then echo_summary "Creating Manila service image" create_manila_service_image else echo_summary "Skipping download of Manila service image" fi if is_service_enabled nova; then echo_summary "Creating Manila service keypair" create_manila_service_keypair echo_summary "Creating Manila service VMs for generic driver \ backends for which handlng of share servers is disabled." create_service_share_servers fi echo_summary "Configure Samba server" configure_samba echo_summary "Starting Manila" start_manila echo_summary "Creating Manila default share type" create_default_share_type echo_summary "Update Tempest config" update_tempest echo_summary "Configuring Manila UI" configure_manila_ui fi if [[ "$1" == "unstack" ]]; then cleanup_manila fi if [[ "$1" == "clean" ]]; then cleanup_manila sudo rm -rf /etc/manila fi # Restore xtrace $XTRACE manila-2.0.0/devstack/files/0000775000567000056710000000000012701407265017025 5ustar jenkinsjenkins00000000000000manila-2.0.0/devstack/files/debs/0000775000567000056710000000000012701407265017742 5ustar jenkinsjenkins00000000000000manila-2.0.0/devstack/files/debs/manila0000664000567000056710000000000512701407107021114 0ustar jenkinsjenkins00000000000000lvm2 manila-2.0.0/devstack/files/rpms-suse/0000775000567000056710000000000012701407265020763 5ustar jenkinsjenkins00000000000000manila-2.0.0/devstack/files/rpms-suse/manila0000664000567000056710000000000512701407107022135 0ustar jenkinsjenkins00000000000000lvm2 manila-2.0.0/devstack/files/rpms/0000775000567000056710000000000012701407265020006 5ustar jenkinsjenkins00000000000000manila-2.0.0/devstack/files/rpms/manila0000664000567000056710000000000512701407107021160 0ustar jenkinsjenkins00000000000000lvm2 manila-2.0.0/devstack/README.rst0000664000567000056710000000041112701407107017401 0ustar jenkinsjenkins00000000000000====================== Enabling in Devstack ====================== 1. Download DevStack 2. Add this repo as an external repository:: > cat local.conf [[local|localrc]] enable_plugin manila https://github.com/openstack/manila 3. 
run ``stack.sh`` manila-2.0.0/etc/0000775000567000056710000000000012701407265014672 5ustar jenkinsjenkins00000000000000manila-2.0.0/etc/manila/0000775000567000056710000000000012701407265016133 5ustar jenkinsjenkins00000000000000manila-2.0.0/etc/manila/rootwrap.d/0000775000567000056710000000000012701407265020232 5ustar jenkinsjenkins00000000000000manila-2.0.0/etc/manila/rootwrap.d/share.filters0000664000567000056710000001454512701407107022732 0ustar jenkinsjenkins00000000000000# manila-rootwrap command filters for share nodes # This file should be owned by (and only-writeable by) the root user [Filters] # manila/utils.py : 'chown', '%s', '%s' chown: CommandFilter, chown, root # manila/utils.py : 'cat', '%s' cat: CommandFilter, cat, root # manila/share/drivers/lvm.py: 'mkfs.ext4', '/dev/mapper/%s' mkfs.ext4: CommandFilter, mkfs.ext4, root # manila/share/drivers/lvm.py: 'mkfs.ext3', '/dev/mapper/%s' mkfs.ext3: CommandFilter, mkfs.ext3, root # manila/share/drivers/lvm.py: 'smbd', '-s', '%s', '-D' smbd: CommandFilter, smbd, root smb: CommandFilter, smb, root # manila/share/drivers/lvm.py: 'rmdir', '%s' rmdir: CommandFilter, rmdir, root # manila/share/drivers/lvm.py: 'dd' 'count=0', 'if=%s' % srcstr, 'of=%s' dd: CommandFilter, dd, root # manila/share/drivers/lvm.py: 'fsck', '-pf', %s fsck: CommandFilter, fsck, root # manila/share/drivers/lvm.py: 'resize2fs', %s resize2fs: CommandFilter, resize2fs, root # manila/share/drivers/helpers.py: 'smbcontrol', 'all', 'close-share', '%s' smbcontrol: CommandFilter, smbcontrol, root # manila/share/drivers/helpers.py: 'net', 'conf', 'addshare', '%s', '%s', 'writeable=y', 'guest_ok=y # manila/share/drivers/helpers.py: 'net', 'conf', 'delshare', '%s' # manila/share/drivers/helpers.py: 'net', 'conf', 'setparm', '%s', '%s', '%s' # manila/share/drivers/helpers.py: 'net', 'conf', 'getparm', '%s', 'hosts allow' net: CommandFilter, net, root # manila/share/drivers/lvm.py: 'lvremove', '-f', "%s/%s lvremove: CommandFilter, lvremove, root # manila/share/drivers/lvm.py: 'lvextend', '-L', '%sG''-n', %s lvextend: CommandFilter, lvextend, root # manila/share/drivers/lvm.py: 'lvcreate', '-L', %s, '-n', %s lvcreate: CommandFilter, lvcreate, root # manila/share/drivers/lvm.py: 'vgs', '--noheadings', '-o', 'name' # manila/share/drivers/lvm.py: 'vgs', %s, '--rows', '--units', 'g' vgs: CommandFilter, vgs, root # manila/share/drivers/glusterfs.py: 'mkdir', '%s' # manila/share/drivers/ganesha/manager.py: 'mkdir', '-p', '%s' mkdir: CommandFilter, mkdir, root # manila/share/drivers/glusterfs.py: 'rm', '-rf', '%s' rm: CommandFilter, rm, root # manila/share/drivers/glusterfs.py: 'mount', '-t', 'glusterfs', '%s', '%s' # manila/share/drivers/glusterfs/glusterfs_native.py: 'mount', '-t', 'glusterfs', '%s', '%s' mount: CommandFilter, mount, root # manila/share/drivers/glusterfs.py: 'gluster', '--xml', 'volume', 'info', '%s' # manila/share/drivers/glusterfs.py: 'gluster', 'volume', 'set', '%s', 'nfs.export-dir', '%s' gluster: CommandFilter, gluster, root # manila/network/linux/ip_lib.py: 'ip', 'netns', 'exec', '%s', '%s' ip: CommandFilter, ip, root # manila/network/linux/interface.py: 'ovs-vsctl', 'add-port', '%s', '%s' ovs-vsctl: CommandFilter, ovs-vsctl, root # manila/share/drivers/glusterfs/glusterfs_native.py: 'find', '%s', '-mindepth', '1', '!', '-path', '%s', '!', '-path', '%s', '-delete' # manila/share/drivers/glusterfs/glusterfs_native.py: 'find', '%s', '-mindepth', '1', '-delete' find: CommandFilter, find, root # manila/share/drivers/glusterfs/glusterfs_native.py: 'umount', '%s' 
umount: CommandFilter, umount, root # GPFS commands # manila/share/drivers/ibm/gpfs.py: 'mmgetstate', '-Y' mmgetstate: CommandFilter, mmgetstate, root # manila/share/drivers/ibm/gpfs.py: 'mmlsattr', '%s' mmlsattr: CommandFilter, mmlsattr, root # manila/share/drivers/ibm/gpfs.py: 'mmcrfileset', '%s', '%s', '--inode-space', 'new' mmcrfileset: CommandFilter, mmcrfileset, root # manila/share/drivers/ibm/gpfs.py: 'mmlinkfileset', '%s', '%s', '-J', '%s' mmlinkfileset: CommandFilter, mmlinkfileset, root # manila/share/drivers/ibm/gpfs.py: 'mmsetquota', '-j', '%s', '-h', '%s', '%s' mmsetquota: CommandFilter, mmsetquota, root # manila/share/drivers/ibm/gpfs.py: 'mmunlinkfileset', '%s', '%s', '-f' mmunlinkfileset: CommandFilter, mmunlinkfileset, root # manila/share/drivers/ibm/gpfs.py: 'mmdelfileset', '%s', '%s', '-f' mmdelfileset: CommandFilter, mmdelfileset, root # manila/share/drivers/ibm/gpfs.py: 'mmcrsnapshot', '%s', '%s', '-j', '%s' mmcrsnapshot: CommandFilter, mmcrsnapshot, root # manila/share/drivers/ibm/gpfs.py: 'mmdelsnapshot', '%s', '%s', '-j', '%s' mmdelsnapshot: CommandFilter, mmdelsnapshot, root # manila/share/drivers/ibm/gpfs.py: 'rsync', '-rp', '%s', '%s' rsync: CommandFilter, rsync, root # manila/share/drivers/ibm/gpfs.py: 'exportfs' exportfs: CommandFilter, exportfs, root # manila/share/drivers/ibm/gpfs.py: 'stat', '--format=%F', '%s' stat: CommandFilter, stat, root # manila/share/drivers/ibm/gpfs.py: 'df', '-P', '-B', '1', '%s' df: CommandFilter, df, root # Ganesha commands # manila/share/drivers/ibm/ganesha_utils.py: 'mv', '%s', '%s' # manila/share/drivers/ganesha/manager.py: 'mv', '%s', '%s' mv: CommandFilter, mv, root # manila/share/drivers/ibm/ganesha_utils.py: 'cp', '%s', '%s' cp: CommandFilter, cp, root # manila/share/drivers/ibm/ganesha_utils.py: 'scp', '-i', '%s', '%s', '%s' scp: CommandFilter, scp, root # manila/share/drivers/ibm/ganesha_utils.py: 'ssh', '%s', '%s' ssh: CommandFilter, ssh, root # manila/share/drivers/ibm/ganesha_utils.py: 'chmod', '%s', '%s' chmod: CommandFilter, chmod, root # manila/share/drivers/ibm/ganesha_utils.py: 'service', '%s', 'restart' service: CommandFilter, service, root # manila/share/drivers/ganesha/manager.py: 'mktemp', '-p', '%s', '-t', '%s' mktemp: CommandFilter, mktemp, root # manila/share/drivers/ganesha/manager.py: shcat: RegExpFilter, sh, root, sh, -c, echo '((.|\n)*)' > /.* # manila/share/drivers/ganesha/manager.py: dbus-addexport: RegExpFilter, dbus-send, root, dbus-send, --print-reply, --system, --dest=org\.ganesha\.nfsd, /org/ganesha/nfsd/ExportMgr, org\.ganesha\.nfsd\.exportmgr\.(Add|Remove)Export, .*, .* # manila/share/drivers/ganesha/manager.py: dbus-removeexport: RegExpFilter, dbus-send, root, dbus-send, --print-reply, --system, --dest=org\.ganesha\.nfsd, /org/ganesha/nfsd/ExportMgr, org\.ganesha\.nfsd\.exportmgr\.(Add|Remove)Export, .* # manila/share/drivers/ganesha/manager.py: rmconf: RegExpFilter, sh, root, sh, -c, rm -f /.*/\*\.conf$ # ZFS commands # manila/share/drivers/zfsonlinux/driver.py # manila/share/drivers/zfsonlinux/utils.py zpool: CommandFilter, zpool, root # manila/share/drivers/zfsonlinux/driver.py # manila/share/drivers/zfsonlinux/utils.py zfs: CommandFilter, zfs, root # manila/data/utils.py: 'ls', '-pA1', '--group-directories-first', '%s' ls: CommandFilter, ls, root # manila/data/utils.py: 'touch', '--reference=%s', '%s' touch: CommandFilter, touch, root manila-2.0.0/etc/manila/rootwrap.conf0000664000567000056710000000173512701407107020660 0ustar jenkinsjenkins00000000000000# Configuration for manila-rootwrap 
# This file should be owned by (and only-writeable by) the root user [DEFAULT] # List of directories to load filter definitions from (separated by ','). # These directories MUST all be only writeable by root ! filters_path=/etc/manila/rootwrap.d,/usr/share/manila/rootwrap # List of directories to search executables in, in case filters do not # explicitely specify a full path (separated by ',') # If not specified, defaults to system PATH environment variable. # These directories MUST all be only writeable by root ! exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/sbin,/usr/local/bin,/usr/lpp/mmfs/bin # Enable logging to syslog # Default value is False use_syslog=False # Which syslog facility to use. # Valid values include auth, authpriv, syslog, user0, user1... # Default value is 'syslog' syslog_log_facility=syslog # Which messages to log. # INFO means log all usage # ERROR means only log unsuccessful attempts syslog_log_level=ERROR manila-2.0.0/etc/manila/policy.json0000664000567000056710000001225012701407107020320 0ustar jenkinsjenkins00000000000000{ "context_is_admin": "role:admin", "admin_or_owner": "is_admin:True or project_id:%(project_id)s", "default": "rule:admin_or_owner", "admin_api": "is_admin:True", "availability_zone:index": "rule:default", "quota_set:update": "rule:admin_api", "quota_set:show": "rule:default", "quota_set:delete": "rule:admin_api", "quota_class_set:show": "rule:default", "quota_class_set:update": "rule:admin_api", "service:index": "rule:admin_api", "service:update": "rule:admin_api", "share:create": "", "share:delete": "rule:default", "share:get": "rule:default", "share:get_all": "rule:default", "share:list_by_share_server_id": "rule:admin_api", "share:update": "rule:default", "share:access_get": "rule:default", "share:access_get_all": "rule:default", "share:allow_access": "rule:default", "share:deny_access": "rule:default", "share:extend": "rule:default", "share:shrink": "rule:default", "share:get_share_metadata": "rule:default", "share:delete_share_metadata": "rule:default", "share:update_share_metadata": "rule:default", "share:migration_start": "rule:admin_api", "share:migration_complete": "rule:admin_api", "share:migration_cancel": "rule:admin_api", "share:migration_get_progress": "rule:admin_api", "share:reset_task_state": "rule:admin_api", "share:manage": "rule:admin_api", "share:unmanage": "rule:admin_api", "share:force_delete": "rule:admin_api", "share:reset_status": "rule:admin_api", "share_export_location:index": "rule:default", "share_export_location:show": "rule:default", "share_instance:index": "rule:admin_api", "share_instance:show": "rule:admin_api", "share_instance:force_delete": "rule:admin_api", "share_instance:reset_status": "rule:admin_api", "share_instance_export_location:index": "rule:admin_api", "share_instance_export_location:show": "rule:admin_api", "share_snapshot:create_snapshot": "rule:default", "share_snapshot:delete_snapshot": "rule:default", "share_snapshot:get_snapshot": "rule:default", "share_snapshot:get_all_snapshots": "rule:default", "share_snapshot:snapshot_update": "rule:default", "share_snapshot:manage_snapshot": "rule:admin_api", "share_snapshot:unmanage_snapshot": "rule:admin_api", "share_snapshot:force_delete": "rule:admin_api", "share_snapshot:reset_status": "rule:admin_api", "share_type:index": "rule:default", "share_type:show": "rule:default", "share_type:default": "rule:default", "share_type:create": "rule:admin_api", "share_type:delete": "rule:admin_api", "share_type:add_project_access": "rule:admin_api", 
"share_type:list_project_access": "rule:admin_api", "share_type:remove_project_access": "rule:admin_api", "share_types_extra_spec:create": "rule:admin_api", "share_types_extra_spec:update": "rule:admin_api", "share_types_extra_spec:show": "rule:admin_api", "share_types_extra_spec:index": "rule:admin_api", "share_types_extra_spec:delete": "rule:admin_api", "security_service:create": "rule:default", "security_service:delete": "rule:default", "security_service:update": "rule:default", "security_service:show": "rule:default", "security_service:index": "rule:default", "security_service:detail": "rule:default", "security_service:get_all_security_services": "rule:admin_api", "share_server:index": "rule:admin_api", "share_server:show": "rule:admin_api", "share_server:details": "rule:admin_api", "share_server:delete": "rule:admin_api", "share_network:create": "rule:default", "share_network:delete": "rule:default", "share_network:update": "rule:default", "share_network:index": "rule:default", "share_network:detail": "rule:default", "share_network:show": "rule:default", "share_network:add_security_service": "rule:default", "share_network:remove_security_service": "rule:default", "share_network:get_all_share_networks": "rule:admin_api", "scheduler_stats:pools:index": "rule:admin_api", "scheduler_stats:pools:detail": "rule:admin_api", "consistency_group:create" : "rule:default", "consistency_group:delete": "rule:default", "consistency_group:update": "rule:default", "consistency_group:get": "rule:default", "consistency_group:get_all": "rule:default", "consistency_group:force_delete": "rule:admin_api", "consistency_group:reset_status": "rule:admin_api", "cgsnapshot:force_delete": "rule:admin_api", "cgsnapshot:reset_status": "rule:admin_api", "cgsnapshot:create" : "rule:default", "cgsnapshot:update" : "rule:default", "cgsnapshot:delete": "rule:default", "cgsnapshot:get_cgsnapshot": "rule:default", "cgsnapshot:get_all": "rule:default", "share_replica:get_all": "rule:default", "share_replica:show": "rule:default", "share_replica:create" : "rule:default", "share_replica:delete": "rule:default", "share_replica:promote": "rule:default", "share_replica:resync": "rule:admin_api", "share_replica:reset_status": "rule:admin_api", "share_replica:force_delete": "rule:admin_api", "share_replica:reset_replica_state": "rule:admin_api" } manila-2.0.0/etc/manila/api-paste.ini0000664000567000056710000000332112701407107020511 0ustar jenkinsjenkins00000000000000############# # OpenStack # ############# [composite:osapi_share] use = call:manila.api:root_app_factory /: apiversions /v1: openstack_share_api /v2: openstack_share_api_v2 [composite:openstack_share_api] use = call:manila.api.middleware.auth:pipeline_factory noauth = cors faultwrap ssl sizelimit noauth api keystone = cors faultwrap ssl sizelimit authtoken keystonecontext api keystone_nolimit = cors faultwrap ssl sizelimit authtoken keystonecontext api [composite:openstack_share_api_v2] use = call:manila.api.middleware.auth:pipeline_factory noauth = cors faultwrap ssl sizelimit noauth apiv2 keystone = cors faultwrap ssl sizelimit authtoken keystonecontext apiv2 keystone_nolimit = cors faultwrap ssl sizelimit authtoken keystonecontext apiv2 [filter:faultwrap] paste.filter_factory = manila.api.middleware.fault:FaultWrapper.factory [filter:noauth] paste.filter_factory = manila.api.middleware.auth:NoAuthMiddleware.factory [filter:sizelimit] paste.filter_factory = oslo_middleware.sizelimit:RequestBodySizeLimiter.factory [filter:ssl] paste.filter_factory = 
oslo_middleware.ssl:SSLMiddleware.factory [app:api] paste.app_factory = manila.api.v1.router:APIRouter.factory [app:apiv2] paste.app_factory = manila.api.v2.router:APIRouter.factory [pipeline:apiversions] pipeline = cors faultwrap osshareversionapp [app:osshareversionapp] paste.app_factory = manila.api.versions:VersionsRouter.factory ########## # Shared # ########## [filter:keystonecontext] paste.filter_factory = manila.api.middleware.auth:ManilaKeystoneContext.factory [filter:authtoken] paste.filter_factory = keystonemiddleware.auth_token:filter_factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = manila manila-2.0.0/etc/manila/logging_sample.conf0000664000567000056710000000236712701407107021774 0ustar jenkinsjenkins00000000000000[loggers] keys = root, manila [handlers] keys = stderr, stdout, watchedfile, syslog, null [formatters] keys = default [logger_root] level = WARNING handlers = null [logger_manila] level = INFO handlers = stderr qualname = manila [logger_amqplib] level = WARNING handlers = stderr qualname = amqplib [logger_sqlalchemy] level = WARNING handlers = stderr qualname = sqlalchemy # "level = INFO" logs SQL queries. # "level = DEBUG" logs SQL queries and results. # "level = WARNING" logs neither. (Recommended for production systems.) [logger_boto] level = WARNING handlers = stderr qualname = boto [logger_suds] level = INFO handlers = stderr qualname = suds [logger_eventletwsgi] level = WARNING handlers = stderr qualname = eventlet.wsgi.server [handler_stderr] class = StreamHandler args = (sys.stderr,) formatter = default [handler_stdout] class = StreamHandler args = (sys.stdout,) formatter = default [handler_watchedfile] class = handlers.WatchedFileHandler args = ('manila.log',) formatter = default [handler_syslog] class = handlers.SysLogHandler args = ('/dev/log', handlers.SysLogHandler.LOG_USER) formatter = default [handler_null] class = manila.common.openstack.NullHandler formatter = default args = () [formatter_default] format = %(message)s manila-2.0.0/etc/manila/README.manila.conf0000664000567000056710000000020012701407107021162 0ustar jenkinsjenkins00000000000000To generate the sample manila.conf file, run the following command from the top level of the manila directory: tox -egenconfig manila-2.0.0/etc/oslo-config-generator/0000775000567000056710000000000012701407265021075 5ustar jenkinsjenkins00000000000000manila-2.0.0/etc/oslo-config-generator/manila.conf0000664000567000056710000000034312701407107023200 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/manila/manila.conf.sample namespace = manila namespace = oslo.messaging namespace = oslo.middleware.cors namespace = oslo.db namespace = oslo.db.concurrency namespace = keystonemiddleware.auth_token manila-2.0.0/babel.cfg0000664000567000056710000000002112701407107015631 0ustar jenkinsjenkins00000000000000[python: **.py] manila-2.0.0/openstack-common.conf0000664000567000056710000000057012701407107020240 0ustar jenkinsjenkins00000000000000[DEFAULT] # The list of modules to copy from openstack-common module=scheduler module=scheduler.filters module=scheduler.weights # The list of scripts to copy from oslo common code script = tools/colorizer.py script = tools/install_venv.py script = tools/install_venv_common.py script = tools/with_venv.sh # The base module to hold the copy of openstack.common base=manila manila-2.0.0/.coveragerc0000664000567000056710000000014312701407107016231 0ustar jenkinsjenkins00000000000000[run] branch = True source = manila omit = 
manila/test*,manila/openstack/* [report] precision = 2 manila-2.0.0/setup.cfg0000664000567000056710000000635012701407265015744 0ustar jenkinsjenkins00000000000000[metadata] name = manila summary = Shared Storage for OpenStack description-file = README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org home-page = http://www.openstack.org/ classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 [global] setup-hooks = pbr.hooks.setup_hook [files] packages = manila manila_tempest_tests [entry_points] console_scripts = manila-all = manila.cmd.all:main manila-api = manila.cmd.api:main manila-data = manila.cmd.data:main manila-manage = manila.cmd.manage:main manila-rootwrap = oslo_rootwrap.cmd:main manila-scheduler = manila.cmd.scheduler:main manila-share = manila.cmd.share:main manila.scheduler.filters = AvailabilityZoneFilter = manila.scheduler.filters.availability_zone:AvailabilityZoneFilter CapabilitiesFilter = manila.scheduler.filters.capabilities:CapabilitiesFilter CapacityFilter = manila.scheduler.filters.capacity:CapacityFilter ConsistencyGroupFilter = manila.scheduler.filters.consistency_group:ConsistencyGroupFilter IgnoreAttemptedHostsFilter = manila.scheduler.filters.ignore_attempted_hosts:IgnoreAttemptedHostsFilter JsonFilter = manila.scheduler.filters.json:JsonFilter RetryFilter = manila.scheduler.filters.retry:RetryFilter ShareReplicationFilter = manila.scheduler.filters.share_replication:ShareReplicationFilter manila.scheduler.weighers = CapacityWeigher = manila.scheduler.weighers.capacity:CapacityWeigher PoolWeigher = manila.scheduler.weighers.pool:PoolWeigher oslo_messaging.notify.drivers = manila.openstack.common.notifier.log_notifier = oslo_messaging.notify._impl_log:LogDriver manila.openstack.common.notifier.no_op_notifier = oslo_messaging.notify._impl_noop:NoOpDriver manila.openstack.common.notifier.rpc_notifier2 = oslo_messaging.notify.messaging:MessagingV2Driver manila.openstack.common.notifier.rpc_notifier = oslo_messaging.notify.messaging:MessagingDriver manila.openstack.common.notifier.test_notifier = oslo_messaging.notify._impl_test:TestDriver oslo.config.opts = manila = manila.opts:list_opts oslo.config.opts.defaults = manila = manila.common.config:set_middleware_defaults manila.share.drivers.emc.plugins = vnx = manila.share.drivers.emc.plugins.vnx.connection:VNXStorageConnection isilon = manila.share.drivers.emc.plugins.isilon.isilon:IsilonStorageConnection manila.tests.scheduler.fakes = FakeWeigher1 = manila.tests.scheduler.fakes:FakeWeigher1 FakeWeigher2 = manila.tests.scheduler.fakes:FakeWeigher2 tempest.test_plugins = manila_tests = manila_tempest_tests.plugin:ManilaTempestPlugin [build_sphinx] all_files = 1 build-dir = doc/build source-dir = doc/source [egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 [compile_catalog] directory = manila/locale domain = manila [update_catalog] domain = manila output_dir = manila/locale input_file = manila/locale/manila.pot [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = manila/locale/manila.pot [wheel] universal = 1 [pbr] warnerrors = true manila-2.0.0/manila_tempest_tests/0000775000567000056710000000000012701407265020343 5ustar 
jenkinsjenkins00000000000000manila-2.0.0/manila_tempest_tests/plugin.py0000664000567000056710000000346712701407107022220 0ustar jenkinsjenkins00000000000000# Copyright 2015 Deutsche Telekom AG # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from tempest import config from tempest.test_discover import plugins from manila_tempest_tests import config as config_share class ManilaTempestPlugin(plugins.TempestPlugin): def load_tests(self): base_path = os.path.split(os.path.dirname( os.path.abspath(__file__)))[0] test_dir = "manila_tempest_tests/tests" full_test_dir = os.path.join(base_path, test_dir) return full_test_dir, base_path def register_opts(self, conf): config.register_opt_group( conf, config_share.service_available_group, config_share.ServiceAvailableGroup) config.register_opt_group(conf, config_share.share_group, config_share.ShareGroup) # NOTE(vponomaryov): set opt 'capability_snapshot_support' by # default equal to opt 'run_snapshot_tests'. if conf.share.capability_snapshot_support is None: conf.set_default( "capability_snapshot_support", conf.share.run_snapshot_tests, group="share", ) def get_opt_lists(self): return [(config_share.share_group.name, config_share.ShareGroup)] manila-2.0.0/manila_tempest_tests/clients_share.py0000664000567000056710000000321312701407107023532 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
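# NOTE: an illustrative usage sketch for the managers defined below; the
# exact call is an assumption for illustration, not taken from this module:
#
#     manager = AdminManager()
#     shares = manager.shares_v2_client.list_shares()
#
# Each manager attaches both the v1 'shares_client' and the microversion-aware
# 'shares_v2_client' to tempest's auth provider for its credential set.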
from tempest import clients from tempest.common import credentials_factory as common_creds from manila_tempest_tests.services.share.json import shares_client from manila_tempest_tests.services.share.v2.json import shares_client \ as shares_v2_client class Manager(clients.Manager): def __init__( self, credentials=common_creds.get_configured_credentials('user'), service=None): super(Manager, self).__init__(credentials, service) self.shares_client = shares_client.SharesClient(self.auth_provider) self.shares_v2_client = shares_v2_client.SharesV2Client( self.auth_provider) class AltManager(Manager): def __init__(self, service=None): super(AltManager, self).__init__( common_creds.get_configured_credentials('alt_user'), service) class AdminManager(Manager): def __init__(self, service=None): super(AdminManager, self).__init__( common_creds.get_configured_credentials('identity_admin'), service) manila-2.0.0/manila_tempest_tests/tests/0000775000567000056710000000000012701407265021505 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila_tempest_tests/tests/__init__.py0000664000567000056710000000000012701407107023577 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila_tempest_tests/tests/api/0000775000567000056710000000000012701407265022256 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila_tempest_tests/tests/api/test_availability_zones_negative.py0000664000567000056710000000303612701407107031436 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib import exceptions as lib_exc from tempest import test from manila_tempest_tests.tests.api import base @base.skip_if_microversion_not_supported("2.7") class AvailabilityZonesNegativeTest(base.BaseSharesTest): @test.attr(type=["smoke", "gate"]) def test_list_availability_zones_api_not_found_with_legacy_url(self): # NOTE(vponomaryov): remove this test with removal of availability zone # extension url support. self.assertRaises( lib_exc.NotFound, self.shares_v2_client.list_availability_zones, url='os-availability-zone', version='2.7', ) @test.attr(type=["smoke", "gate"]) def test_list_availability_zones_api_not_found(self): self.assertRaises( lib_exc.NotFound, self.shares_v2_client.list_availability_zones, url='availability-zones', version='2.6', ) manila-2.0.0/manila_tempest_tests/tests/api/test_security_services_mapping_negative.py0000664000567000056710000001631212701407107033034 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log # noqa import six # noqa from tempest import config # noqa from tempest.lib import exceptions as lib_exc # noqa from tempest import test # noqa import testtools # noqa from manila_tempest_tests.tests.api import base CONF = config.CONF LOG = log.getLogger(__name__) class SecServicesMappingNegativeTest(base.BaseSharesTest): @classmethod def resource_setup(cls): super(SecServicesMappingNegativeTest, cls).resource_setup() cls.sn = cls.create_share_network(cleanup_in_class=True) cls.ss = cls.create_security_service(cleanup_in_class=True) cls.cl = cls.shares_client @test.attr(type=["gate", "smoke", "negative"]) def test_add_sec_service_twice_to_share_network(self): self.cl.add_sec_service_to_share_network(self.sn["id"], self.ss["id"]) self.assertRaises(lib_exc.Conflict, self.cl.add_sec_service_to_share_network, self.sn["id"], self.ss["id"]) @test.attr(type=["gate", "smoke", "negative"]) def test_add_nonexistant_sec_service_to_share_network(self): self.assertRaises(lib_exc.NotFound, self.cl.add_sec_service_to_share_network, self.sn["id"], "wrong_ss_id") @test.attr(type=["gate", "smoke", "negative"]) def test_add_empty_sec_service_id_to_share_network(self): self.assertRaises(lib_exc.NotFound, self.cl.add_sec_service_to_share_network, self.sn["id"], "") @test.attr(type=["gate", "smoke", "negative"]) def test_add_sec_service_to_nonexistant_share_network(self): self.assertRaises(lib_exc.NotFound, self.cl.add_sec_service_to_share_network, "wrong_sn_id", self.ss["id"]) @test.attr(type=["gate", "smoke", "negative"]) def test_add_sec_service_to_share_network_with_empty_id(self): self.assertRaises(lib_exc.NotFound, self.cl.add_sec_service_to_share_network, "", self.ss["id"]) @test.attr(type=["gate", "smoke", "negative"]) def test_list_sec_services_for_nonexistant_share_network(self): self.assertRaises(lib_exc.NotFound, self.cl.list_sec_services_for_share_network, "wrong_id") @test.attr(type=["gate", "smoke", "negative"]) def test_delete_nonexistant_sec_service_from_share_network(self): self.assertRaises(lib_exc.NotFound, self.cl.remove_sec_service_from_share_network, self.sn["id"], "wrong_id") @test.attr(type=["gate", "smoke", "negative"]) def test_delete_sec_service_from_nonexistant_share_network(self): self.assertRaises(lib_exc.NotFound, self.cl.remove_sec_service_from_share_network, "wrong_id", self.ss["id"]) @test.attr(type=["gate", "smoke", "negative"]) def test_delete_nonexistant_ss_from_nonexistant_sn(self): self.assertRaises(lib_exc.NotFound, self.cl.remove_sec_service_from_share_network, "wrong_id", "wrong_id") @test.attr(type=["gate", "smoke", "negative"]) @testtools.skipIf( not CONF.share.multitenancy_enabled, "Only for multitenancy.") def test_delete_ss_from_sn_used_by_share_server(self): sn = self.shares_client.get_share_network( self.os.shares_client.share_network_id) fresh_sn = self.create_share_network( neutron_net_id=sn["neutron_net_id"], neutron_subnet_id=sn["neutron_subnet_id"]) self.shares_client.add_sec_service_to_share_network( fresh_sn["id"], self.ss["id"]) # Security service with fake data is used, so if we use backend driver # that fails on wrong data, we expect error here. # We require any share that uses our share-network. try: self.create_share( share_network_id=fresh_sn["id"], cleanup_in_class=False) except Exception as e: # we do wait for either 'error' or 'available' status because # it is the only available statuses for proper deletion. 
LOG.warning("Caught exception. It is expected in case backend " "fails having security-service with improper data " "that leads to share-server creation error. " "%s" % six.text_type(e)) self.assertRaises(lib_exc.Forbidden, self.cl.remove_sec_service_from_share_network, fresh_sn["id"], self.ss["id"]) @test.attr(type=["gate", "smoke", "negative"]) def test_try_map_two_ss_with_same_type_to_sn(self): # create share network data = self.generate_share_network_data() sn = self.create_share_network(client=self.cl, **data) self.assertDictContainsSubset(data, sn) # create security services with same type security_services = [] for i in range(2): data = self.generate_security_service_data() ss = self.create_security_service(client=self.cl, **data) self.assertDictContainsSubset(data, ss) security_services.insert(i, ss) # Add security service to share network self.cl.add_sec_service_to_share_network( sn["id"], security_services[0]["id"]) # Try to add security service with same type self.assertRaises(lib_exc.Conflict, self.cl.add_sec_service_to_share_network, sn["id"], security_services[1]["id"]) @test.attr(type=["gate", "smoke", "negative"]) def test_try_delete_ss_that_assigned_to_sn(self): # create share network data = self.generate_share_network_data() sn = self.create_share_network(client=self.cl, **data) self.assertDictContainsSubset(data, sn) # create security service data = self.generate_security_service_data() ss = self.create_security_service(client=self.cl, **data) self.assertDictContainsSubset(data, ss) # Add security service to share network self.cl.add_sec_service_to_share_network(sn["id"], ss["id"]) # Try delete ss, that has been assigned to some sn self.assertRaises(lib_exc.Forbidden, self.cl.delete_security_service, ss["id"], ) # remove seurity service from share-network self.cl.remove_sec_service_from_share_network(sn["id"], ss["id"]) manila-2.0.0/manila_tempest_tests/tests/api/test_shares_actions_negative.py0000664000567000056710000001164412701407107030557 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from tempest import config # noqa from tempest.lib import exceptions as lib_exc # noqa from tempest import test # noqa import testtools # noqa from manila_tempest_tests import clients_share as clients from manila_tempest_tests.tests.api import base CONF = config.CONF class SharesActionsNegativeTest(base.BaseSharesTest): @classmethod def resource_setup(cls): super(SharesActionsNegativeTest, cls).resource_setup() cls.share = cls.create_share( size=1, ) @test.attr(type=["negative", ]) @testtools.skipUnless( CONF.share.run_extend_tests, "Share extend tests are disabled.") def test_share_extend_over_quota(self): tenant_quotas = self.shares_client.show_quotas( self.shares_client.tenant_id) new_size = int(tenant_quotas["gigabytes"]) + 1 # extend share with over quota and check result self.assertRaises(lib_exc.Forbidden, self.shares_client.extend_share, self.share['id'], new_size) @test.attr(type=["negative", ]) @testtools.skipUnless( CONF.share.run_extend_tests, "Share extend tests are disabled.") def test_share_extend_with_less_size(self): new_size = int(self.share['size']) - 1 # extend share with invalid size and check result self.assertRaises(lib_exc.BadRequest, self.shares_client.extend_share, self.share['id'], new_size) @test.attr(type=["negative", ]) @testtools.skipUnless( CONF.share.run_extend_tests, "Share extend tests are disabled.") def test_share_extend_with_same_size(self): new_size = int(self.share['size']) # extend share with invalid size and check result self.assertRaises(lib_exc.BadRequest, self.shares_client.extend_share, self.share['id'], new_size) @test.attr(type=["negative", ]) @testtools.skipUnless( CONF.share.run_extend_tests, "Share extend tests are disabled.") def test_share_extend_with_invalid_share_state(self): share = self.create_share(size=1, cleanup_in_class=False) new_size = int(share['size']) + 1 # set "error" state admin_client = clients.AdminManager().shares_client admin_client.reset_state(share['id']) # run extend operation on same share and check result self.assertRaises(lib_exc.BadRequest, self.shares_client.extend_share, share['id'], new_size) @test.attr(type=["negative", ]) @testtools.skipUnless( CONF.share.run_shrink_tests, "Share shrink tests are disabled.") def test_share_shrink_with_greater_size(self): new_size = int(self.share['size']) + 1 # shrink share with invalid size and check result self.assertRaises(lib_exc.BadRequest, self.shares_client.shrink_share, self.share['id'], new_size) @test.attr(type=["negative", ]) @testtools.skipUnless( CONF.share.run_shrink_tests, "Share shrink tests are disabled.") def test_share_shrink_with_same_size(self): new_size = int(self.share['size']) # shrink share with invalid size and check result self.assertRaises(lib_exc.BadRequest, self.shares_client.shrink_share, self.share['id'], new_size) @test.attr(type=["negative", ]) @testtools.skipUnless( CONF.share.run_shrink_tests, "Share shrink tests are disabled.") def test_share_shrink_with_invalid_share_state(self): share = self.create_share(size=2, cleanup_in_class=False) new_size = int(share['size']) - 1 # set "error" state admin_client = clients.AdminManager().shares_client admin_client.reset_state(share['id']) # run shrink operation on same share and check result self.assertRaises(lib_exc.BadRequest, self.shares_client.shrink_share, share['id'], new_size) manila-2.0.0/manila_tempest_tests/tests/api/test_microversions.py0000664000567000056710000001454712701407107026577 0ustar jenkinsjenkins00000000000000# Copyright 2015 Goutham Pacha Ravi # Copyright 2015 Clinton Knight 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import config from tempest import test from manila_tempest_tests.tests.api import base CONF = config.CONF API_MICROVERSIONS_HEADER_LOWER = 'x-openstack-manila-api-version' API_MICROVERSIONS_HEADER = 'X-OpenStack-Manila-API-Version' _MIN_API_VERSION = CONF.share.min_api_microversion _MAX_API_VERSION = CONF.share.max_api_microversion class MicroversionsTest(base.BaseSharesTest): """Request and validate REST API Microversions. Sends HTTP GET requests to the version API to validate microversions. """ @test.attr(type=["gate", "smoke", ]) def test_microversions_root_version(self): resp, resp_body = self.shares_v2_client.send_microversion_request() self.assertEqual(300, resp.status) version_list = resp_body['versions'] ids = [v['id'] for v in version_list] self.assertEqual({'v1.0', 'v2.0'}, set(ids)) self.assertNotIn(API_MICROVERSIONS_HEADER_LOWER, resp) self.assertNotIn('vary', resp) v1 = [v for v in version_list if v['id'] == 'v1.0'][0] self.assertEqual('', v1.get('min_version')) self.assertEqual('', v1.get('version')) v2 = [v for v in version_list if v['id'] == 'v2.0'][0] self.assertEqual(_MIN_API_VERSION, v2.get('min_version')) self.assertEqual(_MAX_API_VERSION, v2.get('version')) @test.attr(type=["gate", "smoke", ]) def test_microversions_v1_no_version(self): resp, resp_body = self.shares_v2_client.send_microversion_request( script_name='v1') self.assertEqual(200, resp.status) version_list = resp_body['versions'] ids = [v['id'] for v in version_list] self.assertEqual({'v1.0'}, set(ids)) self.assertEqual('1.0', resp.get(API_MICROVERSIONS_HEADER_LOWER)) self.assertEqual(API_MICROVERSIONS_HEADER, resp.get('vary')) self.assertEqual('', version_list[0].get('min_version')) self.assertEqual('', version_list[0].get('version')) @test.attr(type=["gate", "smoke", ]) def test_microversions_v1_with_version(self): resp, resp_body = self.shares_v2_client.send_microversion_request( script_name='v1', version='5.0') self.assertEqual(200, resp.status) version_list = resp_body['versions'] ids = [v['id'] for v in version_list] self.assertEqual({'v1.0'}, set(ids)) self.assertEqual('1.0', resp.get(API_MICROVERSIONS_HEADER_LOWER)) self.assertEqual(API_MICROVERSIONS_HEADER, resp.get('vary')) self.assertEqual('', version_list[0].get('min_version')) self.assertEqual('', version_list[0].get('version')) @test.attr(type=["gate", "smoke", ]) def test_microversions_v2_no_version(self): resp, resp_body = self.shares_v2_client.send_microversion_request( script_name='v2') self.assertEqual(200, resp.status) version_list = resp_body['versions'] ids = [v['id'] for v in version_list] self.assertEqual({'v2.0'}, set(ids)) self.assertEqual(_MIN_API_VERSION, resp.get(API_MICROVERSIONS_HEADER_LOWER)) self.assertEqual(API_MICROVERSIONS_HEADER, resp.get('vary')) self.assertEqual(_MIN_API_VERSION, version_list[0].get('min_version')) self.assertEqual(_MAX_API_VERSION, version_list[0].get('version')) @test.attr(type=["gate", "smoke", ]) def 
test_microversions_v2_min_version(self): resp, resp_body = self.shares_v2_client.send_microversion_request( script_name='v2', version=_MIN_API_VERSION) self.assertEqual(200, resp.status) version_list = resp_body['versions'] ids = [v['id'] for v in version_list] self.assertEqual({'v2.0'}, set(ids)) self.assertEqual(_MIN_API_VERSION, resp.get(API_MICROVERSIONS_HEADER_LOWER)) self.assertEqual(API_MICROVERSIONS_HEADER, resp.get('vary')) self.assertEqual(_MIN_API_VERSION, version_list[0].get('min_version')) self.assertEqual(_MAX_API_VERSION, version_list[0].get('version')) @test.attr(type=["gate", "smoke", ]) def test_microversions_v2_max_version(self): resp, resp_body = self.shares_v2_client.send_microversion_request( script_name='v2', version=_MAX_API_VERSION) self.assertEqual(200, resp.status) version_list = resp_body['versions'] ids = [v['id'] for v in version_list] self.assertEqual({'v2.0'}, set(ids)) self.assertEqual(_MAX_API_VERSION, resp.get(API_MICROVERSIONS_HEADER_LOWER)) self.assertEqual(API_MICROVERSIONS_HEADER, resp.get('vary')) self.assertEqual(_MIN_API_VERSION, version_list[0].get('min_version')) self.assertEqual(_MAX_API_VERSION, version_list[0].get('version')) @test.attr(type=["gate", "smoke", ]) def test_microversions_v2_invalid_version(self): resp, _ = self.shares_v2_client.send_microversion_request( script_name='v2', version='1.2.1') self.assertEqual(400, resp.status) @test.attr(type=["gate", "smoke", ]) def test_microversions_v2_unacceptable_version(self): # First get max version from the server resp, resp_body = self.shares_v2_client.send_microversion_request( script_name='v2') self.assertEqual(200, resp.status) version_list = resp_body['versions'] latest_version = version_list[0].get('version') major, minor = [int(ver) for ver in latest_version.split(".")] next_version = ('%s.%s' % (major + 1, minor + 1)) # Request a version that is too high resp, _ = self.shares_v2_client.send_microversion_request( script_name='v2', version=next_version) self.assertEqual(406, resp.status) manila-2.0.0/manila_tempest_tests/tests/api/test_security_services.py0000664000567000056710000002001112701407107027426 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log # noqa import six # noqa from tempest import config # noqa from tempest import test # noqa import testtools # noqa from manila_tempest_tests.tests.api import base CONF = config.CONF LOG = log.getLogger(__name__) class SecurityServiceListMixin(object): @test.attr(type=["gate", "smoke"]) def test_list_security_services(self): listed = self.shares_client.list_security_services() self.assertTrue(any(self.ss_ldap['id'] == ss['id'] for ss in listed)) self.assertTrue(any(self.ss_kerberos['id'] == ss['id'] for ss in listed)) # verify keys keys = ["name", "id", "status", "type", ] [self.assertIn(key, s_s.keys()) for s_s in listed for key in keys] @test.attr(type=["gate", "smoke"]) def test_list_security_services_with_detail(self): listed = self.shares_client.list_security_services(detailed=True) self.assertTrue(any(self.ss_ldap['id'] == ss['id'] for ss in listed)) self.assertTrue(any(self.ss_kerberos['id'] == ss['id'] for ss in listed)) # verify keys keys = [ "name", "id", "status", "description", "domain", "server", "dns_ip", "user", "password", "type", "created_at", "updated_at", "project_id", ] [self.assertIn(key, s_s.keys()) for s_s in listed for key in keys] @test.attr(type=["gate", "smoke"]) @testtools.skipIf( not CONF.share.multitenancy_enabled, "Only for multitenancy.") def test_list_security_services_filter_by_share_network(self): sn = self.shares_client.get_share_network( self.os.shares_client.share_network_id) fresh_sn = [] for i in range(2): sn = self.create_share_network( neutron_net_id=sn["neutron_net_id"], neutron_subnet_id=sn["neutron_subnet_id"]) fresh_sn.append(sn) self.shares_client.add_sec_service_to_share_network( fresh_sn[0]["id"], self.ss_ldap["id"]) self.shares_client.add_sec_service_to_share_network( fresh_sn[1]["id"], self.ss_kerberos["id"]) listed = self.shares_client.list_security_services( params={'share_network_id': fresh_sn[0]['id']}) self.assertEqual(1, len(listed)) self.assertEqual(self.ss_ldap['id'], listed[0]['id']) keys = ["name", "id", "status", "type", ] [self.assertIn(key, s_s.keys()) for s_s in listed for key in keys] @test.attr(type=["gate", "smoke"]) def test_list_security_services_detailed_filter_by_ss_attributes(self): search_opts = { 'name': 'ss_ldap', 'type': 'ldap', 'user': 'fake_user', 'server': 'fake_server_1', 'dns_ip': '1.1.1.1', 'domain': 'fake_domain_1', } listed = self.shares_client.list_security_services( detailed=True, params=search_opts) self.assertTrue(any(self.ss_ldap['id'] == ss['id'] for ss in listed)) for ss in listed: self.assertTrue(all(ss[key] == value for key, value in six.iteritems(search_opts))) class SecurityServicesTest(base.BaseSharesTest, SecurityServiceListMixin): def setUp(self): super(SecurityServicesTest, self).setUp() ss_ldap_data = { 'name': 'ss_ldap', 'dns_ip': '1.1.1.1', 'server': 'fake_server_1', 'domain': 'fake_domain_1', 'user': 'fake_user', 'password': 'pass', } ss_kerberos_data = { 'name': 'ss_kerberos', 'dns_ip': '2.2.2.2', 'server': 'fake_server_2', 'domain': 'fake_domain_2', 'user': 'test_user', 'password': 'word', } self.ss_ldap = self.create_security_service('ldap', **ss_ldap_data) self.ss_kerberos = self.create_security_service( 'kerberos', **ss_kerberos_data) @test.attr(type=["gate", "smoke"]) def test_create_delete_security_service(self): data = self.generate_security_service_data() self.service_names = ["ldap", "kerberos", "active_directory"] for ss_name in self.service_names: ss = self.create_security_service(ss_name, **data) self.assertDictContainsSubset(data, ss) 
self.assertEqual(ss_name, ss["type"]) self.shares_client.delete_security_service(ss["id"]) @test.attr(type=["gate", "smoke"]) def test_get_security_service(self): data = self.generate_security_service_data() ss = self.create_security_service(**data) self.assertDictContainsSubset(data, ss) get = self.shares_client.get_security_service(ss["id"]) self.assertDictContainsSubset(data, get) @test.attr(type=["gate", "smoke"]) def test_update_security_service(self): data = self.generate_security_service_data() ss = self.create_security_service(**data) self.assertDictContainsSubset(data, ss) upd_data = self.generate_security_service_data() updated = self.shares_client.update_security_service( ss["id"], **upd_data) get = self.shares_client.get_security_service(ss["id"]) self.assertDictContainsSubset(upd_data, updated) self.assertDictContainsSubset(upd_data, get) @test.attr(type=["gate", "smoke"]) @testtools.skipIf( not CONF.share.multitenancy_enabled, "Only for multitenancy.") def test_try_update_valid_keys_sh_server_exists(self): ss_data = self.generate_security_service_data() ss = self.create_security_service(**ss_data) sn = self.shares_client.get_share_network( self.os.shares_client.share_network_id) fresh_sn = self.create_share_network( neutron_net_id=sn["neutron_net_id"], neutron_subnet_id=sn["neutron_subnet_id"]) self.shares_client.add_sec_service_to_share_network( fresh_sn["id"], ss["id"]) # Security service with fake data is used, so if we use backend driver # that fails on wrong data, we expect error here. # We require any share that uses our share-network. try: self.create_share( share_network_id=fresh_sn["id"], cleanup_in_class=False) except Exception as e: # we do wait for either 'error' or 'available' status because # it is the only available statuses for proper deletion. LOG.warning("Caught exception. It is expected in case backend " "fails having security-service with improper data " "that leads to share-server creation error. " "%s" % six.text_type(e)) update_data = { "name": "name", "description": "new_description", } updated = self.shares_client.update_security_service( ss["id"], **update_data) self.assertDictContainsSubset(update_data, updated) @test.attr(type=["gate", "smoke"]) def test_list_security_services_filter_by_invalid_opt(self): listed = self.shares_client.list_security_services( params={'fake_opt': 'some_value'}) self.assertTrue(any(self.ss_ldap['id'] == ss['id'] for ss in listed)) self.assertTrue(any(self.ss_kerberos['id'] == ss['id'] for ss in listed)) manila-2.0.0/manila_tempest_tests/tests/api/test_limits.py0000664000567000056710000000457212701407107025173 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
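# NOTE: an illustrative sketch of the limits payload that the assertions in
# this module expect; the key names come from the checks below, while the
# values are invented for illustration only:
#
#     {"rate": [],
#      "absolute": {"maxTotalShares": 50, "maxTotalShareGigabytes": 1000,
#                   "totalSharesUsed": 0, "totalShareGigabytesUsed": 0}}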
from tempest import test # noqa from manila_tempest_tests.tests.api import base class ShareLimitsTest(base.BaseSharesTest): @test.attr(type=["gate", "smoke", ]) def test_limits_keys(self): # list limits limits = self.shares_client.get_limits() # verify response keys = ["rate", "absolute"] [self.assertIn(key, limits.keys()) for key in keys] abs_keys = [ "maxTotalShareGigabytes", "maxTotalShares", "maxTotalShareSnapshots", "maxTotalShareNetworks", "maxTotalSnapshotGigabytes", "totalSharesUsed", "totalShareSnapshotsUsed", "totalShareNetworksUsed", "totalShareGigabytesUsed", "totalSnapshotGigabytesUsed", ] [self.assertIn(key, limits["absolute"].keys()) for key in abs_keys] @test.attr(type=["gate", "smoke", ]) def test_limits_values(self): # list limits limits = self.shares_client.get_limits() # verify integer values for absolute limits abs_l = limits["absolute"] self.assertGreater(int(abs_l["maxTotalShareGigabytes"]), -2) self.assertGreater(int(abs_l["maxTotalShares"]), -2) self.assertGreater(int(abs_l["maxTotalShareSnapshots"]), -2) self.assertGreater(int(abs_l["maxTotalShareNetworks"]), -2) self.assertGreater(int(abs_l["maxTotalSnapshotGigabytes"]), -2) self.assertGreater(int(abs_l["totalSharesUsed"]), -2) self.assertGreater(int(abs_l["totalShareSnapshotsUsed"]), -2) self.assertGreater(int(abs_l["totalShareNetworksUsed"]), -2) self.assertGreater(int(abs_l["totalShareGigabytesUsed"]), -2) self.assertGreater(int(abs_l["totalSnapshotGigabytesUsed"]), -2) manila-2.0.0/manila_tempest_tests/tests/api/__init__.py0000664000567000056710000000000012701407107024350 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila_tempest_tests/tests/api/test_replication_negative.py0000664000567000056710000001655512701407107030071 0ustar jenkinsjenkins00000000000000# Copyright 2015 Yogesh Kshirsagar # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
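# NOTE: an illustrative minimal tempest configuration for these tests; the
# option names come from the CONF lookups in this module and the value shown
# is just one of the replication types referenced below:
#
#     [share]
#     run_replication_tests = True
#     backend_replication_type = writable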
from tempest import config from tempest.lib.common.utils import data_utils from tempest.lib import exceptions as lib_exc from tempest import test import testtools from manila_tempest_tests import clients_share as clients from manila_tempest_tests.common import constants from manila_tempest_tests import share_exceptions from manila_tempest_tests.tests.api import base CONF = config.CONF _MIN_SUPPORTED_MICROVERSION = '2.11' @testtools.skipUnless(CONF.share.run_replication_tests, 'Replication tests are disabled.') @base.skip_if_microversion_lt(_MIN_SUPPORTED_MICROVERSION) class ReplicationNegativeTest(base.BaseSharesTest): @classmethod def resource_setup(cls): super(ReplicationNegativeTest, cls).resource_setup() # Create share_type name = data_utils.rand_name(constants.TEMPEST_MANILA_PREFIX) cls.admin_client = clients.AdminManager().shares_v2_client cls.replication_type = CONF.share.backend_replication_type if cls.replication_type not in constants.REPLICATION_TYPE_CHOICES: raise share_exceptions.ShareReplicationTypeException( replication_type=cls.replication_type ) cls.zones = cls.get_availability_zones(client=cls.admin_client) cls.share_zone = cls.zones[0] cls.replica_zone = cls.zones[-1] cls.extra_specs = cls.add_required_extra_specs_to_dict( {"replication_type": cls.replication_type}) share_type = cls.create_share_type( name, extra_specs=cls.extra_specs, client=cls.admin_client) cls.share_type = share_type["share_type"] # Create share with above share_type cls.share1, cls.instance_id1 = cls._create_share_get_instance() @classmethod def _create_share_get_instance(cls): share = cls.create_share(share_type_id=cls.share_type["id"], availability_zone=cls.share_zone,) share_instances = cls.admin_client.get_instances_of_share( share["id"], version=_MIN_SUPPORTED_MICROVERSION ) instance_id = share_instances[0]["id"] return share, instance_id def _is_replication_type_promotable(self): if (self.replication_type not in constants.REPLICATION_PROMOTION_CHOICES): msg = "Option backend_replication_type should be one of (%s)!" 
raise self.skipException( msg % ','.join(constants.REPLICATION_PROMOTION_CHOICES)) @test.attr(type=["gate", "negative", ]) def test_try_add_replica_to_share_with_no_replication_share_type(self): # Create share without replication type share = self.create_share() self.assertRaises(lib_exc.BadRequest, self.create_share_replica, share['id'], self.replica_zone) @test.attr(type=["gate", "negative", ]) def test_add_replica_to_share_with_error_state(self): # Set "error" state self.admin_client.reset_state( self.share1['id'], constants.STATUS_ERROR) self.addCleanup(self.admin_client.reset_state, self.share1['id'], constants.STATUS_AVAILABLE) self.assertRaises(lib_exc.BadRequest, self.create_share_replica, self.share1['id'], self.replica_zone) @test.attr(type=["gate", "negative", ]) def test_get_replica_by_nonexistent_id(self): self.assertRaises(lib_exc.NotFound, self.shares_v2_client.get_share_replica, data_utils.rand_uuid()) @test.attr(type=["gate", "negative", ]) def test_try_delete_replica_by_nonexistent_id(self): self.assertRaises(lib_exc.NotFound, self.shares_v2_client.delete_share_replica, data_utils.rand_uuid()) @test.attr(type=["gate", "negative", ]) def test_try_delete_last_active_replica(self): self.assertRaises(lib_exc.BadRequest, self.shares_v2_client.delete_share_replica, self.instance_id1) @test.attr(type=["gate", "negative", ]) def test_try_delete_share_having_replica(self): self.create_share_replica(self.share1["id"], self.replica_zone, cleanup_in_class=False) self.assertRaises(lib_exc.Conflict, self.shares_v2_client.delete_share, self.share1["id"]) @test.attr(type=["negative", "gate", ]) def test_promote_out_of_sync_share_replica(self): # Test promoting an out_of_sync share_replica to active state self._is_replication_type_promotable() share, instance_id = self._create_share_get_instance() replica = self.create_share_replica(share["id"], self.replica_zone, cleanup_in_class=False) # Set replica state to out of sync self.admin_client.reset_share_replica_state( replica['id'], constants.REPLICATION_STATE_OUT_OF_SYNC) self.shares_v2_client.wait_for_share_replica_status( replica['id'], constants.REPLICATION_STATE_OUT_OF_SYNC, status_attr='replica_state') # Try promoting the first out_of_sync replica to active state self.assertRaises(lib_exc.Forbidden, self.shares_v2_client.promote_share_replica, replica['id']) @test.attr(type=["negative", "gate", ]) def test_promote_active_share_replica(self): # Test promote active share_replica self._is_replication_type_promotable() # Try promoting the active replica self.shares_v2_client.promote_share_replica(self.instance_id1, expected_status=200) @test.attr(type=["negative", "gate", ]) def test_promote_share_replica_for_writable_share_type(self): # Test promote active share_replica for writable share if self.replication_type != "writable": raise self.skipException("Option backend_replication_type " "should be writable!") share, instance_id = self._create_share_get_instance() replica = self.create_share_replica(share["id"], self.replica_zone, cleanup_in_class=False) # By default, 'writable' replica is expected to be in active state self.shares_v2_client.wait_for_share_replica_status( replica["id"], constants.REPLICATION_STATE_ACTIVE, status_attr='replica_state') # Try promoting the replica self.shares_v2_client.promote_share_replica(replica['id']) manila-2.0.0/manila_tempest_tests/tests/api/test_availability_zones.py0000664000567000056710000000411312701407107027551 0ustar jenkinsjenkins00000000000000# Copyright 2015 mirantis Inc. 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import test from manila_tempest_tests.tests.api import base class AvailabilityZonesTest(base.BaseSharesTest): def _list_availability_zones_assertions(self, availability_zones): self.assertTrue(len(availability_zones) > 0) keys = ("created_at", "updated_at", "name", "id") for az in availability_zones: self.assertEqual(len(keys), len(az)) for key in keys: self.assertIn(key, az) @test.attr(type=["smoke", "gate"]) def test_list_availability_zones_legacy_url_api_v1(self): # NOTE(vponomaryov): remove this test with removal of availability zone # extension url support. azs = self.shares_client.list_availability_zones() self._list_availability_zones_assertions(azs) @test.attr(type=["smoke", "gate"]) @base.skip_if_microversion_not_supported("2.6") def test_list_availability_zones_legacy_url_api_v2(self): # NOTE(vponomaryov): remove this test with removal of availability zone # extension url support. azs = self.shares_v2_client.list_availability_zones( url='os-availability-zone', version='2.6') self._list_availability_zones_assertions(azs) @test.attr(type=["smoke", "gate"]) @base.skip_if_microversion_not_supported("2.7") def test_list_availability_zones(self): azs = self.shares_v2_client.list_availability_zones(version='2.7') self._list_availability_zones_assertions(azs) manila-2.0.0/manila_tempest_tests/tests/api/test_consistency_groups.py0000664000567000056710000001412312701407107027623 0ustar jenkinsjenkins00000000000000# Copyright 2015 Andrew Kerr # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
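# NOTE: the test class below is skipped unless consistency group tests are
# enabled; an illustrative configuration snippet, with the option name taken
# from the skipUnless decorator in this module:
#
#     [share]
#     run_consistency_group_tests = True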
from tempest import config # noqa from tempest.lib import exceptions as lib_exc # noqa from tempest import test # noqa import testtools # noqa from manila_tempest_tests.tests.api import base CONF = config.CONF CG_REQUIRED_ELEMENTS = {"id", "name", "description", "created_at", "status", "share_types", "project_id", "host", "links"} CGSNAPSHOT_REQUIRED_ELEMENTS = {"id", "name", "description", "created_at", "status", "project_id", "links"} @testtools.skipUnless(CONF.share.run_consistency_group_tests, 'Consistency Group tests disabled.') class ConsistencyGroupsTest(base.BaseSharesTest): """Covers consistency group functionality.""" @test.attr(type=["gate", ]) def test_create_populate_delete_consistency_group_v2_4(self): # Create a consistency group consistency_group = self.create_consistency_group( cleanup_in_class=False, version='2.4') self.assertTrue(CG_REQUIRED_ELEMENTS.issubset( consistency_group.keys()), 'At least one expected element missing from consistency group ' 'response. Expected %(expected)s, got %(actual)s.' % { "expected": CG_REQUIRED_ELEMENTS, "actual": consistency_group.keys()}) # Populate share = self.create_share(consistency_group_id=consistency_group['id'], cleanup_in_class=False, client=self.shares_v2_client, version='2.4') # Delete params = {"consistency_group_id": consistency_group['id']} self.shares_v2_client.delete_share(share['id'], params=params, version='2.4') self.shares_client.wait_for_resource_deletion(share_id=share['id']) self.shares_v2_client.delete_consistency_group(consistency_group['id'], version='2.4') self.shares_v2_client.wait_for_resource_deletion( cg_id=consistency_group['id']) # Verify self.assertRaises(lib_exc.NotFound, self.shares_v2_client.get_consistency_group, consistency_group['id']) self.assertRaises(lib_exc.NotFound, self.shares_client.get_share, share['id']) @test.attr(type=["gate", ]) def test_create_delete_empty_cgsnapshot_v2_4(self): # Create base consistency group consistency_group = self.create_consistency_group( cleanup_in_class=False, version='2.4') # Create cgsnapshot cgsnapshot = self.create_cgsnapshot_wait_for_active( consistency_group["id"], cleanup_in_class=False, version='2.4') self.assertTrue(CGSNAPSHOT_REQUIRED_ELEMENTS.issubset( cgsnapshot.keys()), 'At least one expected element missing from cgsnapshot response. ' 'Expected %(expected)s, got %(actual)s.' 
% { "expected": CGSNAPSHOT_REQUIRED_ELEMENTS, "actual": cgsnapshot.keys()}) cgsnapshot_members = self.shares_v2_client.list_cgsnapshot_members( cgsnapshot['id'], version='2.4') self.assertEmpty(cgsnapshot_members, 'Expected 0 cgsnapshot members, got %s' % len( cgsnapshot_members)) # delete snapshot self.shares_v2_client.delete_cgsnapshot(cgsnapshot["id"], version='2.4') self.shares_v2_client.wait_for_resource_deletion( cgsnapshot_id=cgsnapshot["id"]) self.assertRaises(lib_exc.NotFound, self.shares_v2_client.get_cgsnapshot, cgsnapshot['id'], version='2.4') @test.attr(type=["gate", "smoke", ]) def test_create_consistency_group_from_empty_cgsnapshot(self): # Create base consistency group consistency_group = self.create_consistency_group( cleanup_in_class=False) # Create cgsnapshot cgsnapshot = self.create_cgsnapshot_wait_for_active( consistency_group["id"], cleanup_in_class=False) cgsnapshot_members = self.shares_v2_client.list_cgsnapshot_members( cgsnapshot['id']) self.assertEmpty(cgsnapshot_members, 'Expected 0 cgsnapshot members, got %s' % len( cgsnapshot_members)) new_consistency_group = self.create_consistency_group( cleanup_in_class=False, source_cgsnapshot_id=cgsnapshot['id']) new_shares = self.shares_client.list_shares( params={'consistency_group_id': new_consistency_group['id']}) self.assertEmpty(new_shares, 'Expected 0 new shares, got %s' % len(new_shares)) msg = 'Expected cgsnapshot_id %s as source of share %s' % ( cgsnapshot['id'], new_consistency_group['source_cgsnapshot_id']) self.assertEqual(new_consistency_group['source_cgsnapshot_id'], cgsnapshot['id'], msg) msg = ('Unexpected share_types on new consistency group. Expected ' '%s, got %s.' % (consistency_group['share_types'], new_consistency_group['share_types'])) self.assertEqual(sorted(consistency_group['share_types']), sorted(new_consistency_group['share_types']), msg) manila-2.0.0/manila_tempest_tests/tests/api/test_metadata_negative.py0000664000567000056710000000716112701407107027331 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
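# NOTE: the "too big" negative cases below build a 256-character key and a
# 1024-character value; the implied maximums of 255 characters for keys and
# 1023 for values are an inference from that test data, not a limit stated in
# this module.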
from tempest.lib import exceptions as lib_exc # noqa from tempest import test # noqa from manila_tempest_tests.tests.api import base class SharesMetadataNegativeTest(base.BaseSharesTest): @classmethod def resource_setup(cls): super(SharesMetadataNegativeTest, cls).resource_setup() cls.share = cls.create_share() @test.attr(type=["gate", "negative", ]) def test_try_set_metadata_to_unexisting_share(self): md = {u"key1": u"value1", u"key2": u"value2", } self.assertRaises(lib_exc.NotFound, self.shares_client.set_metadata, "wrong_share_id", md) @test.attr(type=["gate", "negative", ]) def test_try_update_all_metadata_for_unexisting_share(self): md = {u"key1": u"value1", u"key2": u"value2", } self.assertRaises(lib_exc.NotFound, self.shares_client.update_all_metadata, "wrong_share_id", md) @test.attr(type=["gate", "negative", ]) def test_try_set_metadata_with_empty_key(self): self.assertRaises(lib_exc.BadRequest, self.shares_client.set_metadata, self.share["id"], {"": "value"}) @test.attr(type=["gate", "negative", ]) def test_try_upd_metadata_with_empty_key(self): self.assertRaises(lib_exc.BadRequest, self.shares_client.update_all_metadata, self.share["id"], {"": "value"}) @test.attr(type=["gate", "negative", ]) def test_try_set_metadata_with_too_big_key(self): too_big_key = "x" * 256 md = {too_big_key: "value"} self.assertRaises(lib_exc.BadRequest, self.shares_client.set_metadata, self.share["id"], md) @test.attr(type=["gate", "negative", ]) def test_try_upd_metadata_with_too_big_key(self): too_big_key = "x" * 256 md = {too_big_key: "value"} self.assertRaises(lib_exc.BadRequest, self.shares_client.update_all_metadata, self.share["id"], md) @test.attr(type=["gate", "negative", ]) def test_try_set_metadata_with_too_big_value(self): too_big_value = "x" * 1024 md = {"key": too_big_value} self.assertRaises(lib_exc.BadRequest, self.shares_client.set_metadata, self.share["id"], md) @test.attr(type=["gate", "negative", ]) def test_try_upd_metadata_with_too_big_value(self): too_big_value = "x" * 1024 md = {"key": too_big_value} self.assertRaises(lib_exc.BadRequest, self.shares_client.update_all_metadata, self.share["id"], md) @test.attr(type=["gate", "negative", ]) def test_try_delete_unexisting_metadata(self): self.assertRaises(lib_exc.NotFound, self.shares_client.delete_metadata, self.share["id"], "wrong_key") manila-2.0.0/manila_tempest_tests/tests/api/test_share_types_negative.py0000664000567000056710000000515112701407107030074 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
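# NOTE: a descriptive summary of the pattern used below, not additional test
# logic: a share type is created once with admin credentials via
# clients.AdminManager(), and the non-admin shares_client is then expected to
# receive Forbidden for the create, delete and access-control calls.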
from tempest.lib.common.utils import data_utils # noqa from tempest.lib import exceptions as lib_exc # noqa from tempest import test # noqa from manila_tempest_tests import clients_share as clients from manila_tempest_tests.tests.api import base class ShareTypesNegativeTest(base.BaseSharesTest): @classmethod def _create_share_type(cls): name = data_utils.rand_name("unique_st_name") extra_specs = cls.add_required_extra_specs_to_dict() return cls.create_share_type( name, extra_specs=extra_specs, client=clients.AdminManager().shares_client) @classmethod def resource_setup(cls): super(ShareTypesNegativeTest, cls).resource_setup() cls.st = cls._create_share_type() @test.attr(type=["gate", "smoke", "negative"]) def test_try_create_share_type_with_user(self): self.assertRaises(lib_exc.Forbidden, self.create_share_type, data_utils.rand_name("used_user_creds"), client=self.shares_client) @test.attr(type=["gate", "smoke", "negative"]) def test_try_delete_share_type_with_user(self): self.assertRaises(lib_exc.Forbidden, self.shares_client.delete_share_type, self.st["share_type"]["id"]) @test.attr(type=["gate", "smoke", "negative"]) def test_try_add_access_to_share_type_with_user(self): self.assertRaises(lib_exc.Forbidden, self.shares_client.add_access_to_share_type, self.st['share_type']['id'], self.shares_client.tenant_id) @test.attr(type=["gate", "smoke", "negative"]) def test_try_remove_access_from_share_type_with_user(self): self.assertRaises(lib_exc.Forbidden, self.shares_client.remove_access_from_share_type, self.st['share_type']['id'], self.shares_client.tenant_id) manila-2.0.0/manila_tempest_tests/tests/api/test_quotas.py0000664000567000056710000000523312701407107025201 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from tempest import config from tempest import test from manila_tempest_tests.tests.api import base CONF = config.CONF @ddt.data class SharesQuotasTest(base.BaseSharesTest): @classmethod def resource_setup(cls): if not CONF.share.run_quota_tests: msg = "Quota tests are disabled." 
raise cls.skipException(msg) super(SharesQuotasTest, cls).resource_setup() cls.user_id = cls.shares_v2_client.user_id cls.tenant_id = cls.shares_v2_client.tenant_id @test.attr(type=["gate", "smoke", ]) @ddt.data('shares_client', 'shares_v2_client') def test_default_quotas(self, client_name): quotas = getattr(self, client_name).default_quotas(self.tenant_id) self.assertGreater(int(quotas["gigabytes"]), -2) self.assertGreater(int(quotas["snapshot_gigabytes"]), -2) self.assertGreater(int(quotas["shares"]), -2) self.assertGreater(int(quotas["snapshots"]), -2) self.assertGreater(int(quotas["share_networks"]), -2) @test.attr(type=["gate", "smoke", ]) @ddt.data('shares_client', 'shares_v2_client') def test_show_quotas(self, client_name): quotas = getattr(self, client_name).show_quotas(self.tenant_id) self.assertGreater(int(quotas["gigabytes"]), -2) self.assertGreater(int(quotas["snapshot_gigabytes"]), -2) self.assertGreater(int(quotas["shares"]), -2) self.assertGreater(int(quotas["snapshots"]), -2) self.assertGreater(int(quotas["share_networks"]), -2) @test.attr(type=["gate", "smoke", ]) @ddt.data('shares_client', 'shares_v2_client') def test_show_quotas_for_user(self, client_name): quotas = getattr(self, client_name).show_quotas( self.tenant_id, self.user_id) self.assertGreater(int(quotas["gigabytes"]), -2) self.assertGreater(int(quotas["snapshot_gigabytes"]), -2) self.assertGreater(int(quotas["shares"]), -2) self.assertGreater(int(quotas["snapshots"]), -2) self.assertGreater(int(quotas["share_networks"]), -2) manila-2.0.0/manila_tempest_tests/tests/api/test_quotas_negative.py0000664000567000056710000000550212701407107027062 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from tempest import config from tempest.lib import exceptions as lib_exc from tempest import test from manila_tempest_tests.tests.api import base CONF = config.CONF @ddt.ddt class SharesQuotasNegativeTest(base.BaseSharesTest): @classmethod def resource_setup(cls): if not CONF.share.run_quota_tests: msg = "Quota tests are disabled." 
raise cls.skipException(msg) super(SharesQuotasNegativeTest, cls).resource_setup() @test.attr(type=["gate", "smoke", "negative"]) def test_get_quotas_with_empty_tenant_id(self): self.assertRaises(lib_exc.NotFound, self.shares_v2_client.show_quotas, "") @test.attr(type=["gate", "smoke", "negative", ]) def test_try_reset_quotas_with_user(self): self.assertRaises(lib_exc.Forbidden, self.shares_v2_client.reset_quotas, self.shares_v2_client.tenant_id) @test.attr(type=["gate", "smoke", "negative", ]) def test_try_update_quotas_with_user(self): self.assertRaises(lib_exc.Forbidden, self.shares_v2_client.update_quotas, self.shares_v2_client.tenant_id, shares=9) @ddt.data( ('services', '2.0', 'show_quotas'), ('services', '2.0', 'default_quotas'), ('services', '2.0', 'reset_quotas'), ('services', '2.0', 'update_quotas'), ('services', '2.6', 'show_quotas'), ('services', '2.6', 'default_quotas'), ('services', '2.6', 'reset_quotas'), ('services', '2.6', 'update_quotas'), ('os-services', '2.7', 'show_quotas'), ('os-services', '2.7', 'default_quotas'), ('os-services', '2.7', 'reset_quotas'), ('os-services', '2.7', 'update_quotas'), ) @ddt.unpack @test.attr(type=["gate", "smoke", "negative", ]) @base.skip_if_microversion_not_supported("2.7") def test_show_quotas_with_wrong_versions(self, url, version, method_name): self.assertRaises( lib_exc.NotFound, getattr(self.shares_v2_client, method_name), self.shares_v2_client.tenant_id, version=version, url=url, ) manila-2.0.0/manila_tempest_tests/tests/api/test_security_services_mapping.py0000664000567000056710000000465312701407107031157 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from tempest import test # noqa from manila_tempest_tests.tests.api import base class SecurityServicesMappingTest(base.BaseSharesTest): @classmethod def resource_setup(cls): super(SecurityServicesMappingTest, cls).resource_setup() cls.cl = cls.shares_client def setUp(self): super(SecurityServicesMappingTest, self).setUp() # create share network data = self.generate_share_network_data() self.sn = self.create_share_network(client=self.cl, **data) self.assertDictContainsSubset(data, self.sn) # create security service data = self.generate_security_service_data() self.ss = self.create_security_service(client=self.cl, **data) self.assertDictContainsSubset(data, self.ss) # Add security service to share network self.cl.add_sec_service_to_share_network(self.sn["id"], self.ss["id"]) @test.attr(type=["gate", "smoke"]) def test_map_ss_to_sn_and_list(self): # List security services for share network ls = self.cl.list_sec_services_for_share_network(self.sn["id"]) self.assertEqual(1, len(ls)) for key in ["status", "id", "name"]: self.assertIn(self.ss[key], ls[0][key]) @test.attr(type=["gate", "smoke"]) def test_map_ss_to_sn_and_delete(self): # Remove security service from share network self.cl.remove_sec_service_from_share_network( self.sn["id"], self.ss["id"]) @test.attr(type=["gate", "smoke"]) def test_remap_ss_to_sn(self): # Remove security service from share network self.cl.remove_sec_service_from_share_network( self.sn["id"], self.ss["id"]) # Add security service to share network again self.cl.add_sec_service_to_share_network(self.sn["id"], self.ss["id"]) manila-2.0.0/manila_tempest_tests/tests/api/base.py0000664000567000056710000010301012701407107023530 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import inspect import traceback from oslo_concurrency import lockutils from oslo_log import log import six from tempest.common import credentials_factory as common_creds from tempest.common import dynamic_creds from tempest import config from tempest.lib.common.utils import data_utils from tempest.lib import exceptions from tempest import test from manila_tempest_tests import clients_share as clients from manila_tempest_tests.common import constants from manila_tempest_tests import share_exceptions from manila_tempest_tests import utils CONF = config.CONF LOG = log.getLogger(__name__) class handle_cleanup_exceptions(object): """Handle exceptions raised with cleanup operations. Always suppress errors when exceptions.NotFound or exceptions.Forbidden are raised. Suppress all other exceptions only in case config opt 'suppress_errors_in_cleanup' in config group 'share' is True. 
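    Illustrative usage (a sketch; the delete call shown is only an example of
    a cleanup operation)::

        with handle_cleanup_exceptions():
            client.delete_share(share["id"])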
""" def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_traceback): if not (isinstance(exc_value, (exceptions.NotFound, exceptions.Forbidden)) or CONF.share.suppress_errors_in_cleanup): return False # Do not suppress error if any if exc_traceback: LOG.error("Suppressed cleanup error in Manila: " "\n%s" % traceback.format_exc()) return True # Suppress error if any def network_synchronized(f): def wrapped_func(self, *args, **kwargs): with_isolated_creds = True if len(args) > 2 else False no_lock_required = kwargs.get( "isolated_creds_client", with_isolated_creds) if no_lock_required: # Usage of not reusable network. No need in lock. return f(self, *args, **kwargs) # Use lock assuming reusage of common network. @lockutils.synchronized("manila_network_lock", external=True) def source_func(self, *args, **kwargs): return f(self, *args, **kwargs) return source_func(self, *args, **kwargs) return wrapped_func skip_if_microversion_not_supported = utils.skip_if_microversion_not_supported skip_if_microversion_lt = utils.skip_if_microversion_lt class BaseSharesTest(test.BaseTestCase): """Base test case class for all Manila API tests.""" force_tenant_isolation = False protocols = ["nfs", "cifs", "glusterfs", "hdfs", "cephfs"] # Will be cleaned up in resource_cleanup class_resources = [] # Will be cleaned up in tearDown method method_resources = [] # Will be cleaned up in resource_cleanup class_isolated_creds = [] # Will be cleaned up in tearDown method method_isolated_creds = [] def skip_if_microversion_not_supported(self, microversion): if not utils.is_microversion_supported(microversion): raise self.skipException( "Microversion '%s' is not supported." % microversion) def skip_if_microversion_lt(self, microversion): if utils.is_microversion_lt(CONF.share.max_api_microversion, microversion): raise self.skipException( "Microversion must be greater than or equal to '%s'." % microversion) @classmethod def get_client_with_isolated_creds(cls, name=None, type_of_creds="admin", cleanup_in_class=False, client_version='1'): """Creates isolated creds. :param name: name, will be used for naming ic and related stuff :param type_of_creds: admin, alt or primary :param cleanup_in_class: defines place where to delete :returns: SharesClient -- shares client with isolated creds. :returns: To client added dict attr 'creds' with :returns: key elements 'tenant' and 'user'. 
""" if name is None: # Get name of test method name = inspect.stack()[1][3] if len(name) > 32: name = name[0:32] # Choose type of isolated creds ic = dynamic_creds.DynamicCredentialProvider( identity_version=CONF.identity.auth_version, name=name, admin_role=CONF.identity.admin_role, admin_creds=common_creds.get_configured_credentials( 'identity_admin')) if "admin" in type_of_creds: creds = ic.get_admin_creds() elif "alt" in type_of_creds: creds = ic.get_alt_creds() else: creds = ic.self.get_credentials(type_of_creds) ic.type_of_creds = type_of_creds # create client with isolated creds os = clients.Manager(credentials=creds) if client_version == '1': client = os.shares_client elif client_version == '2': client = os.shares_v2_client # Set place where will be deleted isolated creds ic_res = { "method": ic.clear_creds, "deleted": False, } if cleanup_in_class: cls.class_isolated_creds.insert(0, ic_res) else: cls.method_isolated_creds.insert(0, ic_res) # Provide share network if CONF.share.multitenancy_enabled: if not CONF.service_available.neutron: raise cls.skipException("Neutron support is required") nc = os.networks_client share_network_id = cls.provide_share_network(client, nc, ic) client.share_network_id = share_network_id resource = { "type": "share_network", "id": client.share_network_id, "client": client, } if cleanup_in_class: cls.class_resources.insert(0, resource) else: cls.method_resources.insert(0, resource) return client @classmethod def verify_nonempty(cls, *args): if not all(args): msg = "Missing API credentials in configuration." raise cls.skipException(msg) @classmethod def resource_setup(cls): if not (any(p in CONF.share.enable_protocols for p in cls.protocols) and CONF.service_available.manila): skip_msg = "Manila is disabled" raise cls.skipException(skip_msg) super(BaseSharesTest, cls).resource_setup() if not hasattr(cls, "os"): cls.username = CONF.identity.username cls.password = CONF.identity.password cls.tenant_name = CONF.identity.tenant_name cls.verify_nonempty(cls.username, cls.password, cls.tenant_name) cls.os = clients.Manager() if CONF.share.multitenancy_enabled: if not CONF.service_available.neutron: raise cls.skipException("Neutron support is required") sc = cls.os.shares_client nc = cls.os.networks_client share_network_id = cls.provide_share_network(sc, nc) cls.os.shares_client.share_network_id = share_network_id cls.os.shares_v2_client.share_network_id = share_network_id cls.shares_client = cls.os.shares_client cls.shares_v2_client = cls.os.shares_v2_client def setUp(self): super(BaseSharesTest, self).setUp() self.addCleanup(self.clear_isolated_creds) self.addCleanup(self.clear_resources) @classmethod def resource_cleanup(cls): super(BaseSharesTest, cls).resource_cleanup() cls.clear_resources(cls.class_resources) cls.clear_isolated_creds(cls.class_isolated_creds) @classmethod @network_synchronized def provide_share_network(cls, shares_client, networks_client, isolated_creds_client=None): """Used for finding/creating share network for multitenant driver. This method creates/gets entity share-network for one tenant. This share-network will be used for creation of service vm. :param shares_client: shares client, which requires share-network :param networks_client: network client from same tenant as shares :param isolated_creds_client: DynamicCredentialProvider instance If provided, then its networking will be used if needed. If not provided, then common network will be used if needed. 
:returns: str -- share network id for shares_client tenant :returns: None -- if single-tenant driver used """ sc = shares_client if not CONF.share.multitenancy_enabled: # Assumed usage of a single-tenant driver share_network_id = None elif sc.share_network_id: # Share-network already exists, use it share_network_id = sc.share_network_id else: net_id = subnet_id = share_network_id = None if not isolated_creds_client: # Search for networks, created in previous runs search_word = "reusable" sn_name = "autogenerated_by_tempest_%s" % search_word service_net_name = "share-service" networks = networks_client.list_networks() if "networks" in networks.keys(): networks = networks["networks"] for network in networks: if (service_net_name in network["name"] and sc.tenant_id == network['tenant_id']): net_id = network["id"] if len(network["subnets"]) > 0: subnet_id = network["subnets"][0] break # Create suitable network if (net_id is None or subnet_id is None): ic = dynamic_creds.DynamicCredentialProvider( identity_version=CONF.identity.auth_version, name=service_net_name, admin_role=CONF.identity.admin_role, admin_creds=common_creds.get_configured_credentials( 'identity_admin')) net_data = ic._create_network_resources(sc.tenant_id) network, subnet, router = net_data net_id = network["id"] subnet_id = subnet["id"] # Try get suitable share-network share_networks = sc.list_share_networks_with_detail() for sn in share_networks: if (net_id == sn["neutron_net_id"] and subnet_id == sn["neutron_subnet_id"] and sn["name"] and search_word in sn["name"]): share_network_id = sn["id"] break else: sn_name = "autogenerated_by_tempest_for_isolated_creds" # Use precreated network and subnet from isolated creds net_id = isolated_creds_client.get_credentials( isolated_creds_client.type_of_creds).network['id'] subnet_id = isolated_creds_client.get_credentials( isolated_creds_client.type_of_creds).subnet['id'] # Create suitable share-network if share_network_id is None: sn_desc = "This share-network was created by tempest" sn = sc.create_share_network(name=sn_name, description=sn_desc, neutron_net_id=net_id, neutron_subnet_id=subnet_id) share_network_id = sn["id"] return share_network_id @classmethod def _create_share(cls, share_protocol=None, size=1, name=None, snapshot_id=None, description=None, metadata=None, share_network_id=None, share_type_id=None, consistency_group_id=None, client=None, cleanup_in_class=True, is_public=False, **kwargs): client = client or cls.shares_v2_client description = description or "Tempest's share" share_network_id = share_network_id or client.share_network_id or None metadata = metadata or {} kwargs.update({ 'share_protocol': share_protocol, 'size': size, 'name': name, 'snapshot_id': snapshot_id, 'description': description, 'metadata': metadata, 'share_network_id': share_network_id, 'share_type_id': share_type_id, 'is_public': is_public, }) if consistency_group_id: kwargs['consistency_group_id'] = consistency_group_id share = client.create_share(**kwargs) resource = {"type": "share", "id": share["id"], "client": client, "consistency_group_id": consistency_group_id} cleanup_list = (cls.class_resources if cleanup_in_class else cls.method_resources) cleanup_list.insert(0, resource) return share @classmethod def migrate_share(cls, share_id, dest_host, client=None, notify=True, wait_for_status='migration_success', **kwargs): client = client or cls.shares_v2_client client.migrate_share(share_id, dest_host, notify, **kwargs) share = client.wait_for_migration_status( share_id, dest_host, 
wait_for_status, version=kwargs.get('version')) return share @classmethod def migration_complete(cls, share_id, dest_host, client=None, **kwargs): client = client or cls.shares_v2_client client.migration_complete(share_id, **kwargs) share = client.wait_for_migration_status( share_id, dest_host, 'migration_success', version=kwargs.get('version')) return share @classmethod def create_share(cls, *args, **kwargs): """Create one share and wait for available state. Retry if allowed.""" result = cls.create_shares([{"args": args, "kwargs": kwargs}]) return result[0] @classmethod def create_shares(cls, share_data_list): """Creates several shares in parallel with retries. Use this method when you want to create more than one share at same time. Especially if config option 'share.share_creation_retry_number' has value more than zero (0). All shares will be expected to have 'available' status with or without recreation else error will be raised. :param share_data_list: list -- list of dictionaries with 'args' and 'kwargs' for '_create_share' method of this base class. example of data: share_data_list=[{'args': ['quuz'], 'kwargs': {'foo': 'bar'}}}] :returns: list -- list of shares created using provided data. """ data = [copy.deepcopy(d) for d in share_data_list] for d in data: if not isinstance(d, dict): raise exceptions.TempestException( "Expected 'dict', got '%s'" % type(d)) if "args" not in d: d["args"] = [] if "kwargs" not in d: d["kwargs"] = {} if len(d) > 2: raise exceptions.TempestException( "Expected only 'args' and 'kwargs' keys. " "Provided %s" % list(d)) d["kwargs"]["client"] = d["kwargs"].get( "client", cls.shares_v2_client) d["share"] = cls._create_share(*d["args"], **d["kwargs"]) d["cnt"] = 0 d["available"] = False while not all(d["available"] for d in data): for d in data: if d["available"]: continue try: d["kwargs"]["client"].wait_for_share_status( d["share"]["id"], "available") d["available"] = True except (share_exceptions.ShareBuildErrorException, exceptions.TimeoutException) as e: if CONF.share.share_creation_retry_number > d["cnt"]: d["cnt"] += 1 msg = ("Share '%s' failed to be built. " "Trying create another." 
% d["share"]["id"]) LOG.error(msg) LOG.error(e) d["share"] = cls._create_share( *d["args"], **d["kwargs"]) else: raise e return [d["share"] for d in data] @classmethod def create_consistency_group(cls, client=None, cleanup_in_class=True, share_network_id=None, **kwargs): client = client or cls.shares_v2_client kwargs['share_network_id'] = (share_network_id or client.share_network_id or None) consistency_group = client.create_consistency_group(**kwargs) resource = { "type": "consistency_group", "id": consistency_group["id"], "client": client} if cleanup_in_class: cls.class_resources.insert(0, resource) else: cls.method_resources.insert(0, resource) if kwargs.get('source_cgsnapshot_id'): new_cg_shares = client.list_shares( detailed=True, params={'consistency_group_id': consistency_group['id']}) for share in new_cg_shares: resource = {"type": "share", "id": share["id"], "client": client, "consistency_group_id": share.get( 'consistency_group_id')} if cleanup_in_class: cls.class_resources.insert(0, resource) else: cls.method_resources.insert(0, resource) client.wait_for_consistency_group_status(consistency_group['id'], 'available') return consistency_group @classmethod def create_snapshot_wait_for_active(cls, share_id, name=None, description=None, force=False, client=None, cleanup_in_class=True): if client is None: client = cls.shares_v2_client if description is None: description = "Tempest's snapshot" snapshot = client.create_snapshot(share_id, name, description, force) resource = { "type": "snapshot", "id": snapshot["id"], "client": client, } if cleanup_in_class: cls.class_resources.insert(0, resource) else: cls.method_resources.insert(0, resource) client.wait_for_snapshot_status(snapshot["id"], "available") return snapshot @classmethod def create_cgsnapshot_wait_for_active(cls, consistency_group_id, name=None, description=None, client=None, cleanup_in_class=True, **kwargs): client = client or cls.shares_v2_client if description is None: description = "Tempest's cgsnapshot" cgsnapshot = client.create_cgsnapshot(consistency_group_id, name=name, description=description, **kwargs) resource = { "type": "cgsnapshot", "id": cgsnapshot["id"], "client": client, } if cleanup_in_class: cls.class_resources.insert(0, resource) else: cls.method_resources.insert(0, resource) client.wait_for_cgsnapshot_status(cgsnapshot["id"], "available") return cgsnapshot @classmethod def get_availability_zones(cls, client=None): """List the availability zones for "manila-share" services that are currently in "up" state. 
""" client = client or cls.shares_v2_client cls.services = client.list_services() zones = [service['zone'] for service in cls.services if service['binary'] == "manila-share" and service['state'] == 'up'] return zones def get_pools_for_replication_domain(self): # Get the list of pools for the replication domain pools = self.admin_client.list_pools(detail=True)['pools'] instance_host = self.shares[0]['host'] host_pool = [p for p in pools if p['name'] == instance_host][0] rep_domain = host_pool['capabilities']['replication_domain'] pools_in_rep_domain = [p for p in pools if p['capabilities'][ 'replication_domain'] == rep_domain] return rep_domain, pools_in_rep_domain @classmethod def create_share_replica(cls, share_id, availability_zone, client=None, cleanup_in_class=False, cleanup=True): client = client or cls.shares_v2_client replica = client.create_share_replica(share_id, availability_zone) resource = { "type": "share_replica", "id": replica["id"], "client": client, "share_id": share_id, } # NOTE(Yogi1): Cleanup needs to be disabled during promotion tests. if cleanup: if cleanup_in_class: cls.class_resources.insert(0, resource) else: cls.method_resources.insert(0, resource) client.wait_for_share_replica_status( replica["id"], constants.STATUS_AVAILABLE) return replica @classmethod def delete_share_replica(cls, replica_id, client=None): client = client or cls.shares_v2_client try: client.delete_share_replica(replica_id) client.wait_for_resource_deletion(replica_id=replica_id) except exceptions.NotFound: pass @classmethod def promote_share_replica(cls, replica_id, client=None): client = client or cls.shares_v2_client replica = client.promote_share_replica(replica_id) client.wait_for_share_replica_status( replica["id"], constants.REPLICATION_STATE_ACTIVE, status_attr="replica_state") return replica @classmethod def create_share_network(cls, client=None, cleanup_in_class=False, **kwargs): if client is None: client = cls.shares_client share_network = client.create_share_network(**kwargs) resource = { "type": "share_network", "id": share_network["id"], "client": client, } if cleanup_in_class: cls.class_resources.insert(0, resource) else: cls.method_resources.insert(0, resource) return share_network @classmethod def create_security_service(cls, ss_type="ldap", client=None, cleanup_in_class=False, **kwargs): if client is None: client = cls.shares_client security_service = client.create_security_service(ss_type, **kwargs) resource = { "type": "security_service", "id": security_service["id"], "client": client, } if cleanup_in_class: cls.class_resources.insert(0, resource) else: cls.method_resources.insert(0, resource) return security_service @classmethod def create_share_type(cls, name, is_public=True, client=None, cleanup_in_class=True, **kwargs): if client is None: client = cls.shares_v2_client share_type = client.create_share_type(name, is_public, **kwargs) resource = { "type": "share_type", "id": share_type["share_type"]["id"], "client": client, } if cleanup_in_class: cls.class_resources.insert(0, resource) else: cls.method_resources.insert(0, resource) return share_type @staticmethod def add_required_extra_specs_to_dict(extra_specs=None): dhss = six.text_type(CONF.share.multitenancy_enabled) snapshot_support = six.text_type( CONF.share.capability_snapshot_support) required = { "driver_handles_share_servers": dhss, "snapshot_support": snapshot_support, } if extra_specs: required.update(extra_specs) return required @classmethod def clear_isolated_creds(cls, creds=None): if creds is None: creds = 
cls.method_isolated_creds for ic in creds: if "deleted" not in ic.keys(): ic["deleted"] = False if not ic["deleted"]: with handle_cleanup_exceptions(): ic["method"]() ic["deleted"] = True @classmethod def clear_share_replicas(cls, share_id, client=None): client = client or cls.shares_v2_client share_replicas = client.list_share_replicas( share_id=share_id) for replica in share_replicas: try: cls.delete_share_replica(replica['id']) except exceptions.BadRequest: # Ignore the exception due to deletion of last active replica pass @classmethod def clear_resources(cls, resources=None): """Deletes resources, that were created in test suites. This method tries to remove resources from resource list, if it is not found, assumed it was deleted in test itself. It is expected, that all resources were added as LIFO due to restriction of deletion resources, that is in the chain. :param resources: dict with keys 'type','id','client' and 'deleted' """ if resources is None: resources = cls.method_resources for res in resources: if "deleted" not in res.keys(): res["deleted"] = False if "client" not in res.keys(): res["client"] = cls.shares_client if not(res["deleted"]): res_id = res['id'] client = res["client"] with handle_cleanup_exceptions(): if res["type"] is "share": cls.clear_share_replicas(res_id) cg_id = res.get('consistency_group_id') if cg_id: params = {'consistency_group_id': cg_id} client.delete_share(res_id, params=params) else: client.delete_share(res_id) client.wait_for_resource_deletion(share_id=res_id) elif res["type"] is "snapshot": client.delete_snapshot(res_id) client.wait_for_resource_deletion(snapshot_id=res_id) elif res["type"] is "share_network": client.delete_share_network(res_id) client.wait_for_resource_deletion(sn_id=res_id) elif res["type"] is "security_service": client.delete_security_service(res_id) client.wait_for_resource_deletion(ss_id=res_id) elif res["type"] is "share_type": client.delete_share_type(res_id) client.wait_for_resource_deletion(st_id=res_id) elif res["type"] is "consistency_group": client.delete_consistency_group(res_id) client.wait_for_resource_deletion(cg_id=res_id) elif res["type"] is "cgsnapshot": client.delete_cgsnapshot(res_id) client.wait_for_resource_deletion(cgsnapshot_id=res_id) elif res["type"] is "share_replica": client.delete_share_replica(res_id) client.wait_for_resource_deletion(replica_id=res_id) else: LOG.warning("Provided unsupported resource type for " "cleanup '%s'. Skipping." % res["type"]) res["deleted"] = True @classmethod def generate_share_network_data(self): data = { "name": data_utils.rand_name("sn-name"), "description": data_utils.rand_name("sn-desc"), "neutron_net_id": data_utils.rand_name("net-id"), "neutron_subnet_id": data_utils.rand_name("subnet-id"), } return data @classmethod def generate_security_service_data(self): data = { "name": data_utils.rand_name("ss-name"), "description": data_utils.rand_name("ss-desc"), "dns_ip": utils.rand_ip(), "server": utils.rand_ip(), "domain": data_utils.rand_name("ss-domain"), "user": data_utils.rand_name("ss-user"), "password": data_utils.rand_name("ss-password"), } return data # Useful assertions def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001): """Assert two dicts are equivalent. This is a 'deep' match in the sense that it handles nested dictionaries appropriately. NOTE: If you don't care (or don't know) a given value, you can specify the string DONTCARE as the value. This will cause that dict-item to be skipped. 
""" def raise_assertion(msg): d1str = str(d1) d2str = str(d2) base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s ' 'd2: %(d2str)s' % {"msg": msg, "d1str": d1str, "d2str": d2str}) raise AssertionError(base_msg) d1keys = set(d1.keys()) d2keys = set(d2.keys()) if d1keys != d2keys: d1only = d1keys - d2keys d2only = d2keys - d1keys raise_assertion('Keys in d1 and not d2: %(d1only)s. ' 'Keys in d2 and not d1: %(d2only)s' % {"d1only": d1only, "d2only": d2only}) for key in d1keys: d1value = d1[key] d2value = d2[key] try: error = abs(float(d1value) - float(d2value)) within_tolerance = error <= tolerance except (ValueError, TypeError): # If both values aren't convertible to float, just ignore # ValueError if arg is a str, TypeError if it's something else # (like None) within_tolerance = False if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'): self.assertDictMatch(d1value, d2value) elif 'DONTCARE' in (d1value, d2value): continue elif approx_equal and within_tolerance: continue elif d1value != d2value: raise_assertion("d1['%(key)s']=%(d1value)s != " "d2['%(key)s']=%(d2value)s" % { "key": key, "d1value": d1value, "d2value": d2value }) class BaseSharesAltTest(BaseSharesTest): """Base test case class for all Shares Alt API tests.""" @classmethod def resource_setup(cls): cls.username = CONF.identity.alt_username cls.password = CONF.identity.alt_password cls.tenant_name = CONF.identity.alt_tenant_name cls.verify_nonempty(cls.username, cls.password, cls.tenant_name) cls.os = clients.AltManager() alt_share_network_id = CONF.share.alt_share_network_id cls.os.shares_client.share_network_id = alt_share_network_id cls.os.shares_v2_client.share_network_id = alt_share_network_id super(BaseSharesAltTest, cls).resource_setup() class BaseSharesAdminTest(BaseSharesTest): """Base test case class for all Shares Admin API tests.""" @classmethod def resource_setup(cls): if hasattr(CONF.identity, 'admin_username'): cls.username = CONF.identity.admin_username cls.password = CONF.identity.admin_password cls.tenant_name = CONF.identity.admin_tenant_name else: cls.username = CONF.auth.admin_username cls.password = CONF.auth.admin_password cls.tenant_name = CONF.auth.admin_tenant_name cls.verify_nonempty(cls.username, cls.password, cls.tenant_name) cls.os = clients.AdminManager() admin_share_network_id = CONF.share.admin_share_network_id cls.os.shares_client.share_network_id = admin_share_network_id cls.os.shares_v2_client.share_network_id = admin_share_network_id super(BaseSharesAdminTest, cls).resource_setup() manila-2.0.0/manila_tempest_tests/tests/api/test_consistency_group_actions.py0000664000567000056710000003662012701407107031166 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Copyright 2015 Andrew Kerr # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from tempest import config from tempest.lib.common.utils import data_utils from tempest import test import testtools from manila_tempest_tests.tests.api import base CONF = config.CONF CG_SIMPLE_KEYS = {"id", "name", "links"} CG_DETAIL_REQUIRED_KEYS = {"id", "name", "description", "created_at", "status", "project_id", "host", "links"} CGSNAPSHOT_SIMPLE_KEYS = {"id", "name", "links"} CGSNAPSHOT_DETAIL_REQUIRED_KEYS = {"id", "name", "description", "created_at", "status", "project_id", "links"} @testtools.skipUnless(CONF.share.run_consistency_group_tests, 'Consistency Group tests disabled.') class ConsistencyGroupActionsTest(base.BaseSharesTest): """Covers consistency group functionality.""" @classmethod def resource_setup(cls): super(ConsistencyGroupActionsTest, cls).resource_setup() # Create first consistency group cls.cg_name = data_utils.rand_name("tempest-cg-name") cls.cg_desc = data_utils.rand_name("tempest-cg-description") cls.cg = cls.create_consistency_group( name=cls.cg_name, description=cls.cg_desc) # Create second consistency group for purposes of sorting and snapshot # filtering cls.cg2 = cls.create_consistency_group( name=cls.cg_name, description=cls.cg_desc) # Create 2 shares inside first CG and 1 inside second CG cls.share_name = data_utils.rand_name("tempest-share-name") cls.share_desc = data_utils.rand_name("tempest-share-description") cls.share_size = 1 cls.share_size2 = 2 cls.shares = cls.create_shares([ {'kwargs': { 'name': cls.share_name, 'description': cls.share_desc, 'size': size, 'consistency_group_id': cg_id, }} for size, cg_id in ((cls.share_size, cls.cg['id']), (cls.share_size2, cls.cg['id']), (cls.share_size, cls.cg2['id'])) ]) # Create CG snapshots cls.cgsnap_name = data_utils.rand_name("tempest-cgsnap-name") cls.cgsnap_desc = data_utils.rand_name("tempest-cgsnap-description") cls.cgsnapshot = cls.create_cgsnapshot_wait_for_active( cls.cg["id"], name=cls.cgsnap_name, description=cls.cgsnap_desc) cls.cgsnapshot2 = cls.create_cgsnapshot_wait_for_active( cls.cg2['id'], name=cls.cgsnap_name, description=cls.cgsnap_desc) @test.attr(type=["gate", ]) def test_get_consistency_group_v2_4(self): # Get consistency group consistency_group = self.shares_v2_client.get_consistency_group( self.cg['id'], version='2.4') # Verify keys actual_keys = set(consistency_group.keys()) self.assertTrue(CG_DETAIL_REQUIRED_KEYS.issubset(actual_keys), 'Not all required keys returned for consistency ' 'group %s. Expected at least: %s, found %s' % ( consistency_group['id'], CG_DETAIL_REQUIRED_KEYS, actual_keys)) # Verify values msg = "Expected name: '%s', actual name: '%s'" % ( self.cg_name, consistency_group["name"]) self.assertEqual(self.cg_name, str(consistency_group["name"]), msg) msg = "Expected description: '%s', actual description: '%s'" % ( self.cg_desc, consistency_group["description"]) self.assertEqual(self.cg_desc, str(consistency_group["description"]), msg) @test.attr(type=["gate", ]) def test_get_share_v2_4(self): # Get share share = self.shares_v2_client.get_share(self.shares[0]['id'], version='2.4') # Verify keys expected_keys = {"status", "description", "links", "availability_zone", "created_at", "export_location", "share_proto", "name", "snapshot_id", "id", "size", "consistency_group_id"} actual_keys = set(share.keys()) self.assertTrue(expected_keys.issubset(actual_keys), 'Not all required keys returned for share %s. 
' 'Expected at least: %s, found %s' % (share['id'], expected_keys, actual_keys)) # Verify values msg = "Expected name: '%s', actual name: '%s'" % (self.share_name, share["name"]) self.assertEqual(self.share_name, str(share["name"]), msg) msg = "Expected description: '%s', actual description: '%s'" % ( self.share_desc, share["description"]) self.assertEqual(self.share_desc, str(share["description"]), msg) msg = "Expected size: '%s', actual size: '%s'" % (self.share_size, share["size"]) self.assertEqual(self.share_size, int(share["size"]), msg) msg = "Expected consistency_group_id: '%s', actual value: '%s'" % ( self.cg["id"], share["consistency_group_id"]) self.assertEqual(self.cg["id"], share["consistency_group_id"], msg) @test.attr(type=["gate", ]) def test_list_consistency_groups_v2_4(self): # List consistency groups consistency_groups = self.shares_v2_client.list_consistency_groups( version='2.4') # Verify keys [self.assertEqual(CG_SIMPLE_KEYS, set(cg.keys())) for cg in consistency_groups] # Consistency group ids are in list exactly once for cg_id in (self.cg["id"], self.cg2["id"]): gen = [cgid["id"] for cgid in consistency_groups if cgid["id"] == cg_id] msg = ("Expected id %s exactly once in consistency group list" % cg_id) self.assertEqual(1, len(gen), msg) @test.attr(type=["gate", ]) def test_list_consistency_groups_with_detail_v2_4(self): # List consistency groups consistency_groups = self.shares_v2_client.list_consistency_groups( detailed=True, version='2.4') # Verify keys [self.assertTrue(CG_DETAIL_REQUIRED_KEYS.issubset(set(cg.keys()))) for cg in consistency_groups] # Consistency group ids are in list exactly once for cg_id in (self.cg["id"], self.cg2["id"]): gen = [cgid["id"] for cgid in consistency_groups if cgid["id"] == cg_id] msg = ("Expected id %s exactly once in consistency group list" % cg_id) self.assertEqual(1, len(gen), msg) @test.attr(type=["gate", ]) def test_filter_shares_by_consistency_group_id_v2_4(self): shares = self.shares_v2_client.list_shares( detailed=True, params={'consistency_group_id': self.cg['id']}, version='2.4' ) share_ids = [share['id'] for share in shares] self.assertEqual(2, len(shares), 'Incorrect number of shares returned. Expected 2, ' 'got %s' % len(shares)) self.assertIn(self.shares[0]['id'], share_ids, 'Share %s expected in returned list, but got %s' % (self.shares[0]['id'], share_ids)) self.assertIn(self.shares[1]['id'], share_ids, 'Share %s expected in returned list, but got %s' % (self.shares[0]['id'], share_ids)) @test.attr(type=["gate", ]) def test_get_cgsnapshot_v2_4(self): # Get consistency group consistency_group = self.shares_v2_client.get_consistency_group( self.cg['id'], version='2.4') # Verify keys actual_keys = set(consistency_group.keys()) self.assertTrue(CG_DETAIL_REQUIRED_KEYS.issubset(actual_keys), 'Not all required keys returned for consistency ' 'group %s. 
Expected at least: %s, found %s' % ( consistency_group['id'], CG_DETAIL_REQUIRED_KEYS, actual_keys)) # Verify values msg = "Expected name: '%s', actual name: '%s'" % ( self.cg_name, consistency_group["name"]) self.assertEqual(self.cg_name, str(consistency_group["name"]), msg) msg = "Expected description: '%s', actual description: '%s'" % ( self.cg_desc, consistency_group["description"]) self.assertEqual(self.cg_desc, str(consistency_group["description"]), msg) @test.attr(type=["gate", ]) def test_get_cgsnapshot_members_v2_4(self): cgsnapshot_members = self.shares_v2_client.list_cgsnapshot_members( self.cgsnapshot['id'], version='2.4') member_share_ids = [member['share_id'] for member in cgsnapshot_members] self.assertEqual(2, len(cgsnapshot_members), 'Unexpected number of cgsnapshot members. Expected ' '2, got %s.' % len(cgsnapshot_members)) # Verify each share is represented in the cgsnapshot appropriately for share_id in (self.shares[0]['id'], self.shares[1]['id']): self.assertIn(share_id, member_share_ids, 'Share missing %s missing from cgsnapshot. Found %s.' % (share_id, member_share_ids)) for share in (self.shares[0], self.shares[1]): for member in cgsnapshot_members: if share['id'] == member['share_id']: self.assertEqual(share['size'], member['size']) self.assertEqual(share['share_proto'], member['share_protocol']) # TODO(akerr): Add back assert when bug 1483886 is fixed # self.assertEqual(share['share_type'], # member['share_type_id']) @test.attr(type=["gate", "smoke", ]) def test_create_consistency_group_from_populated_cgsnapshot_v2_4(self): cgsnapshot_members = self.shares_v2_client.list_cgsnapshot_members( self.cgsnapshot['id'], version='2.4') new_consistency_group = self.create_consistency_group( cleanup_in_class=False, source_cgsnapshot_id=self.cgsnapshot['id'], version='2.4' ) new_shares = self.shares_v2_client.list_shares( params={'consistency_group_id': new_consistency_group['id']}, detailed=True, version='2.4' ) # Verify each new share is available for share in new_shares: self.assertEqual('available', share['status'], 'Share %s is not in available status.' % share['id']) # Verify each cgsnapshot member is represented in the new cg # appropriately share_source_member_ids = [share['source_cgsnapshot_member_id'] for share in new_shares] for member in cgsnapshot_members: self.assertIn(member['id'], share_source_member_ids, 'cgsnapshot member %s not represented by ' 'consistency group %s.' 
% ( member['id'], new_consistency_group['id'])) for share in new_shares: if share['source_cgsnapshot_member_id'] == member['id']: self.assertEqual(member['size'], share['size']) self.assertEqual(member['share_protocol'], share['share_proto']) # TODO(akerr): Add back assert when bug 1483886 is fixed # self.assertEqual(member['share_type_id'], # share['share_type']) @testtools.skipUnless(CONF.share.run_consistency_group_tests, 'Consistency Group tests disabled.') class ConsistencyGroupRenameTest(base.BaseSharesTest): @classmethod def resource_setup(cls): super(ConsistencyGroupRenameTest, cls).resource_setup() # Create consistency group cls.cg_name = data_utils.rand_name("tempest-cg-name") cls.cg_desc = data_utils.rand_name("tempest-cg-description") cls.consistency_group = cls.create_consistency_group( name=cls.cg_name, description=cls.cg_desc, ) @test.attr(type=["gate", ]) def test_update_consistency_group_v2_4(self): # Get consistency_group consistency_group = self.shares_v2_client.get_consistency_group( self.consistency_group['id'], version='2.4') self.assertEqual(self.cg_name, consistency_group["name"]) self.assertEqual(self.cg_desc, consistency_group["description"]) # Update consistency_group new_name = data_utils.rand_name("tempest-new-name") new_desc = data_utils.rand_name("tempest-new-description") updated = self.shares_v2_client.update_consistency_group( consistency_group["id"], name=new_name, description=new_desc, version='2.4' ) self.assertEqual(new_name, updated["name"]) self.assertEqual(new_desc, updated["description"]) # Get consistency_group consistency_group = self.shares_v2_client.get_consistency_group( self.consistency_group['id'], version='2.4') self.assertEqual(new_name, consistency_group["name"]) self.assertEqual(new_desc, consistency_group["description"]) @test.attr(type=["gate", ]) def test_create_update_read_consistency_group_with_unicode_v2_4(self): value1 = u'ಠ_ಠ' value2 = u'ಠ_ರೃ' # Create consistency_group consistency_group = self.create_consistency_group( cleanup_in_class=False, name=value1, description=value1, version='2.4' ) self.assertEqual(value1, consistency_group["name"]) self.assertEqual(value1, consistency_group["description"]) # Update consistency_group updated = self.shares_v2_client.update_consistency_group( consistency_group["id"], name=value2, description=value2, version='2.4' ) self.assertEqual(value2, updated["name"]) self.assertEqual(value2, updated["description"]) # Get consistency_group consistency_group = self.shares_v2_client.get_consistency_group( consistency_group['id'], version='2.4') self.assertEqual(value2, consistency_group["name"]) self.assertEqual(value2, consistency_group["description"]) manila-2.0.0/manila_tempest_tests/tests/api/test_rules_negative.py0000664000567000056710000004557412701407107026715 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
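# The IP-rule negative tests below expect BadRequest for malformed access
# targets such as "1.2.3.256", "1.2.3.4/33", "1.2.3.*" or "1.2.3.1/".
# A small illustration of why those strings are rejected; this mirrors the
# general idea only, is not Manila's validator, and assumes Python 3's
# ipaddress module (or its backport) is available.
import ipaddress


def is_valid_ip_target(value):
    # Accept a single address or a CIDR block; anything else is invalid.
    try:
        ipaddress.ip_network(value, strict=False)
        return True
    except ValueError:
        return False


# is_valid_ip_target('1.2.3.4/23') -> True
# is_valid_ip_target('1.2.3.256') -> False
# is_valid_ip_target('1.2.3.4/33') -> False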
import ddt from tempest import config from tempest.lib import exceptions as lib_exc from tempest import test import testtools from manila_tempest_tests import share_exceptions from manila_tempest_tests.tests.api import base from manila_tempest_tests import utils CONF = config.CONF LATEST_MICROVERSION = CONF.share.max_api_microversion @ddt.ddt class ShareIpRulesForNFSNegativeTest(base.BaseSharesTest): protocol = "nfs" @classmethod def resource_setup(cls): super(ShareIpRulesForNFSNegativeTest, cls).resource_setup() if not (cls.protocol in CONF.share.enable_protocols and cls.protocol in CONF.share.enable_ip_rules_for_protocols): msg = "IP rule tests for %s protocol are disabled" % cls.protocol raise cls.skipException(msg) # create share cls.share = cls.create_share(cls.protocol) if CONF.share.run_snapshot_tests: # create snapshot cls.snap = cls.create_snapshot_wait_for_active(cls.share["id"]) @test.attr(type=["negative", "gate", ]) @ddt.data('shares_client', 'shares_v2_client') def test_create_access_rule_ip_with_wrong_target_1(self, client_name): self.assertRaises(lib_exc.BadRequest, getattr(self, client_name).create_access_rule, self.share["id"], "ip", "1.2.3.256") @test.attr(type=["negative", "gate", ]) @ddt.data('shares_client', 'shares_v2_client') def test_create_access_rule_ip_with_wrong_target_2(self, client_name): self.assertRaises(lib_exc.BadRequest, getattr(self, client_name).create_access_rule, self.share["id"], "ip", "1.1.1.-") @test.attr(type=["negative", "gate", ]) @ddt.data('shares_client', 'shares_v2_client') def test_create_access_rule_ip_with_wrong_target_3(self, client_name): self.assertRaises(lib_exc.BadRequest, getattr(self, client_name).create_access_rule, self.share["id"], "ip", "1.2.3.4/33") @test.attr(type=["negative", "gate", ]) @ddt.data('shares_client', 'shares_v2_client') def test_create_access_rule_ip_with_wrong_target_4(self, client_name): self.assertRaises(lib_exc.BadRequest, getattr(self, client_name).create_access_rule, self.share["id"], "ip", "1.2.3.*") @test.attr(type=["negative", "gate", ]) @ddt.data('shares_client', 'shares_v2_client') def test_create_access_rule_ip_with_wrong_target_5(self, client_name): self.assertRaises(lib_exc.BadRequest, getattr(self, client_name).create_access_rule, self.share["id"], "ip", "1.2.3.*/23") @test.attr(type=["negative", "gate", ]) @ddt.data('shares_client', 'shares_v2_client') def test_create_access_rule_ip_with_wrong_target_6(self, client_name): self.assertRaises(lib_exc.BadRequest, getattr(self, client_name).create_access_rule, self.share["id"], "ip", "1.2.3.1|23") @test.attr(type=["negative", "gate", ]) @ddt.data('shares_client', 'shares_v2_client') def test_create_access_rule_ip_with_wrong_target_7(self, client_name): self.assertRaises(lib_exc.BadRequest, getattr(self, client_name).create_access_rule, self.share["id"], "ip", "1.2.3.1/-1") @test.attr(type=["negative", "gate", ]) @ddt.data('shares_client', 'shares_v2_client') def test_create_access_rule_ip_with_wrong_target_8(self, client_name): self.assertRaises(lib_exc.BadRequest, getattr(self, client_name).create_access_rule, self.share["id"], "ip", "1.2.3.1/") @test.attr(type=["negative", "gate", ]) @ddt.data('shares_client', 'shares_v2_client') def test_create_access_rule_with_wrong_level(self, client_name): self.assertRaises(lib_exc.BadRequest, getattr(self, client_name).create_access_rule, self.share["id"], 'ip', '2.2.2.2', 'su') @test.attr(type=["negative", "gate", ]) @ddt.data('1.0', '2.9', LATEST_MICROVERSION) def test_create_duplicate_of_ip_rule(self, 
version): # test data access_type = "ip" access_to = "1.2.3.4" # create rule if utils.is_microversion_eq(version, '1.0'): rule = self.shares_client.create_access_rule( self.share["id"], access_type, access_to) else: rule = self.shares_v2_client.create_access_rule( self.share["id"], access_type, access_to, version=version) if utils.is_microversion_eq(version, '1.0'): self.shares_client.wait_for_access_rule_status( self.share["id"], rule["id"], "active") elif utils.is_microversion_eq(version, '2.9'): self.shares_v2_client.wait_for_access_rule_status( self.share["id"], rule["id"], "active") else: self.shares_v2_client.wait_for_share_status( self.share["id"], "active", status_attr='access_rules_status', version=version) # try create duplicate of rule if utils.is_microversion_eq(version, '1.0'): self.assertRaises(lib_exc.BadRequest, self.shares_client.create_access_rule, self.share["id"], access_type, access_to) else: self.assertRaises(lib_exc.BadRequest, self.shares_v2_client.create_access_rule, self.share["id"], access_type, access_to, version=version) # delete rule and wait for deletion if utils.is_microversion_eq(version, '1.0'): self.shares_client.delete_access_rule(self.share["id"], rule["id"]) self.shares_client.wait_for_resource_deletion( rule_id=rule["id"], share_id=self.share["id"]) else: self.shares_v2_client.delete_access_rule(self.share["id"], rule["id"]) self.shares_v2_client.wait_for_resource_deletion( rule_id=rule["id"], share_id=self.share["id"], version=version) @ddt.ddt class ShareIpRulesForCIFSNegativeTest(ShareIpRulesForNFSNegativeTest): protocol = "cifs" @ddt.ddt class ShareUserRulesForNFSNegativeTest(base.BaseSharesTest): protocol = "nfs" @classmethod def resource_setup(cls): super(ShareUserRulesForNFSNegativeTest, cls).resource_setup() if not (cls.protocol in CONF.share.enable_protocols and cls.protocol in CONF.share.enable_user_rules_for_protocols): msg = "USER rule tests for %s protocol are disabled" % cls.protocol raise cls.skipException(msg) # create share cls.share = cls.create_share(cls.protocol) if CONF.share.run_snapshot_tests: # create snapshot cls.snap = cls.create_snapshot_wait_for_active(cls.share["id"]) @test.attr(type=["negative", "gate", ]) @ddt.data('shares_client', 'shares_v2_client') def test_create_access_rule_user_with_wrong_input_2(self, client_name): self.assertRaises(lib_exc.BadRequest, getattr(self, client_name).create_access_rule, self.share["id"], "user", "try+") @test.attr(type=["negative", "gate", ]) @ddt.data('shares_client', 'shares_v2_client') def test_create_access_rule_user_with_empty_key(self, client_name): self.assertRaises(lib_exc.BadRequest, getattr(self, client_name).create_access_rule, self.share["id"], "user", "") @test.attr(type=["negative", "gate", ]) @ddt.data('shares_client', 'shares_v2_client') def test_create_access_rule_user_with_too_little_key(self, client_name): self.assertRaises(lib_exc.BadRequest, getattr(self, client_name).create_access_rule, self.share["id"], "user", "abc") @test.attr(type=["negative", "gate", ]) @ddt.data('shares_client', 'shares_v2_client') def test_create_access_rule_user_with_too_big_key(self, client_name): self.assertRaises(lib_exc.BadRequest, getattr(self, client_name).create_access_rule, self.share["id"], "user", "a" * 33) @test.attr(type=["negative", "gate", ]) @ddt.data('shares_client', 'shares_v2_client') def test_create_access_rule_user_with_wrong_input_1(self, client_name): self.assertRaises(lib_exc.BadRequest, getattr(self, client_name).create_access_rule, self.share["id"], "user", "try+") 
@test.attr(type=["negative", "gate", ]) @ddt.data('shares_client', 'shares_v2_client') @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_create_access_rule_user_to_snapshot(self, client_name): self.assertRaises(lib_exc.NotFound, getattr(self, client_name).create_access_rule, self.snap["id"], access_type="user", access_to="fakeuser") @test.attr(type=["negative", "gate", ]) @ddt.data('shares_client', 'shares_v2_client') def test_create_access_rule_user_with_wrong_share_id(self, client_name): self.assertRaises(lib_exc.NotFound, getattr(self, client_name).create_access_rule, "wrong_share_id", access_type="user", access_to="fakeuser") @test.attr(type=["negative", "gate", ]) @ddt.data('shares_client', 'shares_v2_client') def test_create_access_rule_with_wrong_level(self, client_name): self.assertRaises(lib_exc.BadRequest, getattr(self, client_name).create_access_rule, self.share["id"], 'user', CONF.share.username_for_user_rules, 'su') @ddt.ddt class ShareUserRulesForCIFSNegativeTest(ShareUserRulesForNFSNegativeTest): protocol = "cifs" @ddt.ddt class ShareCertRulesForGLUSTERFSNegativeTest(base.BaseSharesTest): protocol = "glusterfs" @classmethod def resource_setup(cls): super(ShareCertRulesForGLUSTERFSNegativeTest, cls).resource_setup() if not (cls.protocol in CONF.share.enable_protocols and cls.protocol in CONF.share.enable_cert_rules_for_protocols): msg = "CERT rule tests for %s protocol are disabled" % cls.protocol raise cls.skipException(msg) # create share cls.share = cls.create_share(cls.protocol) if CONF.share.run_snapshot_tests: # create snapshot cls.snap = cls.create_snapshot_wait_for_active(cls.share["id"]) @test.attr(type=["negative", "gate", ]) @ddt.data('shares_client', 'shares_v2_client') def test_create_access_rule_cert_with_empty_common_name(self, client_name): self.assertRaises(lib_exc.BadRequest, getattr(self, client_name).create_access_rule, self.share["id"], "cert", "") @test.attr(type=["negative", "gate", ]) @ddt.data('shares_client', 'shares_v2_client') def test_create_access_rule_cert_with_whitespace_common_name(self, client_name): self.assertRaises(lib_exc.BadRequest, getattr(self, client_name).create_access_rule, self.share["id"], "cert", " ") @test.attr(type=["negative", "gate", ]) @ddt.data('shares_client', 'shares_v2_client') def test_create_access_rule_cert_with_too_big_common_name(self, client_name): # common name cannot be more than 64 characters long self.assertRaises(lib_exc.BadRequest, getattr(self, client_name).create_access_rule, self.share["id"], "cert", "a" * 65) @test.attr(type=["negative", "gate", ]) @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") @ddt.data('shares_client', 'shares_v2_client') def test_create_access_rule_cert_to_snapshot(self, client_name): self.assertRaises(lib_exc.NotFound, getattr(self, client_name).create_access_rule, self.snap["id"], access_type="cert", access_to="fakeclient1.com") @test.attr(type=["negative", "gate", ]) @ddt.data('shares_client', 'shares_v2_client') def test_create_access_rule_cert_with_wrong_share_id(self, client_name): self.assertRaises(lib_exc.NotFound, getattr(self, client_name).create_access_rule, "wrong_share_id", access_type="cert", access_to="fakeclient2.com") @ddt.ddt class ShareCephxRulesForCephFSNegativeTest(base.BaseSharesTest): protocol = "cephfs" @classmethod def resource_setup(cls): super(ShareCephxRulesForCephFSNegativeTest, cls).resource_setup() if not (cls.protocol in CONF.share.enable_protocols and cls.protocol in 
CONF.share.enable_cephx_rules_for_protocols): msg = ("CEPHX rule tests for %s protocol are disabled" % cls.protocol) raise cls.skipException(msg) # create share cls.share = cls.create_share(cls.protocol) cls.access_type = "cephx" cls.access_to = "david" @test.attr(type=["negative", "gate", ]) @ddt.data('jane.doe', u"bj\u00F6rn") def test_create_access_rule_cephx_with_invalid_cephx_id(self, access_to): self.assertRaises(lib_exc.BadRequest, self.shares_v2_client.create_access_rule, self.share["id"], self.access_type, access_to) @test.attr(type=["negative", "gate", ]) def test_create_access_rule_cephx_with_wrong_level(self): self.assertRaises(lib_exc.BadRequest, self.shares_v2_client.create_access_rule, self.share["id"], self.access_type, self.access_to, access_level="su") @test.attr(type=["negative", "gate", ]) def test_create_access_rule_cephx_with_unsupported_access_level_ro(self): rule = self.shares_v2_client.create_access_rule( self.share["id"], self.access_type, self.access_to, access_level="ro") self.assertRaises( share_exceptions.AccessRuleBuildErrorException, self.shares_client.wait_for_access_rule_status, self.share['id'], rule['id'], "active") @ddt.ddt class ShareRulesNegativeTest(base.BaseSharesTest): # Tests independent from rule type and share protocol @classmethod def resource_setup(cls): super(ShareRulesNegativeTest, cls).resource_setup() if not (any(p in CONF.share.enable_ip_rules_for_protocols for p in cls.protocols) or any(p in CONF.share.enable_user_rules_for_protocols for p in cls.protocols) or any(p in CONF.share.enable_cert_rules_for_protocols for p in cls.protocols) or any(p in CONF.share.enable_cephx_rules_for_protocols for p in cls.protocols)): cls.message = "Rule tests are disabled" raise cls.skipException(cls.message) # create share cls.share = cls.create_share() if CONF.share.run_snapshot_tests: # create snapshot cls.snap = cls.create_snapshot_wait_for_active(cls.share["id"]) def skip_if_cephx_access_type_not_supported_by_client(self, client): if client == 'shares_client': version = '1.0' else: version = LATEST_MICROVERSION if (CONF.share.enable_cephx_rules_for_protocols and utils.is_microversion_lt(version, '2.13')): msg = ("API version %s does not support cephx access type, " "need version greater than 2.13." 
% version) raise self.skipException(msg) @test.attr(type=["negative", "gate", ]) @ddt.data('shares_client', 'shares_v2_client') def test_delete_access_rule_with_wrong_id(self, client_name): self.skip_if_cephx_access_type_not_supported_by_client(client_name) self.assertRaises(lib_exc.NotFound, getattr(self, client_name).delete_access_rule, self.share["id"], "wrong_rule_id") @test.attr(type=["negative", "gate", ]) @ddt.data('shares_client', 'shares_v2_client') def test_create_access_rule_ip_with_wrong_type(self, client_name): self.skip_if_cephx_access_type_not_supported_by_client(client_name) self.assertRaises(lib_exc.BadRequest, getattr(self, client_name).create_access_rule, self.share["id"], "wrong_type", "1.2.3.4") @test.attr(type=["negative", "gate", ]) @ddt.data('shares_client', 'shares_v2_client') def test_create_access_rule_ip_with_wrong_share_id(self, client_name): self.skip_if_cephx_access_type_not_supported_by_client(client_name) self.assertRaises(lib_exc.NotFound, getattr(self, client_name).create_access_rule, "wrong_share_id") @test.attr(type=["negative", "gate", ]) @ddt.data('shares_client', 'shares_v2_client') @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_create_access_rule_ip_to_snapshot(self, client_name): self.skip_if_cephx_access_type_not_supported_by_client(client_name) self.assertRaises(lib_exc.NotFound, getattr(self, client_name).create_access_rule, self.snap["id"]) manila-2.0.0/manila_tempest_tests/tests/api/test_security_services_negative.py0000664000567000056710000001247512701407107031327 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
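# The skip helper above compares API microversions such as '2.9' and '2.13',
# where a plain string comparison would give the wrong answer. A minimal
# sketch of numeric microversion comparison (illustrative only, not the
# implementation in manila_tempest_tests.utils):
def microversion_lt(left, right):
    # '2.9' < '2.13' numerically, even though '2.9' > '2.13' as strings.
    return ([int(p) for p in left.split('.')] <
            [int(p) for p in right.split('.')])


assert microversion_lt('2.9', '2.13')
assert not microversion_lt('2.13', '2.13')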
from oslo_log import log # noqa import six # noqa from tempest import config # noqa from tempest.lib import exceptions as lib_exc # noqa from tempest import test # noqa import testtools # noqa from manila_tempest_tests.tests.api import base CONF = config.CONF LOG = log.getLogger(__name__) class SecurityServicesNegativeTest(base.BaseSharesTest): @test.attr(type=["gate", "smoke", "negative"]) def test_try_create_security_service_with_empty_type(self): self.assertRaises(lib_exc.BadRequest, self.shares_client.create_security_service, "") @test.attr(type=["gate", "smoke", "negative"]) def test_try_create_security_service_with_wrong_type(self): self.assertRaises(lib_exc.BadRequest, self.shares_client.create_security_service, "wrong_type") @test.attr(type=["gate", "smoke", "negative"]) def test_try_get_security_service_without_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.get_security_service, "") @test.attr(type=["gate", "smoke", "negative"]) def test_try_get_security_service_with_wrong_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.get_security_service, "wrong_id") @test.attr(type=["gate", "smoke", "negative"]) def test_try_delete_security_service_without_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.delete_security_service, "") @test.attr(type=["gate", "smoke", "negative"]) def test_try_delete_security_service_with_wrong_type(self): self.assertRaises(lib_exc.NotFound, self.shares_client.delete_security_service, "wrong_id") @test.attr(type=["gate", "smoke", "negative"]) def test_try_update_nonexistant_security_service(self): self.assertRaises(lib_exc.NotFound, self.shares_client.update_security_service, "wrong_id", name="name") @test.attr(type=["gate", "smoke", "negative"]) def test_try_update_security_service_with_empty_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.update_security_service, "", name="name") @test.attr(type=["gate", "smoke", "negative"]) @testtools.skipIf( not CONF.share.multitenancy_enabled, "Only for multitenancy.") def test_try_update_invalid_keys_sh_server_exists(self): ss_data = self.generate_security_service_data() ss = self.create_security_service(**ss_data) sn = self.shares_client.get_share_network( self.os.shares_client.share_network_id) fresh_sn = self.create_share_network( neutron_net_id=sn["neutron_net_id"], neutron_subnet_id=sn["neutron_subnet_id"]) self.shares_client.add_sec_service_to_share_network( fresh_sn["id"], ss["id"]) # Security service with fake data is used, so if we use backend driver # that fails on wrong data, we expect error here. # We require any share that uses our share-network. try: self.create_share( share_network_id=fresh_sn["id"], cleanup_in_class=False) except Exception as e: # we do wait for either 'error' or 'available' status because # it is the only available statuses for proper deletion. LOG.warning("Caught exception. It is expected in case backend " "fails having security-service with improper data " "that leads to share-server creation error. 
" "%s" % six.text_type(e)) self.assertRaises(lib_exc.Forbidden, self.shares_client.update_security_service, ss["id"], user="new_user") @test.attr(type=["gate", "smoke", "negative"]) def test_get_deleted_security_service(self): data = self.generate_security_service_data() ss = self.create_security_service(**data) self.assertDictContainsSubset(data, ss) self.shares_client.delete_security_service(ss["id"]) # try get deleted security service entity self.assertRaises(lib_exc.NotFound, self.shares_client.get_security_service, ss["id"]) @test.attr(type=["gate", "smoke", "negative"]) def test_try_list_security_services_all_tenants(self): self.assertRaises(lib_exc.Forbidden, self.shares_client.list_security_services, params={'all_tenants': 1}) manila-2.0.0/manila_tempest_tests/tests/api/test_metadata.py0000664000567000056710000001211012701407107025435 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import test # noqa from manila_tempest_tests.tests.api import base class SharesMetadataTest(base.BaseSharesTest): @classmethod def resource_setup(cls): super(SharesMetadataTest, cls).resource_setup() cls.share = cls.create_share() @test.attr(type=["gate", ]) def test_set_metadata_in_share_creation(self): md = {u"key1": u"value1", u"key2": u"value2", } # create share with metadata share = self.create_share(metadata=md, cleanup_in_class=False) # get metadata of share metadata = self.shares_client.get_metadata(share["id"]) # verify metadata self.assertEqual(md, metadata) @test.attr(type=["gate", ]) def test_set_get_delete_metadata(self): md = {u"key3": u"value3", u"key4": u"value4", } # create share share = self.create_share(cleanup_in_class=False) # set metadata self.shares_client.set_metadata(share["id"], md) # read metadata get_md = self.shares_client.get_metadata(share["id"]) # verify metadata self.assertEqual(md, get_md) # delete metadata for key in md.keys(): self.shares_client.delete_metadata(share["id"], key) # verify deletion of metadata get_metadata = self.shares_client.get_metadata(share["id"]) self.assertEqual({}, get_metadata) @test.attr(type=["gate", ]) def test_set_and_update_metadata_by_key(self): md1 = {u"key5": u"value5", u"key6": u"value6", } md2 = {u"key7": u"value7", u"key8": u"value8", } # create share share = self.create_share(cleanup_in_class=False) # set metadata self.shares_client.set_metadata(share["id"], md1) # update metadata self.shares_client.update_all_metadata(share["id"], md2) # get metadata get_md = self.shares_client.get_metadata(share["id"]) # verify metadata self.assertEqual(md2, get_md) @test.attr(type=["gate", ]) def test_set_metadata_min_size_key(self): data = {"k": "value"} self.shares_client.set_metadata(self.share["id"], data) body_get = self.shares_client.get_metadata(self.share["id"]) self.assertEqual(data['k'], body_get.get('k')) @test.attr(type=["gate", ]) def test_set_metadata_max_size_key(self): max_key = "k" * 255 data = {max_key: "value"} 
self.shares_client.set_metadata(self.share["id"], data) body_get = self.shares_client.get_metadata(self.share["id"]) self.assertIn(max_key, body_get) self.assertEqual(data[max_key], body_get.get(max_key)) @test.attr(type=["gate", ]) def test_set_metadata_min_size_value(self): data = {"key": "v"} self.shares_client.set_metadata(self.share["id"], data) body_get = self.shares_client.get_metadata(self.share["id"]) self.assertEqual(data['key'], body_get['key']) @test.attr(type=["gate", ]) def test_set_metadata_max_size_value(self): max_value = "v" * 1023 data = {"key": max_value} self.shares_client.set_metadata(self.share["id"], data) body_get = self.shares_client.get_metadata(self.share["id"]) self.assertEqual(data['key'], body_get['key']) @test.attr(type=["gate", ]) def test_upd_metadata_min_size_key(self): data = {"k": "value"} self.shares_client.update_all_metadata(self.share["id"], data) body_get = self.shares_client.get_metadata(self.share["id"]) self.assertEqual(data, body_get) @test.attr(type=["gate", ]) def test_upd_metadata_max_size_key(self): max_key = "k" * 255 data = {max_key: "value"} self.shares_client.update_all_metadata(self.share["id"], data) body_get = self.shares_client.get_metadata(self.share["id"]) self.assertEqual(data, body_get) @test.attr(type=["gate", ]) def test_upd_metadata_min_size_value(self): data = {"key": "v"} self.shares_client.update_all_metadata(self.share["id"], data) body_get = self.shares_client.get_metadata(self.share["id"]) self.assertEqual(data, body_get) @test.attr(type=["gate", ]) def test_upd_metadata_max_size_value(self): max_value = "v" * 1023 data = {"key": max_value} self.shares_client.update_all_metadata(self.share["id"], data) body_get = self.shares_client.get_metadata(self.share["id"]) self.assertEqual(data, body_get) manila-2.0.0/manila_tempest_tests/tests/api/test_share_networks.py0000664000567000056710000001761512701407107026732 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
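# The tests below cover share-network operations: listing and filtering
# (via ShareNetworkListMixin), create/get/update/delete, re-creation with
# the same data, and updates while a share server already exists.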
import six # noqa from tempest import config # noqa from tempest import test # noqa import testtools # noqa from manila_tempest_tests.tests.api import base CONF = config.CONF class ShareNetworkListMixin(object): @test.attr(type=["gate", "smoke", ]) def test_list_share_networks(self): listed = self.shares_client.list_share_networks() any(self.sn_with_ldap_ss["id"] in sn["id"] for sn in listed) # verify keys keys = ["name", "id"] [self.assertIn(key, sn.keys()) for sn in listed for key in keys] @test.attr(type=["gate", "smoke", ]) def test_list_share_networks_with_detail(self): listed = self.shares_client.list_share_networks_with_detail() any(self.sn_with_ldap_ss["id"] in sn["id"] for sn in listed) # verify keys keys = [ "name", "id", "description", "network_type", "project_id", "cidr", "ip_version", "neutron_net_id", "neutron_subnet_id", "created_at", "updated_at", "segmentation_id", ] [self.assertIn(key, sn.keys()) for sn in listed for key in keys] @test.attr(type=["gate", "smoke", ]) def test_list_share_networks_filter_by_ss(self): listed = self.shares_client.list_share_networks_with_detail( {'security_service_id': self.ss_ldap['id']}) self.assertTrue(any(self.sn_with_ldap_ss['id'] == sn['id'] for sn in listed)) for sn in listed: ss_list = self.shares_client.list_sec_services_for_share_network( sn['id']) self.assertTrue(any(ss['id'] == self.ss_ldap['id'] for ss in ss_list)) @test.attr(type=["gate", "smoke", ]) def test_list_share_networks_all_filter_opts(self): valid_filter_opts = { 'created_before': '2002-10-10', 'created_since': '2001-01-01', 'neutron_net_id': '1111', 'neutron_subnet_id': '2222', 'network_type': 'vlan', 'segmentation_id': 1000, 'cidr': '10.0.0.0/24', 'ip_version': 4, 'name': 'sn_with_ldap_ss' } listed = self.shares_client.list_share_networks_with_detail( valid_filter_opts) self.assertTrue(any(self.sn_with_ldap_ss['id'] == sn['id'] for sn in listed)) created_before = valid_filter_opts.pop('created_before') created_since = valid_filter_opts.pop('created_since') for sn in listed: self.assertTrue(all(sn[key] == value for key, value in six.iteritems(valid_filter_opts))) self.assertTrue(sn['created_at'] <= created_before) self.assertTrue(sn['created_at'] >= created_since) class ShareNetworksTest(base.BaseSharesTest, ShareNetworkListMixin): @classmethod def resource_setup(cls): super(ShareNetworksTest, cls).resource_setup() ss_data = cls.generate_security_service_data() cls.ss_ldap = cls.create_security_service(**ss_data) cls.data_sn_with_ldap_ss = { 'name': 'sn_with_ldap_ss', 'neutron_net_id': '1111', 'neutron_subnet_id': '2222', 'created_at': '2002-02-02', 'updated_at': None, 'network_type': 'vlan', 'segmentation_id': 1000, 'cidr': '10.0.0.0/24', 'ip_version': 4, 'description': 'fake description', } cls.sn_with_ldap_ss = cls.create_share_network( cleanup_in_class=True, **cls.data_sn_with_ldap_ss) cls.shares_client.add_sec_service_to_share_network( cls.sn_with_ldap_ss["id"], cls.ss_ldap["id"]) cls.data_sn_with_kerberos_ss = { 'name': 'sn_with_kerberos_ss', 'created_at': '2003-03-03', 'updated_at': None, 'neutron_net_id': 'test net id', 'neutron_subnet_id': 'test subnet id', 'network_type': 'local', 'segmentation_id': 2000, 'cidr': '10.0.0.0/13', 'ip_version': 6, 'description': 'fake description', } cls.ss_kerberos = cls.create_security_service( ss_type='kerberos', **cls.data_sn_with_ldap_ss) cls.sn_with_kerberos_ss = cls.create_share_network( cleanup_in_class=True, **cls.data_sn_with_kerberos_ss) cls.shares_client.add_sec_service_to_share_network( 
cls.sn_with_kerberos_ss["id"], cls.ss_kerberos["id"]) @test.attr(type=["gate", "smoke", ]) def test_create_delete_share_network(self): # generate data for share network data = self.generate_share_network_data() # create share network created = self.shares_client.create_share_network(**data) self.assertDictContainsSubset(data, created) # Delete share_network self.shares_client.delete_share_network(created["id"]) @test.attr(type=["gate", "smoke", ]) def test_get_share_network(self): get = self.shares_client.get_share_network(self.sn_with_ldap_ss["id"]) self.assertEqual('2002-02-02T00:00:00.000000', get['created_at']) data = self.data_sn_with_ldap_ss.copy() del data['created_at'] self.assertDictContainsSubset(data, get) @test.attr(type=["gate", "smoke", ]) def test_update_share_network(self): update_data = self.generate_share_network_data() updated = self.shares_client.update_share_network( self.sn_with_ldap_ss["id"], **update_data) self.assertDictContainsSubset(update_data, updated) @test.attr(type=["gate", "smoke"]) @testtools.skipIf( not CONF.share.multitenancy_enabled, "Only for multitenancy.") def test_update_valid_keys_sh_server_exists(self): self.create_share(cleanup_in_class=False) update_dict = { "name": "new_name", "description": "new_description", } updated = self.shares_client.update_share_network( self.shares_client.share_network_id, **update_dict) self.assertDictContainsSubset(update_dict, updated) @test.attr(type=["gate", "smoke", ]) def test_recreate_share_network(self): # generate data for share network data = self.generate_share_network_data() # create share network sn1 = self.shares_client.create_share_network(**data) self.assertDictContainsSubset(data, sn1) # Delete first share network self.shares_client.delete_share_network(sn1["id"]) # create second share network with same data sn2 = self.shares_client.create_share_network(**data) self.assertDictContainsSubset(data, sn2) # Delete second share network self.shares_client.delete_share_network(sn2["id"]) @test.attr(type=["gate", "smoke", ]) def test_create_two_share_networks_with_same_net_and_subnet(self): # generate data for share network data = self.generate_share_network_data() # create first share network sn1 = self.create_share_network(**data) self.assertDictContainsSubset(data, sn1) # create second share network sn2 = self.create_share_network(**data) self.assertDictContainsSubset(data, sn2) manila-2.0.0/manila_tempest_tests/tests/api/test_shares.py0000664000567000056710000001764512701407107025164 0ustar jenkinsjenkins00000000000000# Copyright 2014 mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
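# The tests below cover the basic share lifecycle (create, get, delete),
# snapshot create/delete, and creating shares from snapshots; the same
# scenarios are re-run for the CIFS, GlusterFS, HDFS and CephFS protocols
# through subclasses of SharesNFSTest.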
from tempest import config from tempest.lib import exceptions as lib_exc from tempest import test import testtools from manila_tempest_tests.tests.api import base from manila_tempest_tests import utils CONF = config.CONF class SharesNFSTest(base.BaseSharesTest): """Covers share functionality, that is related to NFS share type.""" protocol = "nfs" @classmethod def resource_setup(cls): super(SharesNFSTest, cls).resource_setup() if cls.protocol not in CONF.share.enable_protocols: message = "%s tests are disabled" % cls.protocol raise cls.skipException(message) cls.share = cls.create_share(cls.protocol) @test.attr(type=["gate", ]) def test_create_get_delete_share(self): share = self.create_share(self.protocol) detailed_elements = {'name', 'id', 'availability_zone', 'description', 'project_id', 'host', 'created_at', 'share_proto', 'metadata', 'size', 'snapshot_id', 'share_network_id', 'status', 'share_type', 'volume_type', 'links', 'is_public'} msg = ( "At least one expected element missing from share " "response. Expected %(expected)s, got %(actual)s." % { "expected": detailed_elements, "actual": share.keys(), } ) self.assertTrue(detailed_elements.issubset(share.keys()), msg) self.assertFalse(share['is_public']) # The 'status' of the share returned by the create API must be # the default value - 'creating'. self.assertEqual('creating', share['status']) # Get share using v 2.1 - we expect key 'snapshot_support' to be absent share_get = self.shares_v2_client.get_share(share['id'], version='2.1') detailed_elements.add('export_location') self.assertTrue(detailed_elements.issubset(share_get.keys()), msg) # Get share using v 2.2 - we expect key 'snapshot_support' to exist share_get = self.shares_v2_client.get_share(share['id'], version='2.2') detailed_elements.add('snapshot_support') self.assertTrue(detailed_elements.issubset(share_get.keys()), msg) if utils.is_microversion_supported('2.9'): # Get share using v 2.9 - key 'export_location' is expected # to be absent share_get = self.shares_v2_client.get_share( share['id'], version='2.9') detailed_elements.remove('export_location') self.assertTrue(detailed_elements.issubset(share_get.keys()), msg) # In v 2.11 and beyond, we expect key 'replication_type' in the # share data returned by the share create API. if utils.is_microversion_supported('2.11'): detailed_elements.add('replication_type') self.assertTrue(detailed_elements.issubset(share.keys()), msg) # Delete share self.shares_v2_client.delete_share(share['id']) self.shares_v2_client.wait_for_resource_deletion(share_id=share['id']) self.assertRaises(lib_exc.NotFound, self.shares_v2_client.get_share, share['id']) @test.attr(type=["gate", ]) @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_create_delete_snapshot(self): # create snapshot snap = self.create_snapshot_wait_for_active(self.share["id"]) detailed_elements = {'name', 'id', 'description', 'created_at', 'share_proto', 'size', 'share_size', 'share_id', 'status', 'links'} self.assertTrue(detailed_elements.issubset(snap.keys()), 'At least one expected element missing from snapshot ' 'response. Expected %(expected)s, got %(actual)s.' 
% { "expected": detailed_elements, "actual": snap.keys()}) # delete snapshot self.shares_client.delete_snapshot(snap["id"]) self.shares_client.wait_for_resource_deletion(snapshot_id=snap["id"]) self.assertRaises(lib_exc.NotFound, self.shares_client.get_snapshot, snap['id']) @test.attr(type=["gate", "smoke", ]) @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_create_share_from_snapshot(self): # If multitenant driver used, share_network will be provided by default # create snapshot snap = self.create_snapshot_wait_for_active( self.share["id"], cleanup_in_class=False) # create share from snapshot s2 = self.create_share( self.protocol, snapshot_id=snap["id"], cleanup_in_class=False) # The 'status' of the share returned by the create API must be # the default value - 'creating'. self.assertEqual('creating', s2['status']) # verify share, created from snapshot get = self.shares_client.get_share(s2["id"]) msg = "Expected snapshot_id %s as "\ "source of share %s" % (snap["id"], get["snapshot_id"]) self.assertEqual(get["snapshot_id"], snap["id"], msg) @test.attr(type=["gate", "smoke", ]) @testtools.skipIf(not CONF.share.multitenancy_enabled, "Only for multitenancy.") @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_create_share_from_snapshot_share_network_not_provided(self): # We expect usage of share network from parent's share # when creating share from snapshot using multitenant driver. # get parent share parent = self.shares_client.get_share(self.share["id"]) # create snapshot snap = self.create_snapshot_wait_for_active( self.share["id"], cleanup_in_class=False) # create share from snapshot child = self.create_share( self.protocol, snapshot_id=snap["id"], cleanup_in_class=False) # The 'status' of the share returned by the create API must be # the default value - 'creating'. self.assertEqual('creating', child['status']) # verify share, created from snapshot get = self.shares_client.get_share(child["id"]) keys = { "share": self.share["id"], "actual_sn": get["share_network_id"], "expected_sn": parent["share_network_id"], } msg = ("Expected share_network_id %(expected_sn)s for " "share %(share)s, but %(actual_sn)s found." % keys) self.assertEqual( get["share_network_id"], parent["share_network_id"], msg) class SharesCIFSTest(SharesNFSTest): """Covers share functionality, that is related to CIFS share type.""" protocol = "cifs" class SharesGLUSTERFSTest(SharesNFSTest): """Covers share functionality that is related to GLUSTERFS share type.""" protocol = "glusterfs" class SharesHDFSTest(SharesNFSTest): """Covers share functionality that is related to HDFS share type.""" protocol = "hdfs" class SharesCephFSTest(SharesNFSTest): """Covers share functionality that is related to CEPHFS share type.""" protocol = "cephfs" manila-2.0.0/manila_tempest_tests/tests/api/test_scheduler_stats_negative.py0000664000567000056710000000237012701407107030742 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib import exceptions as lib_exc # noqa from tempest import test # noqa from manila_tempest_tests.tests.api import base class SchedulerStatsNegativeTest(base.BaseSharesTest): @test.attr(type=["gate", "smoke", "negative", ]) def test_try_list_pools_with_user(self): self.assertRaises(lib_exc.Forbidden, self.shares_client.list_pools) @test.attr(type=["gate", "smoke", "negative", ]) def test_try_list_pools_detailed_with_user(self): self.assertRaises(lib_exc.Forbidden, self.shares_client.list_pools, detail=True) manila-2.0.0/manila_tempest_tests/tests/api/test_rules.py0000664000567000056710000005533212701407107025024 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from tempest import config from tempest.lib import exceptions as lib_exc from tempest import test import testtools from manila_tempest_tests.tests.api import base from manila_tempest_tests import utils CONF = config.CONF LATEST_MICROVERSION = CONF.share.max_api_microversion def _create_delete_ro_access_rule(self, version): """Common test case for usage in test suites with different decorators. :param self: instance of test class """ if utils.is_microversion_eq(version, '1.0'): rule = self.shares_client.create_access_rule( self.share["id"], self.access_type, self.access_to, 'ro') else: rule = self.shares_v2_client.create_access_rule( self.share["id"], self.access_type, self.access_to, 'ro', version=version) self.assertEqual('ro', rule['access_level']) for key in ('deleted', 'deleted_at', 'instance_mappings'): self.assertNotIn(key, rule.keys()) if utils.is_microversion_le(version, '2.9'): self.shares_client.wait_for_access_rule_status( self.share["id"], rule["id"], "active") else: self.shares_v2_client.wait_for_share_status( self.share["id"], "active", status_attr='access_rules_status', version=version) if utils.is_microversion_eq(version, '1.0'): self.shares_client.delete_access_rule(self.share["id"], rule["id"]) self.shares_client.wait_for_resource_deletion( rule_id=rule["id"], share_id=self.share['id']) else: self.shares_v2_client.delete_access_rule( self.share["id"], rule["id"], version=version) self.shares_v2_client.wait_for_resource_deletion( rule_id=rule["id"], share_id=self.share['id'], version=version) @ddt.ddt class ShareIpRulesForNFSTest(base.BaseSharesTest): protocol = "nfs" @classmethod def resource_setup(cls): super(ShareIpRulesForNFSTest, cls).resource_setup() if (cls.protocol not in CONF.share.enable_protocols or cls.protocol not in CONF.share.enable_ip_rules_for_protocols): msg = "IP rule tests for %s protocol are disabled" % cls.protocol raise cls.skipException(msg) cls.share = cls.create_share(cls.protocol) cls.access_type = "ip" cls.access_to = "2.2.2.2" @test.attr(type=["gate", ]) @ddt.data('1.0', '2.9', LATEST_MICROVERSION) def test_create_delete_access_rules_with_one_ip(self, version): # test data access_to = 
"1.1.1.1" # create rule if utils.is_microversion_eq(version, '1.0'): rule = self.shares_client.create_access_rule( self.share["id"], self.access_type, access_to) else: rule = self.shares_v2_client.create_access_rule( self.share["id"], self.access_type, access_to, version=version) self.assertEqual('rw', rule['access_level']) for key in ('deleted', 'deleted_at', 'instance_mappings'): self.assertNotIn(key, rule.keys()) if utils.is_microversion_eq(version, '1.0'): self.shares_client.wait_for_access_rule_status( self.share["id"], rule["id"], "active") elif utils.is_microversion_eq(version, '2.9'): self.shares_v2_client.wait_for_access_rule_status( self.share["id"], rule["id"], "active") else: self.shares_v2_client.wait_for_share_status( self.share["id"], "active", status_attr='access_rules_status', version=version) # delete rule and wait for deletion if utils.is_microversion_eq(version, '1.0'): self.shares_client.delete_access_rule(self.share["id"], rule["id"]) self.shares_client.wait_for_resource_deletion( rule_id=rule["id"], share_id=self.share['id']) else: self.shares_v2_client.delete_access_rule( self.share["id"], rule["id"], version=version) self.shares_v2_client.wait_for_resource_deletion( rule_id=rule["id"], share_id=self.share['id'], version=version) @test.attr(type=["gate", ]) @ddt.data('1.0', '2.9', LATEST_MICROVERSION) def test_create_delete_access_rule_with_cidr(self, version): # test data access_to = "1.2.3.4/32" # create rule if utils.is_microversion_eq(version, '1.0'): rule = self.shares_client.create_access_rule( self.share["id"], self.access_type, access_to) else: rule = self.shares_v2_client.create_access_rule( self.share["id"], self.access_type, access_to, version=version) for key in ('deleted', 'deleted_at', 'instance_mappings'): self.assertNotIn(key, rule.keys()) self.assertEqual('rw', rule['access_level']) if utils.is_microversion_eq(version, '1.0'): self.shares_client.wait_for_access_rule_status( self.share["id"], rule["id"], "active") elif utils.is_microversion_eq(version, '2.9'): self.shares_v2_client.wait_for_access_rule_status( self.share["id"], rule["id"], "active") else: self.shares_v2_client.wait_for_share_status( self.share["id"], "active", status_attr='access_rules_status', version=version) # delete rule and wait for deletion if utils.is_microversion_eq(version, '1.0'): self.shares_client.delete_access_rule(self.share["id"], rule["id"]) self.shares_client.wait_for_resource_deletion( rule_id=rule["id"], share_id=self.share['id']) else: self.shares_v2_client.delete_access_rule( self.share["id"], rule["id"], version=version) self.shares_v2_client.wait_for_resource_deletion( rule_id=rule["id"], share_id=self.share['id'], version=version) @test.attr(type=["gate", ]) @testtools.skipIf( "nfs" not in CONF.share.enable_ro_access_level_for_protocols, "RO access rule tests are disabled for NFS protocol.") @ddt.data('1.0', '2.9', LATEST_MICROVERSION) def test_create_delete_ro_access_rule(self, client_name): _create_delete_ro_access_rule(self, client_name) @ddt.ddt class ShareIpRulesForCIFSTest(ShareIpRulesForNFSTest): protocol = "cifs" @test.attr(type=["gate", ]) @testtools.skipIf( "cifs" not in CONF.share.enable_ro_access_level_for_protocols, "RO access rule tests are disabled for CIFS protocol.") @ddt.data('1.0', '2.9', LATEST_MICROVERSION) def test_create_delete_ro_access_rule(self, version): _create_delete_ro_access_rule(self, version) @ddt.ddt class ShareUserRulesForNFSTest(base.BaseSharesTest): protocol = "nfs" @classmethod def resource_setup(cls): 
super(ShareUserRulesForNFSTest, cls).resource_setup() if (cls.protocol not in CONF.share.enable_protocols or cls.protocol not in CONF.share.enable_user_rules_for_protocols): msg = "USER rule tests for %s protocol are disabled" % cls.protocol raise cls.skipException(msg) cls.share = cls.create_share(cls.protocol) cls.access_type = "user" cls.access_to = CONF.share.username_for_user_rules @test.attr(type=["gate", ]) @ddt.data('1.0', '2.9', LATEST_MICROVERSION) def test_create_delete_user_rule(self, version): # create rule if utils.is_microversion_eq(version, '1.0'): rule = self.shares_client.create_access_rule( self.share["id"], self.access_type, self.access_to) else: rule = self.shares_v2_client.create_access_rule( self.share["id"], self.access_type, self.access_to, version=version) self.assertEqual('rw', rule['access_level']) for key in ('deleted', 'deleted_at', 'instance_mappings'): self.assertNotIn(key, rule.keys()) if utils.is_microversion_eq(version, '1.0'): self.shares_client.wait_for_access_rule_status( self.share["id"], rule["id"], "active") elif utils.is_microversion_eq(version, '2.9'): self.shares_v2_client.wait_for_access_rule_status( self.share["id"], rule["id"], "active") else: self.shares_v2_client.wait_for_share_status( self.share["id"], "active", status_attr='access_rules_status', version=version) # delete rule and wait for deletion if utils.is_microversion_eq(version, '1.0'): self.shares_client.delete_access_rule(self.share["id"], rule["id"]) self.shares_client.wait_for_resource_deletion( rule_id=rule["id"], share_id=self.share['id']) else: self.shares_v2_client.delete_access_rule( self.share["id"], rule["id"], version=version) self.shares_v2_client.wait_for_resource_deletion( rule_id=rule["id"], share_id=self.share['id'], version=version) @test.attr(type=["gate", ]) @testtools.skipIf( "nfs" not in CONF.share.enable_ro_access_level_for_protocols, "RO access rule tests are disabled for NFS protocol.") @ddt.data('1.0', '2.9', LATEST_MICROVERSION) def test_create_delete_ro_access_rule(self, version): _create_delete_ro_access_rule(self, version) @ddt.ddt class ShareUserRulesForCIFSTest(ShareUserRulesForNFSTest): protocol = "cifs" @test.attr(type=["gate", ]) @testtools.skipIf( "cifs" not in CONF.share.enable_ro_access_level_for_protocols, "RO access rule tests are disabled for CIFS protocol.") @ddt.data('1.0', '2.9', LATEST_MICROVERSION) def test_create_delete_ro_access_rule(self, version): _create_delete_ro_access_rule(self, version) @ddt.ddt class ShareCertRulesForGLUSTERFSTest(base.BaseSharesTest): protocol = "glusterfs" @classmethod def resource_setup(cls): super(ShareCertRulesForGLUSTERFSTest, cls).resource_setup() if (cls.protocol not in CONF.share.enable_protocols or cls.protocol not in CONF.share.enable_cert_rules_for_protocols): msg = "Cert rule tests for %s protocol are disabled" % cls.protocol raise cls.skipException(msg) cls.share = cls.create_share(cls.protocol) cls.access_type = "cert" # Provide access to a client identified by a common name (CN) of the # certificate that it possesses. 
cls.access_to = "client1.com" @test.attr(type=["gate", ]) @ddt.data('1.0', '2.9', LATEST_MICROVERSION) def test_create_delete_cert_rule(self, version): # create rule if utils.is_microversion_eq(version, '1.0'): rule = self.shares_client.create_access_rule( self.share["id"], self.access_type, self.access_to) else: rule = self.shares_v2_client.create_access_rule( self.share["id"], self.access_type, self.access_to, version=version) self.assertEqual('rw', rule['access_level']) for key in ('deleted', 'deleted_at', 'instance_mappings'): self.assertNotIn(key, rule.keys()) if utils.is_microversion_eq(version, '1.0'): self.shares_client.wait_for_access_rule_status( self.share["id"], rule["id"], "active") elif utils.is_microversion_eq(version, '2.9'): self.shares_v2_client.wait_for_access_rule_status( self.share["id"], rule["id"], "active") else: self.shares_v2_client.wait_for_share_status( self.share["id"], "active", status_attr='access_rules_status', version=version) # delete rule if utils.is_microversion_eq(version, '1.0'): self.shares_client.delete_access_rule(self.share["id"], rule["id"]) self.shares_client.wait_for_resource_deletion( rule_id=rule["id"], share_id=self.share['id']) else: self.shares_v2_client.delete_access_rule( self.share["id"], rule["id"], version=version) self.shares_v2_client.wait_for_resource_deletion( rule_id=rule["id"], share_id=self.share['id'], version=version) @test.attr(type=["gate", ]) @testtools.skipIf( "glusterfs" not in CONF.share.enable_ro_access_level_for_protocols, "RO access rule tests are disabled for GLUSTERFS protocol.") @ddt.data('1.0', '2.9', LATEST_MICROVERSION) def test_create_delete_cert_ro_access_rule(self, version): if utils.is_microversion_eq(version, '1.0'): rule = self.shares_client.create_access_rule( self.share["id"], 'cert', 'client2.com', 'ro') else: rule = self.shares_v2_client.create_access_rule( self.share["id"], 'cert', 'client2.com', 'ro', version=version) self.assertEqual('ro', rule['access_level']) for key in ('deleted', 'deleted_at', 'instance_mappings'): self.assertNotIn(key, rule.keys()) if utils.is_microversion_eq(version, '1.0'): self.shares_client.wait_for_access_rule_status( self.share["id"], rule["id"], "active") elif utils.is_microversion_eq(version, '2.9'): self.shares_v2_client.wait_for_access_rule_status( self.share["id"], rule["id"], "active") else: self.shares_v2_client.wait_for_share_status( self.share["id"], "active", status_attr='access_rules_status', version=version) if utils.is_microversion_eq(version, '1.0'): self.shares_client.delete_access_rule(self.share["id"], rule["id"]) self.shares_client.wait_for_resource_deletion( rule_id=rule["id"], share_id=self.share['id']) else: self.shares_v2_client.delete_access_rule( self.share["id"], rule["id"], version=version) self.shares_v2_client.wait_for_resource_deletion( rule_id=rule["id"], share_id=self.share['id'], version=version) @ddt.ddt class ShareCephxRulesForCephFSTest(base.BaseSharesTest): protocol = "cephfs" @classmethod def resource_setup(cls): super(ShareCephxRulesForCephFSTest, cls).resource_setup() if (cls.protocol not in CONF.share.enable_protocols or cls.protocol not in CONF.share.enable_cephx_rules_for_protocols): msg = ("Cephx rule tests for %s protocol are disabled." % cls.protocol) raise cls.skipException(msg) cls.share = cls.create_share(cls.protocol) cls.access_type = "cephx" # Provide access to a client identified by a cephx auth id. 
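        # "bob" here, and the "alice"/"alice_bob"/"alice bob" values in the
        # ddt data below, are arbitrary cephx auth IDs; the variants appear
        # to be chosen to cover underscores and embedded spaces in the ID.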
cls.access_to = "bob" @test.attr(type=["gate", ]) @ddt.data("alice", "alice_bob", "alice bob") def test_create_delete_cephx_rule(self, access_to): rule = self.shares_v2_client.create_access_rule( self.share["id"], self.access_type, access_to) self.assertEqual('rw', rule['access_level']) for key in ('deleted', 'deleted_at', 'instance_mappings'): self.assertNotIn(key, rule.keys()) self.shares_v2_client.wait_for_access_rule_status( self.share["id"], rule["id"], "active") self.shares_v2_client.delete_access_rule(self.share["id"], rule["id"]) self.shares_v2_client.wait_for_resource_deletion( rule_id=rule["id"], share_id=self.share['id']) @ddt.ddt class ShareRulesTest(base.BaseSharesTest): @classmethod def resource_setup(cls): super(ShareRulesTest, cls).resource_setup() if not (any(p in CONF.share.enable_ip_rules_for_protocols for p in cls.protocols) or any(p in CONF.share.enable_user_rules_for_protocols for p in cls.protocols) or any(p in CONF.share.enable_cert_rules_for_protocols for p in cls.protocols) or any(p in CONF.share.enable_cephx_rules_for_protocols for p in cls.protocols)): cls.message = "Rule tests are disabled" raise cls.skipException(cls.message) if CONF.share.enable_ip_rules_for_protocols: cls.protocol = CONF.share.enable_ip_rules_for_protocols[0] cls.access_type = "ip" cls.access_to = "8.8.8.8" elif CONF.share.enable_user_rules_for_protocols: cls.protocol = CONF.share.enable_user_rules_for_protocols[0] cls.access_type = "user" cls.access_to = CONF.share.username_for_user_rules elif CONF.share.enable_cert_rules_for_protocols: cls.protocol = CONF.share.enable_cert_rules_for_protocols[0] cls.access_type = "cert" cls.access_to = "client3.com" elif CONF.share.enable_cephx_rules_for_protocols: cls.protocol = CONF.share.enable_cephx_rules_for_protocols[0] cls.access_type = "cephx" cls.access_to = "alice" cls.shares_v2_client.share_protocol = cls.protocol cls.share = cls.create_share() @test.attr(type=["gate", ]) @ddt.data('1.0', '2.9', LATEST_MICROVERSION) def test_list_access_rules(self, version): if (utils.is_microversion_lt(version, '2.13') and CONF.share.enable_cephx_rules_for_protocols): msg = ("API version %s does not support cephx access type, " "need version greater than 2.13." 
% version) raise self.skipException(msg) # create rule if utils.is_microversion_eq(version, '1.0'): rule = self.shares_client.create_access_rule( self.share["id"], self.access_type, self.access_to) else: rule = self.shares_v2_client.create_access_rule( self.share["id"], self.access_type, self.access_to, version=version) if utils.is_microversion_eq(version, '1.0'): self.shares_client.wait_for_access_rule_status( self.share["id"], rule["id"], "active") elif utils.is_microversion_eq(version, '2.9'): self.shares_v2_client.wait_for_access_rule_status( self.share["id"], rule["id"], "active") else: self.shares_v2_client.wait_for_share_status( self.share["id"], "active", status_attr='access_rules_status', version=version) # list rules if utils.is_microversion_eq(version, '1.0'): rules = self.shares_client.list_access_rules(self.share["id"]) else: rules = self.shares_v2_client.list_access_rules(self.share["id"], version=version) # verify keys for key in ("id", "access_type", "access_to", "access_level"): [self.assertIn(key, r.keys()) for r in rules] for key in ('deleted', 'deleted_at', 'instance_mappings'): [self.assertNotIn(key, r.keys()) for r in rules] # verify values self.assertEqual(self.access_type, rules[0]["access_type"]) self.assertEqual(self.access_to, rules[0]["access_to"]) self.assertEqual('rw', rules[0]["access_level"]) # our share id in list and have no duplicates gen = [r["id"] for r in rules if r["id"] in rule["id"]] msg = "expected id lists %s times in rule list" % (len(gen)) self.assertEqual(1, len(gen), msg) if utils.is_microversion_eq(version, '1.0'): self.shares_client.delete_access_rule(self.share["id"], rule["id"]) self.shares_client.wait_for_resource_deletion( rule_id=rule["id"], share_id=self.share['id']) else: self.shares_v2_client.delete_access_rule( self.share["id"], rule["id"], version=version) self.shares_v2_client.wait_for_resource_deletion( rule_id=rule["id"], share_id=self.share['id'], version=version) @test.attr(type=["gate", ]) @ddt.data('1.0', '2.9', LATEST_MICROVERSION) def test_access_rules_deleted_if_share_deleted(self, version): if (utils.is_microversion_lt(version, '2.13') and CONF.share.enable_cephx_rules_for_protocols): msg = ("API version %s does not support cephx access type, " "need version greater than 2.13." 
% version) raise self.skipException(msg) # create share share = self.create_share() # create rule if utils.is_microversion_eq(version, '1.0'): rule = self.shares_client.create_access_rule( share["id"], self.access_type, self.access_to) else: rule = self.shares_v2_client.create_access_rule( share["id"], self.access_type, self.access_to, version=version) if utils.is_microversion_eq(version, '1.0'): self.shares_client.wait_for_access_rule_status( share["id"], rule["id"], "active") elif utils.is_microversion_eq(version, '2.9'): self.shares_v2_client.wait_for_access_rule_status( share["id"], rule["id"], "active") else: self.shares_v2_client.wait_for_share_status( share["id"], "active", status_attr='access_rules_status', version=version) # delete share if utils.is_microversion_eq(version, '1.0'): self.shares_client.delete_share(share['id']) self.shares_client.wait_for_resource_deletion(share_id=share['id']) else: self.shares_v2_client.delete_share(share['id'], version=version) self.shares_v2_client.wait_for_resource_deletion( share_id=share['id'], version=version) # verify absence of rules for nonexistent share id if utils.is_microversion_eq(version, '1.0'): self.assertRaises(lib_exc.NotFound, self.shares_client.list_access_rules, share['id']) else: self.assertRaises(lib_exc.NotFound, self.shares_v2_client.list_access_rules, share['id'], version) manila-2.0.0/manila_tempest_tests/tests/api/test_shares_actions.py0000664000567000056710000005756512701407107026711 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
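# The tests below cover read-oriented share and snapshot actions:
# get/list with microversion-dependent response keys, filtering and
# sorting of lists, and extend/shrink and rename operations.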
import six from tempest import config from tempest.lib.common.utils import data_utils from tempest import test import testtools from manila_tempest_tests.tests.api import base from manila_tempest_tests import utils CONF = config.CONF class SharesActionsTest(base.BaseSharesTest): """Covers share functionality, that doesn't related to share type.""" @classmethod def resource_setup(cls): super(SharesActionsTest, cls).resource_setup() cls.shares = [] # create share cls.share_name = data_utils.rand_name("tempest-share-name") cls.share_desc = data_utils.rand_name("tempest-share-description") cls.metadata = { 'foo_key_share_1': 'foo_value_share_1', 'bar_key_share_1': 'foo_value_share_1', } cls.share_size = 1 cls.shares.append(cls.create_share( name=cls.share_name, description=cls.share_desc, size=cls.share_size, metadata=cls.metadata, )) if CONF.share.run_snapshot_tests: # create snapshot cls.snap_name = data_utils.rand_name("tempest-snapshot-name") cls.snap_desc = data_utils.rand_name( "tempest-snapshot-description") cls.snap = cls.create_snapshot_wait_for_active( cls.shares[0]["id"], cls.snap_name, cls.snap_desc) # create second share from snapshot for purposes of sorting and # snapshot filtering cls.share_name2 = data_utils.rand_name("tempest-share-name") cls.share_desc2 = data_utils.rand_name("tempest-share-description") cls.metadata2 = { 'foo_key_share_2': 'foo_value_share_2', 'bar_key_share_2': 'foo_value_share_2', } cls.shares.append(cls.create_share( name=cls.share_name2, description=cls.share_desc2, size=cls.share_size, metadata=cls.metadata2, snapshot_id=cls.snap['id'], )) def _get_share(self, version): # get share share = self.shares_v2_client.get_share( self.shares[0]['id'], version=six.text_type(version)) # verify keys expected_keys = [ "status", "description", "links", "availability_zone", "created_at", "project_id", "volume_type", "share_proto", "name", "snapshot_id", "id", "size", "share_network_id", "metadata", "host", "snapshot_id", "is_public", ] if utils.is_microversion_lt(version, '2.9'): expected_keys.extend(["export_location", "export_locations"]) if utils.is_microversion_ge(version, '2.2'): expected_keys.append("snapshot_support") if utils.is_microversion_ge(version, '2.4'): expected_keys.extend(["consistency_group_id", "source_cgsnapshot_member_id"]) if utils.is_microversion_ge(version, '2.5'): expected_keys.append("share_type_name") if utils.is_microversion_ge(version, '2.10'): expected_keys.append("access_rules_status") if utils.is_microversion_ge(version, '2.11'): expected_keys.append("replication_type") actual_keys = list(share.keys()) [self.assertIn(key, actual_keys) for key in expected_keys] # verify values msg = "Expected name: '%s', actual name: '%s'" % (self.share_name, share["name"]) self.assertEqual(self.share_name, six.text_type(share["name"]), msg) msg = "Expected description: '%s', "\ "actual description: '%s'" % (self.share_desc, share["description"]) self.assertEqual( self.share_desc, six.text_type(share["description"]), msg) msg = "Expected size: '%s', actual size: '%s'" % (self.share_size, share["size"]) self.assertEqual(self.share_size, int(share["size"]), msg) @test.attr(type=["gate", ]) def test_get_share_v2_1(self): self._get_share('2.1') @test.attr(type=["gate", ]) def test_get_share_with_snapshot_support_key(self): self._get_share('2.2') @test.attr(type=["gate", ]) @utils.skip_if_microversion_not_supported('2.4') def test_get_share_with_consistency_groups_keys(self): self._get_share('2.4') @test.attr(type=["gate", ]) 
@utils.skip_if_microversion_not_supported('2.6') def test_get_share_with_share_type_name_key(self): self._get_share('2.6') @test.attr(type=["gate", ]) @utils.skip_if_microversion_not_supported('2.9') def test_get_share_export_locations_removed(self): self._get_share('2.9') @test.attr(type=["gate", ]) @utils.skip_if_microversion_not_supported('2.10') def test_get_share_with_access_rules_status(self): self._get_share('2.10') @test.attr(type=["gate", ]) @utils.skip_if_microversion_not_supported('2.11') def test_get_share_with_replication_type_key(self): self._get_share('2.11') @test.attr(type=["gate", ]) def test_list_shares(self): # list shares shares = self.shares_v2_client.list_shares() # verify keys keys = ["name", "id", "links"] [self.assertIn(key, sh.keys()) for sh in shares for key in keys] # our share id in list and have no duplicates for share in self.shares: gen = [sid["id"] for sid in shares if sid["id"] in share["id"]] msg = "expected id lists %s times in share list" % (len(gen)) self.assertEqual(1, len(gen), msg) def _list_shares_with_detail(self, version): # list shares shares = self.shares_v2_client.list_shares_with_detail( version=six.text_type(version)) # verify keys keys = [ "status", "description", "links", "availability_zone", "created_at", "project_id", "volume_type", "share_proto", "name", "snapshot_id", "id", "size", "share_network_id", "metadata", "host", "snapshot_id", "is_public", "share_type", ] if utils.is_microversion_lt(version, '2.9'): keys.extend(["export_location", "export_locations"]) if utils.is_microversion_ge(version, '2.2'): keys.append("snapshot_support") if utils.is_microversion_ge(version, '2.4'): keys.extend(["consistency_group_id", "source_cgsnapshot_member_id"]) if utils.is_microversion_ge(version, '2.6'): keys.append("share_type_name") if utils.is_microversion_ge(version, '2.10'): keys.append("access_rules_status") if utils.is_microversion_ge(version, '2.11'): keys.append("replication_type") [self.assertIn(key, sh.keys()) for sh in shares for key in keys] # our shares in list and have no duplicates for share in self.shares: gen = [sid["id"] for sid in shares if sid["id"] in share["id"]] msg = "expected id lists %s times in share list" % (len(gen)) self.assertEqual(1, len(gen), msg) @test.attr(type=["gate", ]) def test_list_shares_with_detail_v2_1(self): self._list_shares_with_detail('2.1') @test.attr(type=["gate", ]) def test_list_shares_with_detail_and_snapshot_support_key(self): self._list_shares_with_detail('2.2') @test.attr(type=["gate", ]) @utils.skip_if_microversion_not_supported('2.4') def test_list_shares_with_detail_consistency_groups_keys(self): self._list_shares_with_detail('2.4') @test.attr(type=["gate", ]) @utils.skip_if_microversion_not_supported('2.6') def test_list_shares_with_detail_share_type_name_key(self): self._list_shares_with_detail('2.6') @test.attr(type=["gate", ]) @utils.skip_if_microversion_not_supported('2.9') def test_list_shares_with_detail_export_locations_removed(self): self._list_shares_with_detail('2.9') @test.attr(type=["gate", ]) @utils.skip_if_microversion_not_supported('2.10') def test_list_shares_with_detail_with_access_rules_status(self): self._list_shares_with_detail('2.10') @test.attr(type=["gate", ]) @utils.skip_if_microversion_not_supported('2.11') def test_list_shares_with_detail_replication_type_key(self): self._list_shares_with_detail('2.11') @test.attr(type=["gate", ]) def test_list_shares_with_detail_filter_by_metadata(self): filters = {'metadata': self.metadata} # list shares shares = 
self.shares_client.list_shares_with_detail(params=filters) # verify response self.assertTrue(len(shares) > 0) for share in shares: self.assertDictContainsSubset( filters['metadata'], share['metadata']) if CONF.share.run_snapshot_tests: self.assertFalse(self.shares[1]['id'] in [s['id'] for s in shares]) @test.attr(type=["gate", ]) def test_list_shares_with_detail_filter_by_host(self): base_share = self.shares_client.get_share(self.shares[0]['id']) filters = {'host': base_share['host']} # list shares shares = self.shares_client.list_shares_with_detail(params=filters) # verify response self.assertTrue(len(shares) > 0) for share in shares: self.assertEqual(filters['host'], share['host']) @test.attr(type=["gate", ]) @testtools.skipIf( not CONF.share.multitenancy_enabled, "Only for multitenancy.") def test_list_shares_with_detail_filter_by_share_network_id(self): base_share = self.shares_client.get_share(self.shares[0]['id']) filters = {'share_network_id': base_share['share_network_id']} # list shares shares = self.shares_client.list_shares_with_detail(params=filters) # verify response self.assertTrue(len(shares) > 0) for share in shares: self.assertEqual( filters['share_network_id'], share['share_network_id']) @test.attr(type=["gate", ]) @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_list_shares_with_detail_filter_by_snapshot_id(self): filters = {'snapshot_id': self.snap['id']} # list shares shares = self.shares_client.list_shares_with_detail(params=filters) # verify response self.assertTrue(len(shares) > 0) for share in shares: self.assertEqual(filters['snapshot_id'], share['snapshot_id']) self.assertFalse(self.shares[0]['id'] in [s['id'] for s in shares]) @test.attr(type=["gate", ]) def test_list_shares_with_detail_with_asc_sorting(self): filters = {'sort_key': 'created_at', 'sort_dir': 'asc'} # list shares shares = self.shares_client.list_shares_with_detail(params=filters) # verify response self.assertTrue(len(shares) > 0) sorted_list = [share['created_at'] for share in shares] self.assertEqual(sorted(sorted_list), sorted_list) @test.attr(type=["gate", ]) def test_list_shares_with_detail_filter_by_existed_name(self): # list shares by name, at least one share is expected params = {"name": self.share_name} shares = self.shares_client.list_shares_with_detail(params) self.assertEqual(self.share_name, shares[0]["name"]) @test.attr(type=["gate", ]) def test_list_shares_with_detail_filter_by_fake_name(self): # list shares by fake name, no shares are expected params = {"name": data_utils.rand_name("fake-nonexistent-name")} shares = self.shares_client.list_shares_with_detail(params) self.assertEqual(0, len(shares)) @test.attr(type=["gate", ]) def test_list_shares_with_detail_filter_by_active_status(self): # list shares by active status, at least one share is expected params = {"status": "available"} shares = self.shares_client.list_shares_with_detail(params) self.assertTrue(len(shares) > 0) for share in shares: self.assertEqual(params["status"], share["status"]) @test.attr(type=["gate", ]) def test_list_shares_with_detail_filter_by_fake_status(self): # list shares by fake status, no shares are expected params = {"status": 'fake'} shares = self.shares_client.list_shares_with_detail(params) self.assertEqual(0, len(shares)) @test.attr(type=["gate", ]) def test_list_shares_with_detail_filter_by_all_tenants(self): # non-admin user can get shares only from his project params = {"all_tenants": 1} shares = self.shares_client.list_shares_with_detail(params) 
self.assertTrue(len(shares) > 0) # get share with detailed info, we need its 'project_id' share = self.shares_client.get_share(self.shares[0]["id"]) project_id = share["project_id"] for share in shares: self.assertEqual(project_id, share["project_id"]) @test.attr(type=["gate", ]) def test_list_shares_public_with_detail(self): public_share = self.create_share( name='public_share', description='public_share_desc', size=1, is_public=True, cleanup_in_class=False ) private_share = self.create_share( name='private_share', description='private_share_desc', size=1, is_public=False, cleanup_in_class=False ) params = {"is_public": True} isolated_client = self.get_client_with_isolated_creds( type_of_creds='alt') shares = isolated_client.list_shares_with_detail(params) keys = [ "status", "description", "links", "availability_zone", "created_at", "export_location", "share_proto", "host", "name", "snapshot_id", "id", "size", "project_id", "is_public", ] [self.assertIn(key, sh.keys()) for sh in shares for key in keys] gen = [sid["id"] for sid in shares if sid["id"] == public_share["id"]] msg = "expected id lists %s times in share list" % (len(gen)) self.assertEqual(1, len(gen), msg) self.assertFalse(any([s["id"] == private_share["id"] for s in shares])) @test.attr(type=["gate", ]) @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_get_snapshot(self): # get snapshot get = self.shares_client.get_snapshot(self.snap["id"]) # verify keys expected_keys = ["status", "links", "share_id", "name", "share_proto", "created_at", "description", "id", "share_size"] actual_keys = get.keys() [self.assertIn(key, actual_keys) for key in expected_keys] # verify data msg = "Expected name: '%s', actual name: '%s'" % (self.snap_name, get["name"]) self.assertEqual(self.snap_name, get["name"], msg) msg = "Expected description: '%s', "\ "actual description: '%s'" % (self.snap_desc, get["description"]) self.assertEqual(self.snap_desc, get["description"], msg) msg = "Expected share_id: '%s', "\ "actual share_id: '%s'" % (self.shares[0]["id"], get["share_id"]) self.assertEqual(self.shares[0]["id"], get["share_id"], msg) @test.attr(type=["gate", ]) @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_list_snapshots(self): # list share snapshots snaps = self.shares_client.list_snapshots() # verify keys keys = ["id", "name", "links"] [self.assertIn(key, sn.keys()) for sn in snaps for key in keys] # our share id in list and have no duplicates gen = [sid["id"] for sid in snaps if sid["id"] in self.snap["id"]] msg = "expected id lists %s times in share list" % (len(gen)) self.assertEqual(1, len(gen), msg) @test.attr(type=["gate", ]) @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_list_snapshots_with_detail(self): # list share snapshots snaps = self.shares_client.list_snapshots_with_detail() # verify keys keys = ["status", "links", "share_id", "name", "share_proto", "created_at", "description", "id", "share_size"] [self.assertIn(key, sn.keys()) for sn in snaps for key in keys] # our share id in list and have no duplicates gen = [sid["id"] for sid in snaps if sid["id"] in self.snap["id"]] msg = "expected id lists %s times in share list" % (len(gen)) self.assertEqual(1, len(gen), msg) @test.attr(type=["gate", ]) @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_list_snapshots_with_detail_use_limit(self): for l, o in [('1', '1'), ('0', '1')]: filters = { 'limit': l, 
'offset': o, 'share_id': self.shares[0]['id'], } # list snapshots snaps = self.shares_client.list_snapshots_with_detail( params=filters) # Our snapshot should not be listed self.assertEqual(0, len(snaps)) # Only our one snapshot should be listed snaps = self.shares_client.list_snapshots_with_detail( params={'limit': '1', 'offset': '0', 'share_id': self.shares[0]['id']}) self.assertEqual(1, len(snaps['snapshots'])) self.assertEqual(self.snap['id'], snaps['snapshots'][0]['id']) @test.attr(type=["gate", ]) @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_list_snapshots_with_detail_filter_by_status_and_name(self): filters = {'status': 'available', 'name': self.snap_name} # list snapshots snaps = self.shares_client.list_snapshots_with_detail( params=filters) # verify response self.assertTrue(len(snaps) > 0) for snap in snaps: self.assertEqual(filters['status'], snap['status']) self.assertEqual(filters['name'], snap['name']) @test.attr(type=["gate", ]) @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_list_snapshots_with_detail_and_asc_sorting(self): filters = {'sort_key': 'share_id', 'sort_dir': 'asc'} # list snapshots snaps = self.shares_client.list_snapshots_with_detail( params=filters) # verify response self.assertTrue(len(snaps) > 0) sorted_list = [snap['share_id'] for snap in snaps] self.assertEqual(sorted(sorted_list), sorted_list) @test.attr(type=["gate", ]) @testtools.skipUnless( CONF.share.run_extend_tests, "Share extend tests are disabled.") def test_extend_share(self): share = self.create_share(size=1, cleanup_in_class=False) new_size = 2 # extend share and wait for active status self.shares_v2_client.extend_share(share['id'], new_size) self.shares_client.wait_for_share_status(share['id'], 'available') # check state and new size share_get = self.shares_v2_client.get_share(share['id']) msg = ( "Share could not be extended. " "Expected %(expected)s, got %(actual)s." % { "expected": new_size, "actual": share_get['size'], } ) self.assertEqual(new_size, share_get['size'], msg) @test.attr(type=["gate", ]) @testtools.skipUnless( CONF.share.run_shrink_tests, "Share shrink tests are disabled.") def test_shrink_share(self): share = self.create_share(size=2, cleanup_in_class=False) new_size = 1 # shrink share and wait for active status self.shares_v2_client.shrink_share(share['id'], new_size) self.shares_client.wait_for_share_status(share['id'], 'available') # check state and new size share_get = self.shares_v2_client.get_share(share['id']) msg = ( "Share could not be shrunk. " "Expected %(expected)s, got %(actual)s." 
% { "expected": new_size, "actual": share_get['size'], } ) self.assertEqual(new_size, share_get['size'], msg) class SharesRenameTest(base.BaseSharesTest): @classmethod def resource_setup(cls): super(SharesRenameTest, cls).resource_setup() # create share cls.share_name = data_utils.rand_name("tempest-share-name") cls.share_desc = data_utils.rand_name("tempest-share-description") cls.share_size = 1 cls.share = cls.create_share( name=cls.share_name, description=cls.share_desc, size=cls.share_size) if CONF.share.run_snapshot_tests: # create snapshot cls.snap_name = data_utils.rand_name("tempest-snapshot-name") cls.snap_desc = data_utils.rand_name( "tempest-snapshot-description") cls.snap = cls.create_snapshot_wait_for_active( cls.share["id"], cls.snap_name, cls.snap_desc) @test.attr(type=["gate", ]) def test_update_share(self): # get share share = self.shares_client.get_share(self.share['id']) self.assertEqual(self.share_name, share["name"]) self.assertEqual(self.share_desc, share["description"]) self.assertFalse(share["is_public"]) # update share new_name = data_utils.rand_name("tempest-new-name") new_desc = data_utils.rand_name("tempest-new-description") updated = self.shares_client.update_share( share["id"], new_name, new_desc, is_public=True) self.assertEqual(new_name, updated["name"]) self.assertEqual(new_desc, updated["description"]) self.assertTrue(updated["is_public"]) # get share share = self.shares_client.get_share(self.share['id']) self.assertEqual(new_name, share["name"]) self.assertEqual(new_desc, share["description"]) self.assertTrue(share["is_public"]) @test.attr(type=["gate", ]) @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_rename_snapshot(self): # get snapshot get = self.shares_client.get_snapshot(self.snap["id"]) self.assertEqual(self.snap_name, get["name"]) self.assertEqual(self.snap_desc, get["description"]) # rename snapshot new_name = data_utils.rand_name("tempest-new-name-for-snapshot") new_desc = data_utils.rand_name("tempest-new-description-for-snapshot") renamed = self.shares_client.rename_snapshot( self.snap["id"], new_name, new_desc) self.assertEqual(new_name, renamed["name"]) self.assertEqual(new_desc, renamed["description"]) # get snapshot get = self.shares_client.get_snapshot(self.snap["id"]) self.assertEqual(new_name, get["name"]) self.assertEqual(new_desc, get["description"]) manila-2.0.0/manila_tempest_tests/tests/api/test_shares_negative.py0000664000567000056710000002563612701407107027045 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
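# The tests below are negative cases: invalid parameters, nonexistent
# resources and operations on other tenants' public shares, all of which
# are expected to fail with BadRequest, NotFound or Forbidden.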
from tempest import config # noqa from tempest.lib import exceptions as lib_exc # noqa from tempest import test # noqa import testtools # noqa from manila_tempest_tests import share_exceptions from manila_tempest_tests.tests.api import base CONF = config.CONF class SharesNegativeTest(base.BaseSharesTest): @classmethod def resource_setup(cls): super(SharesNegativeTest, cls).resource_setup() cls.share = cls.create_share( name='public_share', description='public_share_desc', size=1, is_public=True, metadata={'key': 'value'} ) @test.attr(type=["negative", "smoke", "gate", ]) def test_create_share_with_invalid_protocol(self): self.assertRaises(lib_exc.BadRequest, self.shares_client.create_share, share_protocol="nonexistent_protocol") @test.attr(type=["negative", "smoke", "gate", ]) def test_create_share_with_wrong_public_value(self): self.assertRaises(lib_exc.BadRequest, self.shares_client.create_share, is_public='truebar') @test.attr(type=["negative", "smoke", "gate", ]) def test_update_share_with_wrong_public_value(self): self.assertRaises(lib_exc.BadRequest, self.shares_client.update_share, self.share["id"], is_public="truebar") @test.attr(type=["negative", "smoke", "gate", ]) def test_get_share_with_wrong_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.get_share, "wrong_share_id") @test.attr(type=["negative", "smoke", "gate", ]) def test_get_share_without_passing_share_id(self): # Should not be able to get share when empty ID is passed self.assertRaises(lib_exc.NotFound, self.shares_client.get_share, '') @test.attr(type=["negative", "smoke", "gate", ]) def test_list_shares_nonadmin_with_nonexistent_share_server_filter(self): # filtering by share server allowed only for admins by default self.assertRaises(lib_exc.Forbidden, self.shares_client.list_shares_with_detail, {'share_server_id': 'fake_share_server_id'}) @test.attr(type=["negative", "smoke", "gate", ]) def test_delete_share_with_wrong_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.delete_share, "wrong_share_id") @test.attr(type=["negative", "smoke", "gate", ]) def test_delete_share_without_passing_share_id(self): # Should not be able to delete share when empty ID is passed self.assertRaises(lib_exc.NotFound, self.shares_client.delete_share, '') @test.attr(type=["negative", "smoke", "gate", ]) @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_create_snapshot_with_wrong_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.create_snapshot, "wrong_share_id") @test.attr(type=["negative", "smoke", "gate", ]) @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_delete_snapshot_with_wrong_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.delete_snapshot, "wrong_share_id") @test.attr(type=["negative", "smoke", "gate", ]) def test_create_share_with_invalid_size(self): self.assertRaises(lib_exc.BadRequest, self.shares_client.create_share, size="#$%") @test.attr(type=["negative", "smoke", "gate", ]) def test_create_share_with_out_passing_size(self): self.assertRaises(lib_exc.BadRequest, self.shares_client.create_share, size="") @test.attr(type=["negative", "smoke", "gate", ]) def test_create_share_with_zero_size(self): self.assertRaises(lib_exc.BadRequest, self.shares_client.create_share, size=0) @test.attr(type=["negative", "gate", ]) def test_create_share_non_existent_az(self): self.assertRaises(lib_exc.NotFound, self.shares_v2_client.create_share, availability_zone='fake_az') 
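    # A minimal illustrative sketch of one more invalid-size case, following
    # the same assertRaises pattern as the zero-size test above; it assumes
    # the API rejects a negative size with BadRequest, as it does for zero.
    @test.attr(type=["negative", "smoke", "gate", ])
    def test_create_share_with_negative_size(self):
        # Hypothetical extra case: a negative size is assumed to be rejected
        # in the same way as a zero size.
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_share,
                          size=-1)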
@test.attr(type=["negative", "gate", ]) @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_try_delete_share_with_existing_snapshot(self): # share can not be deleted while snapshot exists # create share share = self.create_share() # create snapshot self.create_snapshot_wait_for_active(share["id"]) # try delete share self.assertRaises(lib_exc.Forbidden, self.shares_client.delete_share, share["id"]) @test.attr(type=["negative", "gate", ]) @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_create_share_from_snap_with_less_size(self): # requires minimum 5Gb available space skip_msg = "Check disc space for this test" try: # create share share = self.create_share(size=2, cleanup_in_class=False) except share_exceptions.ShareBuildErrorException: self.skip(skip_msg) try: # create snapshot snap = self.create_snapshot_wait_for_active( share["id"], cleanup_in_class=False) except share_exceptions.SnapshotBuildErrorException: self.skip(skip_msg) # try create share from snapshot with less size self.assertRaises(lib_exc.BadRequest, self.create_share, size=1, snapshot_id=snap["id"], cleanup_in_class=False) @test.attr(type=["negative", "smoke", "gate", ]) @testtools.skipIf(not CONF.share.multitenancy_enabled, "Only for multitenancy.") def test_create_share_with_nonexistant_share_network(self): self.assertRaises(lib_exc.NotFound, self.shares_client.create_share, share_network_id="wrong_sn_id") @test.attr(type=["negative", "smoke", "gate", ]) @testtools.skipIf(not CONF.share.multitenancy_enabled, "Only for multitenancy.") @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_create_share_from_snap_with_different_share_network(self): # create share share = self.create_share(cleanup_in_class=False) # get parent's share network parent_share = self.shares_client.get_share(share["id"]) parent_sn = self.shares_client.get_share_network( parent_share["share_network_id"]) # create new share-network - net duplicate of parent's share new_duplicated_sn = self.create_share_network( cleanup_in_class=False, neutron_net_id=parent_sn["neutron_net_id"], neutron_subnet_id=parent_sn["neutron_subnet_id"], ) # create snapshot of parent share snap = self.create_snapshot_wait_for_active( share["id"], cleanup_in_class=False) # try create share with snapshot using another share-network # 400 bad request is expected self.assertRaises( lib_exc.BadRequest, self.create_share, cleanup_in_class=False, share_network_id=new_duplicated_sn["id"], snapshot_id=snap["id"], ) @test.attr(type=["gate", "smoke", "negative", ]) def test_update_other_tenants_public_share(self): isolated_client = self.get_client_with_isolated_creds( type_of_creds='alt') self.assertRaises(lib_exc.Forbidden, isolated_client.update_share, self.share["id"], name="new_name") @test.attr(type=["gate", "smoke", "negative", ]) def test_delete_other_tenants_public_share(self): isolated_client = self.get_client_with_isolated_creds( type_of_creds='alt') self.assertRaises(lib_exc.Forbidden, isolated_client.delete_share, self.share['id']) @test.attr(type=["gate", "smoke", "negative", ]) def test_set_metadata_of_other_tenants_public_share(self): isolated_client = self.get_client_with_isolated_creds( type_of_creds='alt') self.assertRaises(lib_exc.Forbidden, isolated_client.set_metadata, self.share['id'], {'key': 'value'}) @test.attr(type=["gate", "smoke", "negative", ]) def test_update_metadata_of_other_tenants_public_share(self): isolated_client = 
self.get_client_with_isolated_creds( type_of_creds='alt') self.assertRaises(lib_exc.Forbidden, isolated_client.update_all_metadata, self.share['id'], {'key': 'value'}) @test.attr(type=["gate", "smoke", "negative", ]) def test_delete_metadata_of_other_tenants_public_share(self): isolated_client = self.get_client_with_isolated_creds( type_of_creds='alt') self.assertRaises(lib_exc.Forbidden, isolated_client.delete_metadata, self.share['id'], 'key') @test.attr(type=["gate", "smoke", "negative", ]) def test_list_by_share_server_by_user(self): self.assertRaises(lib_exc.Forbidden, self.shares_client.list_shares, params={'share_server_id': 12345}) @test.attr(type=["gate", "smoke", "negative", ]) def test_manage_share_by_user(self): self.assertRaises(lib_exc.Forbidden, self.shares_client.manage_share, 'fake-host', 'nfs', '/export/path', 'fake-type') @test.attr(type=["gate", "smoke", "negative", ]) def test_unmanage_share_by_user(self): self.assertRaises(lib_exc.Forbidden, self.shares_client.unmanage_share, 'fake-id') manila-2.0.0/manila_tempest_tests/tests/api/admin/0000775000567000056710000000000012701407265023346 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila_tempest_tests/tests/api/admin/test_migration_negative.py0000664000567000056710000000753412701407107030636 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hitachi Data Systems. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import config # noqa from tempest.lib import exceptions as lib_exc # noqa from tempest import test # noqa from manila_tempest_tests.tests.api import base CONF = config.CONF class MigrationNFSTest(base.BaseSharesAdminTest): """Tests Share Migration. Tests migration in multi-backend environment. """ protocol = "nfs" @classmethod def resource_setup(cls): super(MigrationNFSTest, cls).resource_setup() if not CONF.share.run_migration_tests: raise cls.skipException("Migration tests disabled. Skipping.") cls.share = cls.create_share(cls.protocol) cls.share = cls.shares_client.get_share(cls.share['id']) pools = cls.shares_client.list_pools()['pools'] if len(pools) < 2: raise cls.skipException("At least two different pool entries " "are needed to run migration tests. 
" "Skipping.") cls.dest_pool = next((x for x in pools if x['name'] != cls.share['host']), None) @test.attr(type=["gate", "negative", ]) @base.skip_if_microversion_lt("2.15") def test_migration_cancel_invalid(self): self.assertRaises( lib_exc.BadRequest, self.shares_v2_client.migration_cancel, self.share['id']) @test.attr(type=["gate", "negative", ]) @base.skip_if_microversion_lt("2.15") def test_migration_get_progress_invalid(self): self.assertRaises( lib_exc.BadRequest, self.shares_v2_client.migration_get_progress, self.share['id']) @test.attr(type=["gate", "negative", ]) @base.skip_if_microversion_lt("2.15") def test_migration_complete_invalid(self): self.assertRaises( lib_exc.BadRequest, self.shares_v2_client.migration_complete, self.share['id']) @test.attr(type=["gate", "negative", ]) @base.skip_if_microversion_lt("2.5") def test_migrate_share_with_snapshot_v2_5(self): snap = self.create_snapshot_wait_for_active(self.share['id']) self.assertRaises( lib_exc.BadRequest, self.shares_v2_client.migrate_share, self.share['id'], self.dest_pool, True, version='2.5') self.shares_client.delete_snapshot(snap['id']) self.shares_client.wait_for_resource_deletion(snapshot_id=snap["id"]) @test.attr(type=["gate", "negative", ]) @base.skip_if_microversion_lt("2.5") def test_migrate_share_same_host_v2_5(self): self.assertRaises( lib_exc.BadRequest, self.shares_v2_client.migrate_share, self.share['id'], self.share['host'], True, version='2.5') @test.attr(type=["gate", "negative", ]) @base.skip_if_microversion_lt("2.5") def test_migrate_share_not_available_v2_5(self): self.shares_client.reset_state(self.share['id'], 'error') self.shares_client.wait_for_share_status(self.share['id'], 'error') self.assertRaises( lib_exc.BadRequest, self.shares_v2_client.migrate_share, self.share['id'], self.dest_pool, True, version='2.5') self.shares_client.reset_state(self.share['id'], 'available') self.shares_client.wait_for_share_status(self.share['id'], 'available') manila-2.0.0/manila_tempest_tests/tests/api/admin/test_migration.py0000664000567000056710000001204112701407107026741 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hitachi Data Systems. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import config # noqa from tempest import test # noqa from manila_tempest_tests.tests.api import base from manila_tempest_tests import utils CONF = config.CONF class MigrationNFSTest(base.BaseSharesAdminTest): """Tests Share Migration. Tests migration in multi-backend environment. """ protocol = "nfs" @classmethod def resource_setup(cls): super(MigrationNFSTest, cls).resource_setup() if cls.protocol not in CONF.share.enable_protocols: message = "%s tests are disabled" % cls.protocol raise cls.skipException(message) if not CONF.share.run_migration_tests: raise cls.skipException("Migration tests disabled. 
Skipping.") @test.attr(type=["gate", ]) @base.skip_if_microversion_lt("2.5") def test_migration_empty_v2_5(self): share, dest_pool = self._setup_migration() old_exports = share['export_locations'] share = self.migrate_share(share['id'], dest_pool, version='2.5') self._validate_migration_successful(dest_pool, share, old_exports, version='2.5') @test.attr(type=["gate", ]) @base.skip_if_microversion_lt("2.15") def test_migration_completion_empty_v2_15(self): share, dest_pool = self._setup_migration() old_exports = self.shares_v2_client.list_share_export_locations( share['id'], version='2.15') self.assertNotEmpty(old_exports) old_exports = [x['path'] for x in old_exports if x['is_admin_only'] is False] self.assertNotEmpty(old_exports) share = self.migrate_share( share['id'], dest_pool, version='2.15', notify=False, wait_for_status='data_copying_completed') self._validate_migration_successful(dest_pool, share, old_exports, '2.15', notify=False) share = self.migration_complete(share['id'], dest_pool, version='2.15') self._validate_migration_successful(dest_pool, share, old_exports, version='2.15') def _setup_migration(self): pools = self.shares_client.list_pools()['pools'] if len(pools) < 2: raise self.skipException("At least two different pool entries " "are needed to run migration tests. " "Skipping.") share = self.create_share(self.protocol) share = self.shares_client.get_share(share['id']) self.shares_v2_client.create_access_rule( share['id'], access_to="50.50.50.50", access_level="rw") self.shares_v2_client.wait_for_share_status( share['id'], 'active', status_attr='access_rules_status') self.shares_v2_client.create_access_rule( share['id'], access_to="51.51.51.51", access_level="ro") self.shares_v2_client.wait_for_share_status( share['id'], 'active', status_attr='access_rules_status') dest_pool = next((x for x in pools if x['name'] != share['host']), None) self.assertIsNotNone(dest_pool) self.assertIsNotNone(dest_pool.get('name')) dest_pool = dest_pool['name'] return share, dest_pool def _validate_migration_successful(self, dest_pool, share, old_exports, version, notify=True): if utils.is_microversion_lt(version, '2.9'): new_exports = share['export_locations'] self.assertNotEmpty(new_exports) else: new_exports = self.shares_v2_client.list_share_export_locations( share['id'], version='2.9') self.assertNotEmpty(new_exports) new_exports = [x['path'] for x in new_exports if x['is_admin_only'] is False] self.assertNotEmpty(new_exports) # Share migrated if notify: self.assertEqual(dest_pool, share['host']) for export in old_exports: self.assertFalse(export in new_exports) self.assertEqual('migration_success', share['task_state']) # Share not migrated yet else: self.assertNotEqual(dest_pool, share['host']) for export in old_exports: self.assertTrue(export in new_exports) self.assertEqual('data_copying_completed', share['task_state']) manila-2.0.0/manila_tempest_tests/tests/api/admin/test_share_types.py0000664000567000056710000001700112701407107027277 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import ddt from tempest import config # noqa from tempest.lib.common.utils import data_utils # noqa from tempest.lib import exceptions as lib_exc # noqa from tempest import test # noqa from manila_tempest_tests.tests.api import base from manila_tempest_tests import utils CONF = config.CONF @ddt.ddt class ShareTypesAdminTest(base.BaseSharesAdminTest): @test.attr(type=["gate", "smoke", ]) def test_share_type_create_delete(self): name = data_utils.rand_name("tempest-manila") extra_specs = self.add_required_extra_specs_to_dict() # Create share type st_create = self.shares_v2_client.create_share_type( name, extra_specs=extra_specs) self.assertEqual(name, st_create['share_type']['name']) st_id = st_create['share_type']['id'] # Delete share type self.shares_v2_client.delete_share_type(st_id) # Verify deletion of share type self.shares_v2_client.wait_for_resource_deletion(st_id=st_id) self.assertRaises(lib_exc.NotFound, self.shares_v2_client.get_share_type, st_id) def _verify_is_public_key_name(self, share_type, version): old_key_name = 'os-share-type-access:is_public' new_key_name = 'share_type_access:is_public' if utils.is_microversion_gt(version, "2.6"): self.assertIn(new_key_name, share_type) self.assertNotIn(old_key_name, share_type) else: self.assertIn(old_key_name, share_type) self.assertNotIn(new_key_name, share_type) @test.attr(type=["gate", "smoke", ]) @ddt.data('2.0', '2.6', '2.7') def test_share_type_create_get(self, version): self.skip_if_microversion_not_supported(version) name = data_utils.rand_name("tempest-manila") extra_specs = self.add_required_extra_specs_to_dict({"key": "value", }) # Create share type st_create = self.create_share_type( name, extra_specs=extra_specs, version=version) self.assertEqual(name, st_create['share_type']['name']) self._verify_is_public_key_name(st_create['share_type'], version) st_id = st_create["share_type"]["id"] # Get share type get = self.shares_v2_client.get_share_type(st_id, version=version) self.assertEqual(name, get["share_type"]["name"]) self.assertEqual(st_id, get["share_type"]["id"]) self.assertEqual(extra_specs, get["share_type"]["extra_specs"]) self._verify_is_public_key_name(get['share_type'], version) # Check that backwards compatibility didn't break self.assertDictMatch(get["volume_type"], get["share_type"]) @test.attr(type=["gate", "smoke", ]) @ddt.data('2.0', '2.6', '2.7') def test_share_type_create_list(self, version): self.skip_if_microversion_not_supported(version) name = data_utils.rand_name("tempest-manila") extra_specs = self.add_required_extra_specs_to_dict() # Create share type st_create = self.create_share_type( name, extra_specs=extra_specs, version=version) self._verify_is_public_key_name(st_create['share_type'], version) st_id = st_create["share_type"]["id"] # list share types st_list = self.shares_v2_client.list_share_types(version=version) sts = st_list["share_types"] self.assertTrue(len(sts) >= 1) self.assertTrue(any(st_id in st["id"] for st in sts)) for st in sts: self._verify_is_public_key_name(st, version) # Check that backwards compatibility didn't break vts = st_list["volume_types"] self.assertEqual(len(sts), len(vts)) for i in range(len(sts)): self.assertDictMatch(sts[i], vts[i]) @test.attr(type=["gate", "smoke", ]) def test_get_share_with_share_type(self): # Data share_name = data_utils.rand_name("share") shr_type_name = data_utils.rand_name("share-type") extra_specs = self.add_required_extra_specs_to_dict({ 
"storage_protocol": CONF.share.capability_storage_protocol, }) # Create share type st_create = self.create_share_type( shr_type_name, extra_specs=extra_specs) # Create share with share type share = self.create_share( name=share_name, share_type_id=st_create["share_type"]["id"]) self.assertEqual(share["name"], share_name) self.shares_client.wait_for_share_status(share["id"], "available") # Verify share info get = self.shares_v2_client.get_share(share["id"], version="2.5") self.assertEqual(share_name, get["name"]) self.assertEqual(share["id"], get["id"]) self.assertEqual(shr_type_name, get["share_type"]) get = self.shares_v2_client.get_share(share["id"], version="2.6") self.assertEqual(st_create["share_type"]["id"], get["share_type"]) self.assertEqual(shr_type_name, get["share_type_name"]) def test_private_share_type_access(self): name = data_utils.rand_name("tempest-manila") extra_specs = self.add_required_extra_specs_to_dict({"key": "value", }) project_id = self.shares_client.tenant_id # Create private share type st_create = self.create_share_type( name, False, extra_specs=extra_specs) self.assertEqual(name, st_create['share_type']['name']) st_id = st_create["share_type"]["id"] # It should not be listed without access st_list = self.shares_v2_client.list_share_types() sts = st_list["share_types"] self.assertFalse(any(st_id in st["id"] for st in sts)) # List projects that have access for share type - none expected access = self.shares_v2_client.list_access_to_share_type(st_id) self.assertEqual([], access) # Add project access to share type access = self.shares_v2_client.add_access_to_share_type( st_id, project_id) # Now it should be listed st_list = self.shares_client.list_share_types() sts = st_list["share_types"] self.assertTrue(any(st_id in st["id"] for st in sts)) # List projects that have access for share type - one expected access = self.shares_v2_client.list_access_to_share_type(st_id) expected = [{'share_type_id': st_id, 'project_id': project_id}, ] self.assertEqual(expected, access) # Remove project access from share type access = self.shares_v2_client.remove_access_from_share_type( st_id, project_id) # It should not be listed without access st_list = self.shares_client.list_share_types() sts = st_list["share_types"] self.assertFalse(any(st_id in st["id"] for st in sts)) # List projects that have access for share type - none expected access = self.shares_v2_client.list_access_to_share_type(st_id) self.assertEqual([], access) manila-2.0.0/manila_tempest_tests/tests/api/admin/test_snapshot_manage_negative.py0000664000567000056710000000754312701407107032014 0ustar jenkinsjenkins00000000000000# Copyright 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import six from tempest import config from tempest.lib.common.utils import data_utils from tempest.lib import exceptions as lib_exc from tempest import test import testtools from manila_tempest_tests.tests.api import base CONF = config.CONF class ManageNFSSnapshotNegativeTest(base.BaseSharesAdminTest): protocol = 'nfs' @classmethod @base.skip_if_microversion_lt("2.12") @testtools.skipIf( CONF.share.multitenancy_enabled, "Only for driver_handles_share_servers = False driver mode.") @testtools.skipUnless( CONF.share.run_manage_unmanage_snapshot_tests, "Manage/unmanage snapshot tests are disabled.") def resource_setup(cls): super(ManageNFSSnapshotNegativeTest, cls).resource_setup() if cls.protocol not in CONF.share.enable_protocols: message = "%s tests are disabled" % cls.protocol raise cls.skipException(message) # Create share type cls.st_name = data_utils.rand_name("tempest-manage-st-name") cls.extra_specs = { 'storage_protocol': CONF.share.capability_storage_protocol, 'driver_handles_share_servers': False, 'snapshot_support': six.text_type( CONF.share.capability_snapshot_support), } cls.st = cls.create_share_type( name=cls.st_name, cleanup_in_class=True, extra_specs=cls.extra_specs) # Create share cls.share = cls.create_share( share_type_id=cls.st['share_type']['id'], share_protocol=cls.protocol ) @test.attr(type=["gate", "smoke", "negative", ]) def test_manage_not_found(self): # Manage snapshot fails self.assertRaises(lib_exc.NotFound, self.shares_v2_client.manage_snapshot, 'fake-share-id', 'fake-vol-snap-id', driver_options={}) @test.attr(type=["gate", "smoke", "negative", ]) def test_manage_already_exists(self): # Manage already existing snapshot fails # Create snapshot snap = self.create_snapshot_wait_for_active(self.share['id']) get_snap = self.shares_v2_client.get_snapshot(snap['id']) self.assertEqual(self.share['id'], get_snap['share_id']) self.assertIsNotNone(get_snap['provider_location']) # Manage snapshot fails self.assertRaises(lib_exc.Conflict, self.shares_v2_client.manage_snapshot, self.share['id'], get_snap['provider_location'], driver_options={}) # Delete snapshot self.shares_v2_client.delete_snapshot(get_snap['id']) self.shares_client.wait_for_resource_deletion( snapshot_id=get_snap['id']) self.assertRaises(lib_exc.NotFound, self.shares_v2_client.get_snapshot, get_snap['id']) class ManageCIFSSnapshotNegativeTest(ManageNFSSnapshotNegativeTest): protocol = 'cifs' class ManageGLUSTERFSSnapshotNegativeTest(ManageNFSSnapshotNegativeTest): protocol = 'glusterfs' class ManageHDFSSnapshotNegativeTest(ManageNFSSnapshotNegativeTest): protocol = 'hdfs' manila-2.0.0/manila_tempest_tests/tests/api/admin/test_share_types_extra_specs_negative.py0000664000567000056710000002767612701407107033604 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
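# Minimal sketch of the protocol-reuse pattern used by the manage-snapshot
# negative tests above: the test body is written once against an NFS base
# class and re-run for CIFS, GlusterFS and HDFS simply by subclassing and
# overriding the `protocol` class attribute. SUPPORTED_PROTOCOLS and the
# trivial test below are hypothetical; the real classes skip via
# CONF.share.enable_protocols.
import unittest

SUPPORTED_PROTOCOLS = ("nfs", "cifs")  # assumed deployment capability


class ProtocolTestBase(unittest.TestCase):
    protocol = "nfs"

    def setUp(self):
        if self.protocol not in SUPPORTED_PROTOCOLS:
            self.skipTest("%s tests are disabled" % self.protocol)

    def test_protocol_is_lowercase(self):
        # Trivial shared check standing in for the real negative tests.
        self.assertEqual(self.protocol, self.protocol.lower())


class CIFSProtocolTest(ProtocolTestBase):
    protocol = "cifs"


class GlusterFSProtocolTest(ProtocolTestBase):
    protocol = "glusterfs"   # skipped unless listed as supported


if __name__ == "__main__":
    unittest.main()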
from tempest.lib.common.utils import data_utils # noqa from tempest.lib import exceptions as lib_exc # noqa from tempest import test # noqa from manila_tempest_tests import clients_share as clients from manila_tempest_tests.tests.api import base class ExtraSpecsAdminNegativeTest(base.BaseSharesAdminTest): def _create_share_type(self): name = data_utils.rand_name("unique_st_name") extra_specs = self.add_required_extra_specs_to_dict({"key": "value"}) return self.create_share_type(name, extra_specs=extra_specs) @classmethod def resource_setup(cls): super(ExtraSpecsAdminNegativeTest, cls).resource_setup() cls.member_shares_client = clients.Manager().shares_client @test.attr(type=["gate", "smoke", ]) def test_try_create_extra_specs_with_user(self): st = self._create_share_type() self.assertRaises( lib_exc.Forbidden, self.member_shares_client.create_share_type_extra_specs, st["share_type"]["id"], self.add_required_extra_specs_to_dict({"key": "new_value"})) @test.attr(type=["gate", "smoke", ]) def test_try_list_extra_specs_with_user(self): st = self._create_share_type() self.assertRaises( lib_exc.Forbidden, self.member_shares_client.get_share_type_extra_specs, st["share_type"]["id"]) @test.attr(type=["gate", "smoke", ]) def test_try_get_extra_spec_with_user(self): st = self._create_share_type() self.assertRaises( lib_exc.Forbidden, self.member_shares_client.get_share_type_extra_spec, st["share_type"]["id"], "key") @test.attr(type=["gate", "smoke", ]) def test_try_get_extra_specs_with_user(self): st = self._create_share_type() self.assertRaises( lib_exc.Forbidden, self.member_shares_client.get_share_type_extra_specs, st["share_type"]["id"]) @test.attr(type=["gate", "smoke", ]) def test_try_read_extra_specs_on_share_type_with_user(self): st = self._create_share_type() share_type = self.member_shares_client.get_share_type( st['share_type']['id']) # Verify a non-admin can only read the required extra-specs expected_keys = ['driver_handles_share_servers', 'snapshot_support'] actual_keys = share_type['share_type']['extra_specs'].keys() self.assertEqual(sorted(expected_keys), sorted(actual_keys), 'Incorrect extra specs visible to non-admin user; ' 'expected %s, got %s' % (expected_keys, actual_keys)) @test.attr(type=["gate", "smoke", ]) def test_try_update_extra_spec_with_user(self): st = self._create_share_type() self.assertRaises( lib_exc.Forbidden, self.member_shares_client.update_share_type_extra_spec, st["share_type"]["id"], "key", "new_value") @test.attr(type=["gate", "smoke", ]) def test_try_update_extra_specs_with_user(self): st = self._create_share_type() self.assertRaises( lib_exc.Forbidden, self.member_shares_client.update_share_type_extra_specs, st["share_type"]["id"], {"key": "new_value"}) @test.attr(type=["gate", "smoke", ]) def test_try_delete_extra_specs_with_user(self): st = self._create_share_type() self.assertRaises( lib_exc.Forbidden, self.member_shares_client.delete_share_type_extra_spec, st["share_type"]["id"], "key") @test.attr(type=["gate", "smoke", ]) def test_try_set_too_long_key(self): too_big_key = "k" * 256 st = self._create_share_type() self.assertRaises( lib_exc.BadRequest, self.shares_client.create_share_type_extra_specs, st["share_type"]["id"], self.add_required_extra_specs_to_dict({too_big_key: "value"})) @test.attr(type=["gate", "smoke", ]) def test_try_set_too_long_value_with_creation(self): too_big_value = "v" * 256 st = self._create_share_type() self.assertRaises( lib_exc.BadRequest, self.shares_client.create_share_type_extra_specs, st["share_type"]["id"], 
self.add_required_extra_specs_to_dict({"key": too_big_value})) @test.attr(type=["gate", "smoke", ]) def test_try_set_too_long_value_with_update(self): too_big_value = "v" * 256 st = self._create_share_type() self.shares_client.create_share_type_extra_specs( st["share_type"]["id"], self.add_required_extra_specs_to_dict({"key": "value"})) self.assertRaises( lib_exc.BadRequest, self.shares_client.update_share_type_extra_specs, st["share_type"]["id"], self.add_required_extra_specs_to_dict({"key": too_big_value})) @test.attr(type=["gate", "smoke", ]) def test_try_set_too_long_value_with_update_of_one_key(self): too_big_value = "v" * 256 st = self._create_share_type() self.shares_client.create_share_type_extra_specs( st["share_type"]["id"], self.add_required_extra_specs_to_dict({"key": "value"})) self.assertRaises(lib_exc.BadRequest, self.shares_client.update_share_type_extra_spec, st["share_type"]["id"], "key", too_big_value) @test.attr(type=["gate", "smoke", ]) def test_try_list_es_with_empty_shr_type_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.get_share_type_extra_specs, "") @test.attr(type=["gate", "smoke", ]) def test_try_list_es_with_invalid_shr_type_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.get_share_type_extra_specs, data_utils.rand_name("fake")) @test.attr(type=["gate", "smoke", ]) def test_try_create_es_with_empty_shr_type_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.create_share_type_extra_specs, "", {"key1": "value1", }) @test.attr(type=["gate", "smoke", ]) def test_try_create_es_with_invalid_shr_type_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.create_share_type_extra_specs, data_utils.rand_name("fake"), {"key1": "value1", }) @test.attr(type=["gate", "smoke", ]) def test_try_create_es_with_empty_specs(self): st = self._create_share_type() self.assertRaises(lib_exc.BadRequest, self.shares_client.create_share_type_extra_specs, st["share_type"]["id"], "") @test.attr(type=["gate", "smoke", ]) def test_try_create_es_with_invalid_specs(self): st = self._create_share_type() self.assertRaises(lib_exc.BadRequest, self.shares_client.create_share_type_extra_specs, st["share_type"]["id"], {"": "value_with_empty_key"}) @test.attr(type=["gate", "smoke", ]) def test_try_get_extra_spec_with_empty_key(self): st = self._create_share_type() self.assertRaises(lib_exc.NotFound, self.shares_client.get_share_type_extra_spec, st["share_type"]["id"], "") @test.attr(type=["gate", "smoke", ]) def test_try_get_extra_spec_with_invalid_key(self): st = self._create_share_type() self.assertRaises(lib_exc.NotFound, self.shares_client.get_share_type_extra_spec, st["share_type"]["id"], data_utils.rand_name("fake")) @test.attr(type=["gate", "smoke", ]) def test_try_get_extra_specs_with_empty_shr_type_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.get_share_type_extra_specs, "") @test.attr(type=["gate", "smoke", ]) def test_try_get_extra_specs_with_invalid_shr_type_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.get_share_type_extra_specs, data_utils.rand_name("fake")) @test.attr(type=["gate", "smoke", ]) def test_try_delete_es_key_with_empty_shr_type_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.delete_share_type_extra_spec, "", "key", ) @test.attr(type=["gate", "smoke", ]) def test_try_delete_es_key_with_invalid_shr_type_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.delete_share_type_extra_spec, data_utils.rand_name("fake"), "key", ) @test.attr(type=["gate", 
"smoke", ]) def test_try_delete_with_invalid_key(self): st = self._create_share_type() self.assertRaises(lib_exc.NotFound, self.shares_client.delete_share_type_extra_spec, st["share_type"]["id"], data_utils.rand_name("fake")) @test.attr(type=["gate", "smoke", ]) def test_try_update_spec_with_empty_shr_type_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.update_share_type_extra_spec, "", "key", "new_value") @test.attr(type=["gate", "smoke", ]) def test_try_update_spec_with_invalid_shr_type_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.update_share_type_extra_spec, data_utils.rand_name("fake"), "key", "new_value") @test.attr(type=["gate", "smoke", ]) def test_try_update_spec_with_empty_key(self): st = self._create_share_type() self.assertRaises(lib_exc.NotFound, self.shares_client.update_share_type_extra_spec, st["share_type"]["id"], "", "new_value") @test.attr(type=["gate", "smoke", ]) def test_try_update_with_invalid_shr_type_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.update_share_type_extra_specs, data_utils.rand_name("fake"), {"key": "new_value"}) @test.attr(type=["gate", "smoke", ]) def test_try_update_with_invalid_specs(self): st = self._create_share_type() self.assertRaises(lib_exc.BadRequest, self.shares_client.update_share_type_extra_specs, st["share_type"]["id"], {"": "new_value"}) @test.attr(type=["gate", "smoke", ]) def test_try_delete_spec_driver_handles_share_servers(self): st = self._create_share_type() # Try delete extra spec 'driver_handles_share_servers' self.assertRaises(lib_exc.Forbidden, self.shares_client.delete_share_type_extra_spec, st["share_type"]["id"], "driver_handles_share_servers") @test.attr(type=["gate", "smoke", ]) def test_try_delete_spec_snapshot_support(self): st = self._create_share_type() # Try delete extra spec 'snapshot_support' self.assertRaises(lib_exc.Forbidden, self.shares_client.delete_share_type_extra_spec, st["share_type"]["id"], "snapshot_support") manila-2.0.0/manila_tempest_tests/tests/api/admin/test_security_services.py0000664000567000056710000000452312701407107030530 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from tempest import test # noqa from manila_tempest_tests.tests.api import base from manila_tempest_tests.tests.api import test_security_services class SecurityServiceAdminTest( base.BaseSharesAdminTest, test_security_services.SecurityServiceListMixin): def setUp(self): super(SecurityServiceAdminTest, self).setUp() ss_ldap_data = { 'name': 'ss_ldap', 'dns_ip': '1.1.1.1', 'server': 'fake_server_1', 'domain': 'fake_domain_1', 'user': 'fake_user', 'password': 'pass', } ss_kerberos_data = { 'name': 'ss_kerberos', 'dns_ip': '2.2.2.2', 'server': 'fake_server_2', 'domain': 'fake_domain_2', 'user': 'test_user', 'password': 'word', } self.ss_ldap = self.create_security_service('ldap', **ss_ldap_data) self.ss_kerberos = self.create_security_service( 'kerberos', **ss_kerberos_data) @test.attr(type=["gate", "smoke", ]) def test_list_security_services_all_tenants(self): listed = self.shares_client.list_security_services( params={'all_tenants': 1}) self.assertTrue(any(self.ss_ldap['id'] == ss['id'] for ss in listed)) self.assertTrue(any(self.ss_kerberos['id'] == ss['id'] for ss in listed)) keys = ["name", "id", "status", "type", ] [self.assertIn(key, s_s.keys()) for s_s in listed for key in keys] @test.attr(type=["gate", "smoke", ]) def test_list_security_services_invalid_filters(self): listed = self.shares_client.list_security_services( params={'fake_opt': 'some_value'}) self.assertEqual(0, len(listed)) manila-2.0.0/manila_tempest_tests/tests/api/admin/__init__.py0000664000567000056710000000000012701407107025440 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila_tempest_tests/tests/api/admin/test_share_instances.py0000664000567000056710000000724112701407107030127 0ustar jenkinsjenkins00000000000000# Copyright 2015 Andrew Kerr # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import config from tempest import test from manila_tempest_tests.tests.api import base from manila_tempest_tests import utils CONF = config.CONF class ShareInstancesTest(base.BaseSharesAdminTest): @classmethod def resource_setup(cls): super(ShareInstancesTest, cls).resource_setup() cls.share = cls.create_share() @test.attr(type=["gate", ]) def test_get_instances_of_share_v2_3(self): """Test that we get only the 1 share instance back for the share.""" share_instances = self.shares_v2_client.get_instances_of_share( self.share['id'], version='2.3' ) self.assertEqual(1, len(share_instances), 'Too many share instances found; expected 1, ' 'found %s' % len(share_instances)) si = share_instances[0] self.assertEqual(self.share['id'], si['share_id'], 'Share instance %s has incorrect share id value; ' 'expected %s, got %s.' % (si['id'], self.share['id'], si['share_id'])) @test.attr(type=["gate", ]) def test_list_share_instances_v2_3(self): """Test that we get at least the share instance back for the share.""" share_instances = self.shares_v2_client.get_instances_of_share( self.share['id'], version='2.3' ) share_ids = [si['share_id'] for si in share_instances] msg = 'Share instance for share %s was not found.' 
% self.share['id'] self.assertIn(self.share['id'], share_ids, msg) def _get_share_instance(self, version): """Test that we get the proper keys back for the instance.""" share_instances = self.shares_v2_client.get_instances_of_share( self.share['id'], version=version, ) si = self.shares_v2_client.get_share_instance( share_instances[0]['id'], version=version) expected_keys = [ 'host', 'share_id', 'id', 'share_network_id', 'status', 'availability_zone', 'share_server_id', 'created_at', ] if utils.is_microversion_lt(version, '2.9'): expected_keys.extend(["export_location", "export_locations"]) if utils.is_microversion_ge(version, '2.10'): expected_keys.append("access_rules_status") expected_keys = sorted(expected_keys) actual_keys = sorted(si.keys()) self.assertEqual(expected_keys, actual_keys, 'Share instance %s returned incorrect keys; ' 'expected %s, got %s.' % ( si['id'], expected_keys, actual_keys)) @test.attr(type=["gate", ]) def test_get_share_instance_v2_3(self): self._get_share_instance('2.3') @test.attr(type=["gate", ]) def test_get_share_instance_v2_9(self): self._get_share_instance('2.9') @test.attr(type=["gate", ]) def test_get_share_instance_v2_10(self): self._get_share_instance('2.10') manila-2.0.0/manila_tempest_tests/tests/api/admin/test_consistency_groups.py0000664000567000056710000000556512701407107030725 0ustar jenkinsjenkins00000000000000# Copyright 2015 Andrew Kerr # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import config from tempest.lib.common.utils import data_utils from tempest import test import testtools from manila_tempest_tests.tests.api import base CONF = config.CONF CG_REQUIRED_ELEMENTS = {"id", "name", "description", "created_at", "status", "share_types", "project_id", "host", "links"} @testtools.skipUnless(CONF.share.run_consistency_group_tests, 'Consistency Group tests disabled.') class ConsistencyGroupsTest(base.BaseSharesAdminTest): @classmethod def resource_setup(cls): super(ConsistencyGroupsTest, cls).resource_setup() # Create 2 share_types name = data_utils.rand_name("tempest-manila") extra_specs = cls.add_required_extra_specs_to_dict() share_type = cls.create_share_type(name, extra_specs=extra_specs) cls.share_type = share_type['share_type'] name = data_utils.rand_name("tempest-manila") share_type = cls.create_share_type(name, extra_specs=extra_specs) cls.share_type2 = share_type['share_type'] @test.attr(type=["gate", ]) def test_create_cg_with_multiple_share_types_v2_4(self): # Create a consistency group consistency_group = self.create_consistency_group( cleanup_in_class=False, share_type_ids=[self.share_type['id'], self.share_type2['id']], version='2.4', ) self.assertTrue(CG_REQUIRED_ELEMENTS.issubset( consistency_group.keys()), 'At least one expected element missing from consistency group ' 'response. Expected %(expected)s, got %(actual)s.' 
% { "expected": CG_REQUIRED_ELEMENTS, "actual": consistency_group.keys()}) actual_share_types = consistency_group['share_types'] expected_share_types = [self.share_type['id'], self.share_type2['id']] self.assertEqual(sorted(expected_share_types), sorted(actual_share_types), 'Incorrect share types applied to consistency group ' '%s. Expected %s, got %s' % (consistency_group['id'], expected_share_types, actual_share_types)) manila-2.0.0/manila_tempest_tests/tests/api/admin/test_share_types_negative.py0000664000567000056710000001027212701407107031164 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib.common.utils import data_utils # noqa from tempest.lib import exceptions as lib_exc # noqa from tempest import test # noqa from manila_tempest_tests import clients_share as clients from manila_tempest_tests.tests.api import base class ShareTypesAdminNegativeTest(base.BaseSharesAdminTest): def _create_share_type(self): name = data_utils.rand_name("unique_st_name") extra_specs = self.add_required_extra_specs_to_dict({"key": "value"}) return self.create_share_type(name, extra_specs=extra_specs) @classmethod def resource_setup(cls): super(ShareTypesAdminNegativeTest, cls).resource_setup() cls.member_shares_client = clients.Manager().shares_client @test.attr(type=["gate", "smoke", ]) def test_create_share_with_nonexistent_share_type(self): self.assertRaises(lib_exc.NotFound, self.create_share, share_type_id=data_utils.rand_name("fake")) @test.attr(type=["gate", "smoke", ]) def test_create_share_type_with_empty_name(self): self.assertRaises(lib_exc.BadRequest, self.create_share_type, '') @test.attr(type=["gate", "smoke", ]) def test_create_share_type_with_too_big_name(self): self.assertRaises(lib_exc.BadRequest, self.create_share_type, "x" * 256) @test.attr(type=["gate", "smoke", ]) def test_get_share_type_by_nonexistent_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.get_share_type, data_utils.rand_name("fake")) @test.attr(type=["gate", "smoke", ]) def test_try_delete_share_type_by_nonexistent_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.delete_share_type, data_utils.rand_name("fake")) @test.attr(type=["gate", "smoke", ]) def test_try_create_duplicate_of_share_type(self): st = self._create_share_type() self.assertRaises(lib_exc.Conflict, self.create_share_type, st["share_type"]["name"], extra_specs=self.add_required_extra_specs_to_dict()) @test.attr(type=["gate", "smoke", ]) def test_add_share_type_allowed_for_public(self): st = self._create_share_type() self.assertRaises(lib_exc.Conflict, self.shares_client.add_access_to_share_type, st["share_type"]["id"], self.shares_client.tenant_id) @test.attr(type=["gate", "smoke", ]) def test_remove_share_type_allowed_for_public(self): st = self._create_share_type() self.assertRaises(lib_exc.Conflict, self.shares_client.remove_access_from_share_type, st["share_type"]["id"], self.shares_client.tenant_id) @test.attr(type=["gate", 
"smoke", ]) def test_add_share_type_by_nonexistent_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.add_access_to_share_type, data_utils.rand_name("fake"), self.shares_client.tenant_id) @test.attr(type=["gate", "smoke", ]) def test_remove_share_type_by_nonexistent_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.remove_access_from_share_type, data_utils.rand_name("fake"), self.shares_client.tenant_id) manila-2.0.0/manila_tempest_tests/tests/api/admin/test_services.py0000664000567000056710000001027012701407107026575 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from tempest import test from manila_tempest_tests.tests.api import base @ddt.ddt class ServicesAdminTest(base.BaseSharesAdminTest): def setUp(self): super(ServicesAdminTest, self).setUp() self.services = self.shares_client.list_services() @test.attr(type=["gate", "smoke", ]) @ddt.data('shares_client', 'shares_v2_client') def test_list_services(self, client_name): services = getattr(self, client_name).list_services() self.assertNotEqual(0, len(services)) for service in services: self.assertIsNotNone(service['id']) @test.attr(type=["gate", "smoke", ]) @ddt.data('shares_client', 'shares_v2_client') def test_get_services_by_host_name(self, client_name): host = self.services[0]["host"] params = {"host": host} services = getattr(self, client_name).list_services(params) self.assertNotEqual(0, len(services)) for service in services: self.assertEqual(host, service["host"]) @test.attr(type=["gate", "smoke", ]) @ddt.data('shares_client', 'shares_v2_client') def test_get_services_by_binary_name(self, client_name): binary = self.services[0]["binary"] params = {"binary": binary, } services = getattr(self, client_name).list_services(params) self.assertNotEqual(0, len(services)) for service in services: self.assertEqual(binary, service["binary"]) @test.attr(type=["gate", "smoke", ]) @ddt.data('shares_client', 'shares_v2_client') def test_get_services_by_availability_zone(self, client_name): zone = self.services[0]["zone"] params = {"zone": zone, } services = getattr(self, client_name).list_services(params) self.assertNotEqual(0, len(services)) for service in services: self.assertEqual(zone, service["zone"]) @test.attr(type=["gate", "smoke", ]) @ddt.data('shares_client', 'shares_v2_client') def test_get_services_by_status(self, client_name): status = self.services[0]["status"] params = {"status": status, } services = getattr(self, client_name).list_services(params) self.assertNotEqual(0, len(services)) for service in services: self.assertEqual(status, service["status"]) @test.attr(type=["gate", "smoke", ]) @ddt.data('shares_client', 'shares_v2_client') def test_get_services_by_state(self, client_name): state = self.services[0]["state"] params = {"state": state, } services = getattr(self, client_name).list_services(params) self.assertNotEqual(0, len(services)) for service in services: self.assertEqual(state, service["state"]) 
@test.attr(type=["gate", "smoke", ]) @ddt.data('shares_client', 'shares_v2_client') def test_get_services_by_all_filters(self, client_name): params = { "host": self.services[0]["host"], "binary": self.services[0]["binary"], "zone": self.services[0]["zone"], "status": self.services[0]["status"], "state": self.services[0]["state"], } services = getattr(self, client_name).list_services(params) self.assertNotEqual(0, len(services)) for service in services: self.assertEqual(params["host"], service["host"]) self.assertEqual(params["binary"], service["binary"]) self.assertEqual(params["zone"], service["zone"]) self.assertEqual(params["status"], service["status"]) self.assertEqual(params["state"], service["state"]) manila-2.0.0/manila_tempest_tests/tests/api/admin/test_multi_backend.py0000664000567000056710000000752112701407107027560 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import config # noqa from tempest.lib.common.utils import data_utils # noqa from tempest import test # noqa from manila_tempest_tests.tests.api import base CONF = config.CONF class ShareMultiBackendTest(base.BaseSharesAdminTest): @classmethod def resource_setup(cls): super(ShareMultiBackendTest, cls).resource_setup() if not CONF.share.multi_backend: raise cls.skipException("Manila multi-backend tests are disabled.") elif len(CONF.share.backend_names) < 2: raise cls.skipException("For running multi-backend tests required" " two names in config. Skipping.") elif any(not name for name in CONF.share.backend_names): raise cls.skipException("Share backend names can not be empty. 
" "Skipping.") cls.sts = [] cls.shares = [] share_data_list = [] # Create share types for i in [0, 1]: st_name = data_utils.rand_name("share-type-%s" % str(i)) extra_specs = { "share_backend_name": CONF.share.backend_names[i], } st = cls.create_share_type( name=st_name, extra_specs=cls.add_required_extra_specs_to_dict(extra_specs)) cls.sts.append(st["share_type"]) st_id = st["share_type"]["id"] share_data_list.append({"kwargs": {"share_type_id": st_id}}) # Create shares using precreated share types cls.shares = cls.create_shares(share_data_list) @test.attr(type=["gate", "smoke", ]) def test_share_backend_name_reporting(self): # Share's 'host' should be like "hostname@backend_name" for share in self.shares: get = self.shares_client.get_share(share['id']) self.assertTrue(len(get["host"].split("@")) == 2) @test.attr(type=["gate", "smoke", ]) def test_share_share_type(self): # Share type should be the same as provided with share creation for i in [0, 1]: get = self.shares_v2_client.get_share(self.shares[i]['id'], version="2.5") self.assertEqual(self.sts[i]["name"], get["share_type"]) @test.attr(type=["gate", "smoke", ]) def test_share_share_type_v_2_6(self): # Share type should be the same as provided with share creation for i in [0, 1]: get = self.shares_v2_client.get_share(self.shares[i]['id'], version="2.6") self.assertEqual(self.sts[i]["id"], get["share_type"]) self.assertEqual(self.sts[i]["name"], get["share_type_name"]) @test.attr(type=["gate", ]) def test_share_backend_name_distinction(self): # Different share backends should have different host records if CONF.share.backend_names[0] == CONF.share.backend_names[1]: raise self.skipException("Share backends " "configured with same name. Skipping.") get1 = self.shares_client.get_share(self.shares[0]['id']) get2 = self.shares_client.get_share(self.shares[1]['id']) self.assertNotEqual(get1["host"], get2["host"]) manila-2.0.0/manila_tempest_tests/tests/api/admin/test_quotas.py0000664000567000056710000003065612701407107026300 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import config # noqa from tempest import test # noqa from manila_tempest_tests import clients_share as clients from manila_tempest_tests.tests.api import base CONF = config.CONF class SharesAdminQuotasTest(base.BaseSharesAdminTest): @classmethod def resource_setup(cls): if not CONF.share.run_quota_tests: msg = "Quota tests are disabled." 
            raise cls.skipException(msg)
        cls.os = clients.AdminManager()
        super(SharesAdminQuotasTest, cls).resource_setup()
        cls.user_id = cls.shares_v2_client.user_id
        cls.tenant_id = cls.shares_v2_client.tenant_id

    @test.attr(type=["gate", "smoke", ])
    def test_default_quotas(self):
        quotas = self.shares_v2_client.default_quotas(self.tenant_id)
        self.assertGreater(int(quotas["gigabytes"]), -2)
        self.assertGreater(int(quotas["snapshot_gigabytes"]), -2)
        self.assertGreater(int(quotas["shares"]), -2)
        self.assertGreater(int(quotas["snapshots"]), -2)
        self.assertGreater(int(quotas["share_networks"]), -2)

    @test.attr(type=["gate", "smoke", ])
    def test_show_quotas(self):
        quotas = self.shares_v2_client.show_quotas(self.tenant_id)
        self.assertGreater(int(quotas["gigabytes"]), -2)
        self.assertGreater(int(quotas["snapshot_gigabytes"]), -2)
        self.assertGreater(int(quotas["shares"]), -2)
        self.assertGreater(int(quotas["snapshots"]), -2)
        self.assertGreater(int(quotas["share_networks"]), -2)

    @test.attr(type=["gate", "smoke", ])
    def test_show_quotas_for_user(self):
        quotas = self.shares_v2_client.show_quotas(
            self.tenant_id, self.user_id)
        self.assertGreater(int(quotas["gigabytes"]), -2)
        self.assertGreater(int(quotas["snapshot_gigabytes"]), -2)
        self.assertGreater(int(quotas["shares"]), -2)
        self.assertGreater(int(quotas["snapshots"]), -2)
        self.assertGreater(int(quotas["share_networks"]), -2)


class SharesAdminQuotasUpdateTest(base.BaseSharesAdminTest):

    force_tenant_isolation = True
    client_version = '2'

    @classmethod
    def resource_setup(cls):
        if not CONF.share.run_quota_tests:
            msg = "Quota tests are disabled."
            raise cls.skipException(msg)
        cls.os = clients.AdminManager()
        super(SharesAdminQuotasUpdateTest, cls).resource_setup()

    def setUp(self):
        super(self.__class__, self).setUp()
        self.client = self.get_client_with_isolated_creds(
            client_version=self.client_version)
        self.tenant_id = self.client.tenant_id
        self.user_id = self.client.user_id

    @test.attr(type=["gate", "smoke", ])
    def test_update_tenant_quota_shares(self):
        # get current quotas
        quotas = self.client.show_quotas(self.tenant_id)
        new_quota = int(quotas["shares"]) + 2

        # set new quota for shares
        updated = self.client.update_quotas(self.tenant_id, shares=new_quota)
        self.assertEqual(new_quota, int(updated["shares"]))

    @test.attr(type=["gate", "smoke", ])
    def test_update_user_quota_shares(self):
        # get current quotas
        quotas = self.client.show_quotas(self.tenant_id, self.user_id)
        new_quota = int(quotas["shares"]) - 1

        # set new quota for shares
        updated = self.client.update_quotas(
            self.tenant_id, self.user_id, shares=new_quota)
        self.assertEqual(new_quota, int(updated["shares"]))

    @test.attr(type=["gate", "smoke", ])
    def test_update_tenant_quota_snapshots(self):
        # get current quotas
        quotas = self.client.show_quotas(self.tenant_id)
        new_quota = int(quotas["snapshots"]) + 2

        # set new quota for snapshots
        updated = self.client.update_quotas(
            self.tenant_id, snapshots=new_quota)
        self.assertEqual(new_quota, int(updated["snapshots"]))

    @test.attr(type=["gate", "smoke", ])
    def test_update_user_quota_snapshots(self):
        # get current quotas
        quotas = self.client.show_quotas(self.tenant_id, self.user_id)
        new_quota = int(quotas["snapshots"]) - 1

        # set new quota for snapshots
        updated = self.client.update_quotas(
            self.tenant_id, self.user_id, snapshots=new_quota)
        self.assertEqual(new_quota, int(updated["snapshots"]))

    @test.attr(type=["gate", "smoke", ])
    def test_update_tenant_quota_gigabytes(self):
        # get current quotas
        custom = self.client.show_quotas(self.tenant_id)

        # make quotas for update
        gigabytes = int(custom["gigabytes"]) + 2

        # set new quota for gigabytes
        updated = self.client.update_quotas(
            self.tenant_id, gigabytes=gigabytes)
        self.assertEqual(gigabytes, int(updated["gigabytes"]))

    @test.attr(type=["gate", "smoke", ])
    def test_update_tenant_quota_snapshot_gigabytes(self):
        # get current quotas
        custom = self.client.show_quotas(self.tenant_id)

        # make quotas for update
        snapshot_gigabytes = int(custom["snapshot_gigabytes"]) + 2

        # set new quota for snapshot gigabytes
        updated = self.client.update_quotas(
            self.tenant_id,
            snapshot_gigabytes=snapshot_gigabytes)
        self.assertEqual(snapshot_gigabytes,
                         int(updated["snapshot_gigabytes"]))

    @test.attr(type=["gate", "smoke", ])
    def test_update_user_quota_gigabytes(self):
        # get current quotas
        custom = self.client.show_quotas(self.tenant_id, self.user_id)

        # make quotas for update
        gigabytes = int(custom["gigabytes"]) - 1

        # set new quota for gigabytes
        updated = self.client.update_quotas(
            self.tenant_id, self.user_id, gigabytes=gigabytes)
        self.assertEqual(gigabytes, int(updated["gigabytes"]))

    @test.attr(type=["gate", "smoke", ])
    def test_update_user_quota_snapshot_gigabytes(self):
        # get current quotas
        custom = self.client.show_quotas(self.tenant_id, self.user_id)

        # make quotas for update
        snapshot_gigabytes = int(custom["snapshot_gigabytes"]) - 1

        # set new quota for snapshot gigabytes
        updated = self.client.update_quotas(
            self.tenant_id, self.user_id,
            snapshot_gigabytes=snapshot_gigabytes)
        self.assertEqual(snapshot_gigabytes,
                         int(updated["snapshot_gigabytes"]))

    @test.attr(type=["gate", "smoke", ])
    def test_update_tenant_quota_share_networks(self):
        # get current quotas
        quotas = self.client.show_quotas(self.tenant_id)
        new_quota = int(quotas["share_networks"]) + 2

        # set new quota for share-networks
        updated = self.client.update_quotas(
            self.tenant_id, share_networks=new_quota)
        self.assertEqual(new_quota, int(updated["share_networks"]))

    @test.attr(type=["gate", "smoke", ])
    def test_update_user_quota_share_networks(self):
        # get current quotas
        quotas = self.client.show_quotas(
            self.tenant_id, self.user_id)
        new_quota = int(quotas["share_networks"]) - 1

        # set new quota for share-networks
        updated = self.client.update_quotas(
            self.tenant_id, self.user_id,
            share_networks=new_quota)
        self.assertEqual(new_quota, int(updated["share_networks"]))

    @test.attr(type=["gate", "smoke", ])
    def test_reset_tenant_quotas(self):
        # get default_quotas
        default = self.client.default_quotas(self.tenant_id)

        # get current quotas
        custom = self.client.show_quotas(self.tenant_id)

        # make quotas for update
        shares = int(custom["shares"]) + 2
        snapshots = int(custom["snapshots"]) + 2
        gigabytes = int(custom["gigabytes"]) + 2
        snapshot_gigabytes = int(custom["snapshot_gigabytes"]) + 2
        share_networks = int(custom["share_networks"]) + 2

        # set new quota
        updated = self.client.update_quotas(
            self.tenant_id,
            shares=shares,
            snapshots=snapshots,
            gigabytes=gigabytes,
            snapshot_gigabytes=snapshot_gigabytes,
            share_networks=share_networks)
        self.assertEqual(shares, int(updated["shares"]))
        self.assertEqual(snapshots, int(updated["snapshots"]))
        self.assertEqual(gigabytes, int(updated["gigabytes"]))
        self.assertEqual(snapshot_gigabytes,
                         int(updated["snapshot_gigabytes"]))
        self.assertEqual(share_networks, int(updated["share_networks"]))

        # reset customized quotas
        self.client.reset_quotas(self.tenant_id)

        # verify quotas are back to the defaults
        after_reset = self.client.show_quotas(self.tenant_id)
        self.assertEqual(int(default["shares"]), int(after_reset["shares"]))
        self.assertEqual(int(default["snapshots"]),
                         int(after_reset["snapshots"]))
        self.assertEqual(int(default["gigabytes"]),
                         int(after_reset["gigabytes"]))
        self.assertEqual(int(default["share_networks"]),
                         int(after_reset["share_networks"]))

    @test.attr(type=["gate", "smoke", ])
    def test_unlimited_quota_for_shares(self):
        self.client.update_quotas(self.tenant_id, shares=-1)

        quotas = self.client.show_quotas(self.tenant_id)

        self.assertEqual(-1, quotas.get('shares'))

    @test.attr(type=["gate", "smoke", ])
    def test_unlimited_user_quota_for_shares(self):
        self.client.update_quotas(
            self.tenant_id, self.user_id, shares=-1)

        quotas = self.client.show_quotas(self.tenant_id, self.user_id)

        self.assertEqual(-1, quotas.get('shares'))

    @test.attr(type=["gate", "smoke", ])
    def test_unlimited_quota_for_snapshots(self):
        self.client.update_quotas(self.tenant_id, snapshots=-1)

        quotas = self.client.show_quotas(self.tenant_id)

        self.assertEqual(-1, quotas.get('snapshots'))

    @test.attr(type=["gate", "smoke", ])
    def test_unlimited_user_quota_for_snapshots(self):
        self.client.update_quotas(
            self.tenant_id, self.user_id, snapshots=-1)

        quotas = self.client.show_quotas(self.tenant_id, self.user_id)

        self.assertEqual(-1, quotas.get('snapshots'))

    @test.attr(type=["gate", "smoke", ])
    def test_unlimited_quota_for_gigabytes(self):
        self.client.update_quotas(self.tenant_id, gigabytes=-1)

        quotas = self.client.show_quotas(self.tenant_id)

        self.assertEqual(-1, quotas.get('gigabytes'))

    @test.attr(type=["gate", "smoke", ])
    def test_unlimited_quota_for_snapshot_gigabytes(self):
        self.client.update_quotas(
            self.tenant_id, snapshot_gigabytes=-1)

        quotas = self.client.show_quotas(self.tenant_id)

        self.assertEqual(-1, quotas.get('snapshot_gigabytes'))

    @test.attr(type=["gate", "smoke", ])
    def test_unlimited_user_quota_for_gigabytes(self):
        self.client.update_quotas(
            self.tenant_id, self.user_id, gigabytes=-1)

        quotas = self.client.show_quotas(self.tenant_id, self.user_id)

        self.assertEqual(-1, quotas.get('gigabytes'))

    @test.attr(type=["gate", "smoke", ])
    def test_unlimited_user_quota_for_snapshot_gigabytes(self):
        self.client.update_quotas(
            self.tenant_id, self.user_id, snapshot_gigabytes=-1)

        quotas = self.client.show_quotas(self.tenant_id, self.user_id)

        self.assertEqual(-1, quotas.get('snapshot_gigabytes'))

    @test.attr(type=["gate", "smoke", ])
    def test_unlimited_quota_for_share_networks(self):
        self.client.update_quotas(self.tenant_id, share_networks=-1)

        quotas = self.client.show_quotas(self.tenant_id)

        self.assertEqual(-1, quotas.get('share_networks'))

    @test.attr(type=["gate", "smoke", ])
    def test_unlimited_user_quota_for_share_networks(self):
        self.client.update_quotas(
            self.tenant_id, self.user_id, share_networks=-1)

        quotas = self.client.show_quotas(self.tenant_id, self.user_id)

        self.assertEqual(-1, quotas.get('share_networks'))
manila-2.0.0/manila_tempest_tests/tests/api/admin/test_export_locations_negative.py0000664000567000056710000000702512701407107032234 0ustar jenkinsjenkins00000000000000
# Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config
from tempest.lib import exceptions as lib_exc
from tempest import test

from manila_tempest_tests import clients_share as clients
from manila_tempest_tests.tests.api import base

CONF = config.CONF


@base.skip_if_microversion_not_supported("2.9")
class ExportLocationsNegativeTest(base.BaseSharesAdminTest):

    @classmethod
    def resource_setup(cls):
        super(ExportLocationsNegativeTest, cls).resource_setup()
        cls.admin_client = cls.shares_v2_client
        cls.member_client = clients.Manager().shares_v2_client
        cls.share = cls.create_share()
        cls.share = cls.shares_v2_client.get_share(cls.share['id'])
        cls.share_instances = cls.shares_v2_client.get_instances_of_share(
            cls.share['id'])

    @test.attr(type=["gate", "negative"])
    def test_get_export_locations_by_inexistent_share(self):
        self.assertRaises(
            lib_exc.NotFound,
            self.admin_client.list_share_export_locations,
            "fake-inexistent-share-id",
        )

    @test.attr(type=["gate", "negative"])
    def test_get_inexistent_share_export_location(self):
        self.assertRaises(
            lib_exc.NotFound,
            self.admin_client.get_share_export_location,
            self.share['id'],
            "fake-inexistent-share-instance-id",
        )

    @test.attr(type=["gate", "negative"])
    def test_get_export_locations_by_inexistent_share_instance(self):
        self.assertRaises(
            lib_exc.NotFound,
            self.admin_client.list_share_instance_export_locations,
            "fake-inexistent-share-instance-id",
        )

    @test.attr(type=["gate", "negative"])
    def test_get_inexistent_share_instance_export_location(self):
        for share_instance in self.share_instances:
            self.assertRaises(
                lib_exc.NotFound,
                self.admin_client.get_share_instance_export_location,
                share_instance['id'],
                "fake-inexistent-share-instance-id",
            )

    @test.attr(type=["gate", "negative"])
    def test_list_share_instance_export_locations_by_member(self):
        for share_instance in self.share_instances:
            self.assertRaises(
                lib_exc.Forbidden,
                self.member_client.list_share_instance_export_locations,
                "fake-inexistent-share-instance-id",
            )

    @test.attr(type=["gate", "negative"])
    def test_get_share_instance_export_location_by_member(self):
        for share_instance in self.share_instances:
            export_locations = (
                self.admin_client.list_share_instance_export_locations(
                    share_instance['id']))
            for el in export_locations:
                self.assertRaises(
                    lib_exc.Forbidden,
                    self.member_client.get_share_instance_export_location,
                    share_instance['id'], el['id'],
                )
manila-2.0.0/manila_tempest_tests/tests/api/admin/test_quotas_negative.py0000664000567000056710000001654012701407107030156 0ustar jenkinsjenkins00000000000000
# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config  # noqa
from tempest.lib import exceptions as lib_exc  # noqa
from tempest import test  # noqa
import testtools  # noqa

from manila_tempest_tests import clients_share as clients
from manila_tempest_tests.tests.api import base

CONF = config.CONF


class SharesAdminQuotasNegativeTest(base.BaseSharesAdminTest):

    force_tenant_isolation = True

    @classmethod
    def resource_setup(cls):
        if not CONF.share.run_quota_tests:
            msg = "Quota tests are disabled."
            raise cls.skipException(msg)
        cls.os = clients.AdminManager()
        super(SharesAdminQuotasNegativeTest, cls).resource_setup()
        cls.user_id = cls.shares_client.user_id
        cls.tenant_id = cls.shares_client.tenant_id

    @test.attr(type=["gate", "smoke", "negative"])
    def test_get_quotas_with_empty_tenant_id(self):
        self.assertRaises(lib_exc.NotFound,
                          self.shares_client.show_quotas, "")

    @test.attr(type=["gate", "smoke", "negative"])
    def test_reset_quotas_with_empty_tenant_id(self):
        client = self.get_client_with_isolated_creds()
        self.assertRaises(lib_exc.NotFound,
                          client.reset_quotas, "")

    @test.attr(type=["gate", "smoke", "negative"])
    def test_update_shares_quota_with_wrong_data(self):
        # -1 is acceptable value as unlimited
        client = self.get_client_with_isolated_creds()
        self.assertRaises(lib_exc.BadRequest,
                          client.update_quotas,
                          client.tenant_id,
                          shares=-2)

    @test.attr(type=["gate", "smoke", "negative"])
    def test_update_snapshots_quota_with_wrong_data(self):
        # -1 is acceptable value as unlimited
        client = self.get_client_with_isolated_creds()
        self.assertRaises(lib_exc.BadRequest,
                          client.update_quotas,
                          client.tenant_id,
                          snapshots=-2)

    @test.attr(type=["gate", "smoke", "negative"])
    def test_update_gigabytes_quota_with_wrong_data(self):
        # -1 is acceptable value as unlimited
        client = self.get_client_with_isolated_creds()
        self.assertRaises(lib_exc.BadRequest,
                          client.update_quotas,
                          client.tenant_id,
                          gigabytes=-2)

    @test.attr(type=["gate", "smoke", "negative"])
    def test_update_snapshot_gigabytes_quota_with_wrong_data(self):
        # -1 is acceptable value as unlimited
        client = self.get_client_with_isolated_creds()
        self.assertRaises(lib_exc.BadRequest,
                          client.update_quotas,
                          client.tenant_id,
                          snapshot_gigabytes=-2)

    @test.attr(type=["gate", "smoke", "negative"])
    def test_update_share_networks_quota_with_wrong_data(self):
        # -1 is acceptable value as unlimited
        client = self.get_client_with_isolated_creds()
        self.assertRaises(lib_exc.BadRequest,
                          client.update_quotas,
                          client.tenant_id,
                          share_networks=-2)

    @test.attr(type=["gate", "smoke", "negative"])
    def test_create_share_with_size_bigger_than_quota(self):
        quotas = self.shares_client.show_quotas(
            self.shares_client.tenant_id)
        overquota = int(quotas['gigabytes']) + 2

        # try schedule share with size, bigger than gigabytes quota
        self.assertRaises(lib_exc.OverLimit,
                          self.create_share,
                          size=overquota)

    @test.attr(type=["gate", "smoke", "negative"])
    def test_try_set_user_quota_shares_bigger_than_tenant_quota(self):
        client = self.get_client_with_isolated_creds()

        # get current quotas for tenant
        tenant_quotas = client.show_quotas(client.tenant_id)

        # try set user quota for shares bigger than tenant quota
        bigger_value = int(tenant_quotas["shares"]) + 2
        self.assertRaises(lib_exc.BadRequest,
                          client.update_quotas,
                          client.tenant_id,
                          client.user_id,
                          shares=bigger_value)

    @test.attr(type=["gate", "smoke", "negative"])
    def test_try_set_user_quota_snaps_bigger_than_tenant_quota(self):
        client = self.get_client_with_isolated_creds()

        # get current quotas for tenant
        tenant_quotas = client.show_quotas(client.tenant_id)

        # try set user quota for snapshots bigger than tenant quota
        bigger_value = int(tenant_quotas["snapshots"]) + 2
        self.assertRaises(lib_exc.BadRequest,
                          client.update_quotas,
                          client.tenant_id,
                          client.user_id,
                          snapshots=bigger_value)

    @test.attr(type=["gate", "smoke", "negative"])
    def test_try_set_user_quota_gigabytes_bigger_than_tenant_quota(self):
        client = self.get_client_with_isolated_creds()

        # get current quotas for tenant
        tenant_quotas = client.show_quotas(client.tenant_id)

        # try set user quota for gigabytes bigger than tenant quota
        bigger_value = int(tenant_quotas["gigabytes"]) + 2
        self.assertRaises(lib_exc.BadRequest,
                          client.update_quotas,
                          client.tenant_id,
                          client.user_id,
                          gigabytes=bigger_value)

    @test.attr(type=["gate", "smoke", "negative"])
    def test_try_set_user_quota_snap_gigabytes_bigger_than_tenant_quota(self):
        client = self.get_client_with_isolated_creds()

        # get current quotas for tenant
        tenant_quotas = client.show_quotas(client.tenant_id)

        # try set user quota for snapshot gigabytes bigger than tenant quota
        bigger_value = int(tenant_quotas["snapshot_gigabytes"]) + 2
        self.assertRaises(lib_exc.BadRequest,
                          client.update_quotas,
                          client.tenant_id,
                          client.user_id,
                          snapshot_gigabytes=bigger_value)

    @test.attr(type=["gate", "smoke", "negative"])
    def test_try_set_user_quota_share_networks_bigger_than_tenant_quota(self):
        client = self.get_client_with_isolated_creds()

        # get current quotas for tenant
        tenant_quotas = client.show_quotas(client.tenant_id)

        # try set user quota for share_networks bigger than tenant quota
        bigger_value = int(tenant_quotas["share_networks"]) + 2
        self.assertRaises(lib_exc.BadRequest,
                          client.update_quotas,
                          client.tenant_id,
                          client.user_id,
                          share_networks=bigger_value)
manila-2.0.0/manila_tempest_tests/tests/api/admin/test_snapshot_manage.py0000664000567000056710000001222012701407107030116 0ustar jenkinsjenkins00000000000000
# Copyright 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import six
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
from tempest import test
import testtools

from manila_tempest_tests.tests.api import base

CONF = config.CONF


class ManageNFSSnapshotTest(base.BaseSharesAdminTest):
    protocol = 'nfs'

    # NOTE(vponomaryov): be careful running these tests using generic driver
    # because cinder volume snapshots won't be deleted.
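    #
    # A hedged manual-cleanup sketch (not executed by these tests, and the
    # command names assume the legacy python-cinderclient CLI is installed):
    # if the generic driver does leave Cinder volume snapshots behind, they
    # can usually be located and removed by hand, for example:
    #
    #     $ cinder snapshot-list
    #     $ cinder snapshot-delete <leftover-snapshot-id>
    #
    # The leftover snapshot IDs depend on the backend and test run, so treat
    # this as an illustrative hint rather than an official cleanup procedure.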
@classmethod @base.skip_if_microversion_lt("2.12") @testtools.skipIf( CONF.share.multitenancy_enabled, "Only for driver_handles_share_servers = False driver mode.") @testtools.skipUnless( CONF.share.run_manage_unmanage_snapshot_tests, "Manage/unmanage snapshot tests are disabled.") def resource_setup(cls): super(ManageNFSSnapshotTest, cls).resource_setup() if cls.protocol not in CONF.share.enable_protocols: message = "%s tests are disabled" % cls.protocol raise cls.skipException(message) # Create share type cls.st_name = data_utils.rand_name("tempest-manage-st-name") cls.extra_specs = { 'storage_protocol': CONF.share.capability_storage_protocol, 'driver_handles_share_servers': False, 'snapshot_support': six.text_type( CONF.share.capability_snapshot_support), } cls.st = cls.create_share_type( name=cls.st_name, cleanup_in_class=True, extra_specs=cls.extra_specs) creation_data = {'kwargs': { 'share_type_id': cls.st['share_type']['id'], 'share_protocol': cls.protocol, }} # Data for creating shares data = [creation_data] shares_created = cls.create_shares(data) cls.snapshot = None cls.shares = [] # Load all share data (host, etc.) for share in shares_created: cls.shares.append(cls.shares_v2_client.get_share(share['id'])) # Create snapshot snap_name = data_utils.rand_name("tempest-snapshot-name") snap_desc = data_utils.rand_name( "tempest-snapshot-description") snap = cls.create_snapshot_wait_for_active( share['id'], snap_name, snap_desc) cls.snapshot = cls.shares_v2_client.get_snapshot(snap['id']) # Unmanage snapshot cls.shares_v2_client.unmanage_snapshot(snap['id']) cls.shares_client.wait_for_resource_deletion( snapshot_id=snap['id']) def _test_manage(self, snapshot, version=CONF.share.max_api_microversion): name = ("Name for 'managed' snapshot that had ID %s" % snapshot['id']) description = "Description for 'managed' snapshot" # Manage snapshot share_id = snapshot['share_id'] snapshot = self.shares_v2_client.manage_snapshot( share_id, snapshot['provider_location'], name=name, description=description, driver_options={} ) # Add managed snapshot to cleanup queue self.method_resources.insert( 0, {'type': 'snapshot', 'id': snapshot['id'], 'client': self.shares_v2_client}) # Wait for success self.shares_v2_client.wait_for_snapshot_status(snapshot['id'], 'available') # Verify data of managed snapshot get_snapshot = self.shares_v2_client.get_snapshot(snapshot['id']) self.assertEqual(name, get_snapshot['name']) self.assertEqual(description, get_snapshot['description']) self.assertEqual(snapshot['share_id'], get_snapshot['share_id']) self.assertEqual(snapshot['provider_location'], get_snapshot['provider_location']) # Delete snapshot self.shares_v2_client.delete_snapshot(get_snapshot['id']) self.shares_client.wait_for_resource_deletion( snapshot_id=get_snapshot['id']) self.assertRaises(lib_exc.NotFound, self.shares_v2_client.get_snapshot, get_snapshot['id']) @test.attr(type=["gate", "smoke"]) def test_manage(self): # Manage snapshot self._test_manage(snapshot=self.snapshot) class ManageCIFSSnapshotTest(ManageNFSSnapshotTest): protocol = 'cifs' class ManageGLUSTERFSSnapshotTest(ManageNFSSnapshotTest): protocol = 'glusterfs' class ManageHDFSSnapshotTest(ManageNFSSnapshotTest): protocol = 'hdfs' manila-2.0.0/manila_tempest_tests/tests/api/admin/test_share_types_extra_specs.py0000664000567000056710000001053312701407107031702 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from tempest.lib.common.utils import data_utils # noqa from tempest import test # noqa from manila_tempest_tests.tests.api import base class ExtraSpecsReadAdminTest(base.BaseSharesAdminTest): @classmethod def resource_setup(cls): super(ExtraSpecsReadAdminTest, cls).resource_setup() cls.share_type_name = data_utils.rand_name("share-type") cls.required_extra_specs = cls.add_required_extra_specs_to_dict() cls.share_type = cls.create_share_type( cls.share_type_name, extra_specs=cls.required_extra_specs) cls.st_id = cls.share_type["share_type"]["id"] cls.custom_extra_specs = {"key1": "value1", "key2": "value2"} cls.expected_extra_specs = copy.copy(cls.custom_extra_specs) cls.expected_extra_specs.update(cls.required_extra_specs) cls.shares_client.create_share_type_extra_specs( cls.st_id, cls.custom_extra_specs) @test.attr(type=["gate", "smoke", ]) def test_get_one_share_type_extra_spec(self): es_get_one = self.shares_client.get_share_type_extra_spec( self.st_id, "key1") self.assertEqual({"key1": self.custom_extra_specs["key1"]}, es_get_one) @test.attr(type=["gate", "smoke", ]) def test_get_all_share_type_extra_specs(self): es_get_all = self.shares_client.get_share_type_extra_specs(self.st_id) self.assertEqual(self.expected_extra_specs, es_get_all) class ExtraSpecsWriteAdminTest(base.BaseSharesAdminTest): def setUp(self): super(ExtraSpecsWriteAdminTest, self).setUp() self.required_extra_specs = self.add_required_extra_specs_to_dict() self.custom_extra_specs = {"key1": "value1", "key2": "value2"} self.share_type_name = data_utils.rand_name("share-type") # Create share type self.share_type = self.create_share_type( self.share_type_name, extra_specs=self.required_extra_specs) self.st_id = self.share_type['share_type']['id'] # Create extra specs for share type self.shares_client.create_share_type_extra_specs( self.st_id, self.custom_extra_specs) @test.attr(type=["gate", "smoke", ]) def test_update_one_share_type_extra_spec(self): self.custom_extra_specs["key1"] = "fake_value1_updated" # Update extra specs of share type update_one = self.shares_client.update_share_type_extra_spec( self.st_id, "key1", self.custom_extra_specs["key1"]) self.assertEqual({"key1": self.custom_extra_specs["key1"]}, update_one) get = self.shares_client.get_share_type_extra_specs(self.st_id) expected_extra_specs = self.custom_extra_specs expected_extra_specs.update(self.required_extra_specs) self.assertEqual(self.custom_extra_specs, get) @test.attr(type=["gate", "smoke", ]) def test_update_all_share_type_extra_specs(self): self.custom_extra_specs["key2"] = "value2_updated" # Update extra specs of share type update_all = self.shares_client.update_share_type_extra_specs( self.st_id, self.custom_extra_specs) self.assertEqual(self.custom_extra_specs, update_all) get = self.shares_client.get_share_type_extra_specs(self.st_id) expected_extra_specs = self.custom_extra_specs expected_extra_specs.update(self.required_extra_specs) self.assertEqual(self.custom_extra_specs, get) 
@test.attr(type=["gate", "smoke", ]) def test_delete_one_share_type_extra_spec(self): # Delete one extra spec for share type self.shares_client.delete_share_type_extra_spec(self.st_id, "key1") # Get metadata get = self.shares_client.get_share_type_extra_specs(self.st_id) self.assertNotIn('key1', get) manila-2.0.0/manila_tempest_tests/tests/api/admin/test_share_servers_negative.py0000664000567000056710000001121512701407107031507 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib.common.utils import data_utils # noqa from tempest.lib import exceptions as lib_exc # noqa from tempest import test # noqa from manila_tempest_tests import clients_share as clients from manila_tempest_tests.tests.api import base class ShareServersNegativeAdminTest(base.BaseSharesAdminTest): @classmethod def resource_setup(cls): super(ShareServersNegativeAdminTest, cls).resource_setup() cls.member_shares_client = clients.Manager().shares_client @test.attr(type=["gate", "smoke", "negative", ]) def test_try_list_share_servers_with_member(self): self.assertRaises(lib_exc.Forbidden, self.member_shares_client.list_share_servers) @test.attr(type=["gate", "smoke", "negative", ]) def test_try_show_share_server_with_member(self): self.assertRaises(lib_exc.Forbidden, self.member_shares_client.show_share_server, 'fake_id') @test.attr(type=["gate", "smoke", "negative", ]) def test_try_show_share_server_details_with_member(self): self.assertRaises(lib_exc.Forbidden, self.member_shares_client.show_share_server_details, 'fake_id') @test.attr(type=["gate", "smoke", "negative", ]) def test_show_share_server_with_inexistent_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.show_share_server, 'fake_id') @test.attr(type=["gate", "smoke", "negative", ]) def test_show_share_server_details_with_inexistent_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.show_share_server_details, 'fake_id') @test.attr(type=["gate", "smoke", "negative", ]) def test_list_share_servers_with_wrong_filter_key(self): search_opts = {'fake_filter_key': 'ACTIVE'} servers = self.shares_client.list_share_servers(search_opts) self.assertEqual(0, len(servers)) @test.attr(type=["gate", "smoke", "negative", ]) def test_list_share_servers_with_wrong_filter_value(self): search_opts = {'host': 123} servers = self.shares_client.list_share_servers(search_opts) self.assertEqual(0, len(servers)) @test.attr(type=["gate", "smoke", "negative", ]) def test_list_share_servers_with_fake_status(self): search_opts = {"status": data_utils.rand_name("fake_status")} servers = self.shares_client.list_share_servers(search_opts) self.assertEqual(0, len(servers)) @test.attr(type=["gate", "smoke", "negative", ]) def test_list_share_servers_with_fake_host(self): search_opts = {"host": data_utils.rand_name("fake_host")} servers = self.shares_client.list_share_servers(search_opts) self.assertEqual(0, len(servers)) @test.attr(type=["gate", "smoke", "negative", ]) def 
test_list_share_servers_with_fake_project(self): search_opts = {"project_id": data_utils.rand_name("fake_project_id")} servers = self.shares_client.list_share_servers(search_opts) self.assertEqual(0, len(servers)) @test.attr(type=["gate", "smoke", "negative", ]) def test_list_share_servers_with_fake_share_network(self): search_opts = { "share_network": data_utils.rand_name("fake_share_network"), } servers = self.shares_client.list_share_servers(search_opts) self.assertEqual(0, len(servers)) @test.attr(type=["gate", "smoke", "negative", ]) def test_delete_share_server_with_nonexistent_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.delete_share_server, "fake_nonexistent_share_server_id") @test.attr(type=["gate", "smoke", "negative", ]) def test_delete_share_server_with_member(self): self.assertRaises(lib_exc.Forbidden, self.member_shares_client.delete_share_server, "fake_nonexistent_share_server_id") manila-2.0.0/manila_tempest_tests/tests/api/admin/test_services_negative.py0000664000567000056710000000767212701407107030473 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from tempest.lib import exceptions as lib_exc from tempest import test from manila_tempest_tests import clients_share as clients from manila_tempest_tests.tests.api import base @ddt.ddt class ServicesAdminNegativeTest(base.BaseSharesAdminTest): @classmethod def resource_setup(cls): super(ServicesAdminNegativeTest, cls).resource_setup() user_clients = clients.Manager() cls.user_shares_client = user_clients.shares_client @test.attr(type=["gate", "smoke", "negative", ]) def test_list_services_with_non_admin_user(self): self.assertRaises(lib_exc.Forbidden, self.user_shares_client.list_services) @test.attr(type=["gate", "smoke", "negative", ]) def test_get_service_by_invalid_params(self): # All services are expected if send the request with invalid parameter services = self.shares_client.list_services() params = {'fake_param': 'fake_param_value'} services_fake = self.shares_client.list_services(params) self.assertEqual(len(services), len(services_fake)) # "update_at" field could be updated before second request, # so do not take it in account. for service in services + services_fake: service["updated_at"] = "removed_possible_difference" msg = ('Unexpected service list. Expected %s, got %s.' 
% (services, services_fake)) self.assertEqual(sorted(services, key=lambda service: service['id']), sorted(services_fake, key=lambda service: service['id']), msg) @test.attr(type=["gate", "smoke", "negative", ]) def test_get_service_by_invalid_host(self): params = {'host': 'fake_host'} services_fake = self.shares_client.list_services(params) self.assertEqual(0, len(services_fake)) @test.attr(type=["gate", "smoke", "negative", ]) def test_get_service_by_invalid_binary(self): params = {'binary': 'fake_binary'} services_fake = self.shares_client.list_services(params) self.assertEqual(0, len(services_fake)) @test.attr(type=["gate", "smoke", "negative", ]) def test_get_service_by_invalid_zone(self): params = {'zone': 'fake_zone'} services_fake = self.shares_client.list_services(params) self.assertEqual(0, len(services_fake)) @test.attr(type=["gate", "smoke", "negative", ]) def test_get_service_by_invalid_status(self): params = {'status': 'fake_status'} services_fake = self.shares_client.list_services(params) self.assertEqual(0, len(services_fake)) @test.attr(type=["gate", "smoke", "negative", ]) def test_get_service_by_invalid_state(self): params = {'state': 'fake_state'} services_fake = self.shares_client.list_services(params) self.assertEqual(0, len(services_fake)) @test.attr(type=["gate", "smoke", "negative", ]) @ddt.data( ('os-services', '2.7'), ('services', '2.6'), ('services', '2.0'), ) @ddt.unpack @base.skip_if_microversion_not_supported("2.7") def test_list_services_with_wrong_versions(self, url, version): self.assertRaises( lib_exc.NotFound, self.shares_v2_client.list_services, version=version, url=url, ) manila-2.0.0/manila_tempest_tests/tests/api/admin/test_share_manage.py0000664000567000056710000001656512701407112027375 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import six from tempest import config from tempest.lib.common.utils import data_utils from tempest.lib import exceptions as lib_exc from tempest import test import testtools from manila_tempest_tests.tests.api import base from manila_tempest_tests import utils CONF = config.CONF class ManageNFSShareTest(base.BaseSharesAdminTest): protocol = 'nfs' # NOTE(vponomaryov): be careful running these tests using generic driver # because cinder volumes will stay attached to service Nova VM and # won't be deleted. 
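    #
    # A hedged manual-cleanup sketch (assumes the legacy python-novaclient
    # and python-cinderclient CLIs; not run by the tests): volumes left
    # attached to the service VM by the generic driver can typically be
    # detached and removed with commands along these lines:
    #
    #     $ nova volume-detach <service-vm-id> <volume-id>
    #     $ cinder delete <volume-id>
    #
    # The VM and volume IDs depend on the deployment, so adapt as needed.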
@classmethod @testtools.skipIf( CONF.share.multitenancy_enabled, "Only for driver_handles_share_servers = False driver mode.") @testtools.skipUnless( CONF.share.run_manage_unmanage_tests, "Manage/unmanage tests are disabled.") def resource_setup(cls): super(ManageNFSShareTest, cls).resource_setup() if cls.protocol not in CONF.share.enable_protocols: message = "%s tests are disabled" % cls.protocol raise cls.skipException(message) # Create share types cls.st_name = data_utils.rand_name("manage-st-name") cls.st_name_invalid = data_utils.rand_name("manage-st-name-invalid") cls.extra_specs = { 'storage_protocol': CONF.share.capability_storage_protocol, 'driver_handles_share_servers': False, 'snapshot_support': six.text_type( CONF.share.capability_snapshot_support), } cls.extra_specs_invalid = { 'storage_protocol': CONF.share.capability_storage_protocol, 'driver_handles_share_servers': True, 'snapshot_support': six.text_type( CONF.share.capability_snapshot_support), } cls.st = cls.create_share_type( name=cls.st_name, cleanup_in_class=True, extra_specs=cls.extra_specs) cls.st_invalid = cls.create_share_type( name=cls.st_name_invalid, cleanup_in_class=True, extra_specs=cls.extra_specs_invalid) creation_data = {'kwargs': { 'share_type_id': cls.st['share_type']['id'], 'share_protocol': cls.protocol, }} # Data for creating shares in parallel data = [creation_data, creation_data] if utils.is_microversion_ge(CONF.share.max_api_microversion, "2.8"): data.append(creation_data) shares_created = cls.create_shares(data) cls.shares = [] # Load all share data (host, etc.) for share in shares_created: # Unmanage shares from manila cls.shares.append(cls.shares_client.get_share(share['id'])) cls.shares_client.unmanage_share(share['id']) cls.shares_client.wait_for_resource_deletion( share_id=share['id']) def _test_manage(self, share, is_public=False, version=CONF.share.max_api_microversion): name = "Name for 'managed' share that had ID %s" % \ share['id'] description = "Description for 'managed' share" # Manage share share = self.shares_v2_client.manage_share( service_host=share['host'], export_path=share['export_locations'][0], protocol=share['share_proto'], share_type_id=self.st['share_type']['id'], name=name, description=description, is_public=is_public, ) # Add managed share to cleanup queue self.method_resources.insert( 0, {'type': 'share', 'id': share['id'], 'client': self.shares_client}) # Wait for success self.shares_v2_client.wait_for_share_status(share['id'], 'available') # Verify data of managed share get = self.shares_v2_client.get_share(share['id'], version="2.5") self.assertEqual(name, get['name']) self.assertEqual(description, get['description']) self.assertEqual(share['host'], get['host']) self.assertEqual(share['share_proto'], get['share_proto']) self.assertEqual(self.st['share_type']['name'], get['share_type']) share = self.shares_v2_client.get_share(share['id'], version="2.6") self.assertEqual(self.st['share_type']['id'], share['share_type']) if utils.is_microversion_ge(version, "2.8"): self.assertEqual(is_public, share['is_public']) else: self.assertFalse(share['is_public']) # Delete share self.shares_v2_client.delete_share(share['id']) self.shares_v2_client.wait_for_resource_deletion(share_id=share['id']) self.assertRaises(lib_exc.NotFound, self.shares_v2_client.get_share, share['id']) @base.skip_if_microversion_not_supported("2.8") @test.attr(type=["gate", "smoke"]) def test_manage_with_is_public_True(self): self._test_manage(share=self.shares[2], is_public=True) @test.attr(type=["gate", 
"smoke"]) def test_manage(self): # After 'unmanage' operation, share instance should be deleted. # Assert not related to 'manage' test, but placed here for # resource optimization. share_instance_list = self.shares_v2_client.list_share_instances() share_ids = [si['share_id'] for si in share_instance_list] self.assertNotIn(self.shares[0]['id'], share_ids) self._test_manage(share=self.shares[0]) @test.attr(type=["gate", "smoke"]) def test_manage_retry(self): # Manage share with invalid parameters self.assertRaises( lib_exc.Conflict, self.shares_v2_client.manage_share, service_host=self.shares[1]['host'], export_path=self.shares[1]['export_locations'][0], protocol=self.shares[1]['share_proto'], share_type_id=self.st_invalid['share_type']['id']) share = self.shares_v2_client.manage_share( service_host=self.shares[1]['host'], export_path=self.shares[1]['export_locations'][0], protocol=self.shares[1]['share_proto'], share_type_id=self.st['share_type']['id']) self.shares_v2_client.wait_for_share_status(share['id'], 'available') # Delete share self.shares_v2_client.delete_share(share['id']) self.shares_v2_client.wait_for_resource_deletion(share_id=share['id']) self.assertRaises(lib_exc.NotFound, self.shares_v2_client.get_share, share['id']) class ManageCIFSShareTest(ManageNFSShareTest): protocol = 'cifs' class ManageGLUSTERFSShareTest(ManageNFSShareTest): protocol = 'glusterfs' class ManageHDFSShareTest(ManageNFSShareTest): protocol = 'hdfs' class ManageCephFSShareTest(ManageNFSShareTest): protocol = 'cephfs' manila-2.0.0/manila_tempest_tests/tests/api/admin/test_consistency_group_actions.py0000664000567000056710000001147012701407107032252 0ustar jenkinsjenkins00000000000000# Copyright 2015 Andrew Kerr # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from tempest import config from tempest.lib.common.utils import data_utils from tempest import test import testtools from manila_tempest_tests.tests.api import base CONF = config.CONF @testtools.skipUnless(CONF.share.run_consistency_group_tests, 'Consistency Group tests disabled.') class ConsistencyGroupActionsTest(base.BaseSharesAdminTest): @classmethod def resource_setup(cls): super(ConsistencyGroupActionsTest, cls).resource_setup() # Create 2 share_types name = data_utils.rand_name("tempest-manila") extra_specs = cls.add_required_extra_specs_to_dict() share_type = cls.create_share_type(name, extra_specs=extra_specs) cls.share_type = share_type['share_type'] name = data_utils.rand_name("tempest-manila") share_type = cls.create_share_type(name, extra_specs=extra_specs) cls.share_type2 = share_type['share_type'] # Create a consistency group cls.consistency_group = cls.create_consistency_group( share_type_ids=[cls.share_type['id'], cls.share_type2['id']]) @test.attr(type=["gate", ]) def test_create_cg_from_cgsnapshot_with_multiple_share_types_v2_4(self): # Create cgsnapshot cgsnapshot = self.create_cgsnapshot_wait_for_active( self.consistency_group["id"], cleanup_in_class=False, version='2.4', ) new_consistency_group = self.create_consistency_group( cleanup_in_class=False, source_cgsnapshot_id=cgsnapshot['id'], version='2.4', ) # Verify share_types are the same expected_types = sorted(self.consistency_group['share_types']) actual_types = sorted(new_consistency_group['share_types']) self.assertEqual(expected_types, actual_types, 'Expected share types of %s, but got %s.' % ( expected_types, actual_types)) @test.attr(type=["gate", ]) def test_create_cg_from_multi_typed_populated_cgsnapshot_v2_4(self): share_name = data_utils.rand_name("tempest-share-name") share_desc = data_utils.rand_name("tempest-share-description") shares = self.create_shares([ {'kwargs': { 'cleanup_in_class': False, 'name': share_name, 'description': share_desc, 'consistency_group_id': self.consistency_group['id'], 'share_type_id': st_id, }} for st_id in (self.share_type['id'], self.share_type2['id']) ]) cg_shares = self.shares_v2_client.list_shares( detailed=True, params={'consistency_group_id': self.consistency_group['id']}, version='2.4', ) cg_share_ids = [s['id'] for s in cg_shares] for share_id in (shares[0]['id'], shares[1]['id']): self.assertIn(share_id, cg_share_ids, 'Share %s not in ' 'consistency group %s.' % (share_id, self.consistency_group['id'])) cgsnap_name = data_utils.rand_name("tempest-cgsnap-name") cgsnap_desc = data_utils.rand_name("tempest-cgsnap-description") cgsnapshot = self.create_cgsnapshot_wait_for_active( self.consistency_group["id"], name=cgsnap_name, description=cgsnap_desc, cleanup_in_class=False, version='2.4', ) self.create_consistency_group(cleanup_in_class=False, source_cgsnapshot_id=cgsnapshot['id'], version='2.4') # TODO(akerr): Skip until bug 1483886 is resolved # Verify that the new shares correspond to correct share types # expected_share_types = [self.share_type['id'], self.share_type2[ # 'id']] # actual_share_types = [s['share_type'] for s in new_cg_shares] # self.assertEqual(sorted(expected_share_types), # sorted(actual_share_types), # 'Expected shares of types %s, got %s.' % ( # sorted(expected_share_types), # sorted(actual_share_types))) manila-2.0.0/manila_tempest_tests/tests/api/admin/test_scheduler_stats.py0000664000567000056710000001231012701407107030143 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import config # noqa from tempest.lib import exceptions as lib_exc # noqa from tempest import test # noqa from manila_tempest_tests.tests.api import base CONF = config.CONF class SchedulerStatsAdminTest(base.BaseSharesAdminTest): @test.attr(type=["gate", "smoke", ]) def test_pool_list(self): # List pools pool_response = self.shares_client.list_pools() pool_list = pool_response.get('pools') self.assertIsNotNone(pool_list, 'No pools returned from pools API') self.assertNotEmpty(pool_list) pool = pool_list[0] required_keys = {'name', 'host', 'backend', 'pool'} actual_keys = set(pool.keys()) self.assertTrue(actual_keys.issuperset(required_keys)) @test.attr(type=["gate", "smoke", ]) def test_pool_list_with_filters(self): # List pools pool_response = self.shares_client.list_pools() pool_list = pool_response.get('pools') # Ensure we got at least one pool self.assertIsNotNone(pool_list, 'No pools returned from pools API') self.assertNotEmpty(pool_list) pool = pool_list[0] # Build search opts from data and get pools again with filter search_opts = { 'host': self._wrap_regex_for_exact_match(pool.get('host')), 'backend': self._wrap_regex_for_exact_match(pool.get('backend')), 'pool': self._wrap_regex_for_exact_match(pool.get('pool')), } pool_response = self.shares_client.list_pools( search_opts=search_opts) filtered_pool_list = pool_response.get('pools') # Ensure we got exactly one pool matching the first one from above self.assertEqual(1, len(filtered_pool_list)) # Match the key values, not the timestamp. 
for k, v in search_opts.items(): self.assertEqual(v[1:-1], filtered_pool_list[0][k]) @test.attr(type=["gate", "smoke", ]) def test_pool_list_with_filters_negative(self): # Build search opts for a non-existent pool search_opts = { 'host': 'foo', 'backend': 'bar', 'pool': 'shark', } pool_response = self.shares_client.list_pools( search_opts=search_opts) pool_list = pool_response.get('pools') # Ensure we got no pools self.assertEmpty(pool_list) @test.attr(type=["gate", "smoke", ]) def test_pool_list_detail(self): # List pools pool_response = self.shares_client.list_pools(detail=True) pool_list = pool_response.get('pools') self.assertIsNotNone(pool_list, 'No pools returned from pools API') self.assertNotEmpty(pool_list) pool = pool_list[0] required_keys = {'name', 'host', 'backend', 'pool', 'capabilities'} actual_keys = set(pool.keys()) self.assertTrue(actual_keys.issuperset(required_keys)) @test.attr(type=["gate", "smoke", ]) def test_pool_list_detail_with_filters(self): # List pools pool_response = self.shares_client.list_pools(detail=True) pool_list = pool_response.get('pools') # Ensure we got at least one pool self.assertIsNotNone(pool_list, 'No pools returned from pools API') self.assertNotEmpty(pool_list) pool = pool_list[0] # Build search opts from data and get pools again with filter search_opts = { 'host': self._wrap_regex_for_exact_match(pool.get('host')), 'backend': self._wrap_regex_for_exact_match(pool.get('backend')), 'pool': self._wrap_regex_for_exact_match(pool.get('pool')), } pool_response = self.shares_client.list_pools( detail=True, search_opts=search_opts) filtered_pool_list = pool_response.get('pools') # Ensure we got exactly one pool matching the first one from above self.assertEqual(1, len(filtered_pool_list)) # Match the key values, not the timestamp. for k, v in search_opts.items(): self.assertEqual(v[1:-1], filtered_pool_list[0][k]) @test.attr(type=["gate", "smoke", ]) def test_pool_list_detail_with_filters_negative(self): # Build search opts for a non-existent pool search_opts = { 'host': 'foo', 'backend': 'bar', 'pool': 'shark', } pool_response = self.shares_client.list_pools( detail=True, search_opts=search_opts) pool_list = pool_response.get('pools') # Ensure we got no pools self.assertEmpty(pool_list) def _wrap_regex_for_exact_match(self, regex): return '^%s$' % regex manila-2.0.0/manila_tempest_tests/tests/api/admin/test_share_networks.py0000664000567000056710000000713012701407107030011 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from tempest import test # noqa from manila_tempest_tests.tests.api import base from manila_tempest_tests.tests.api import test_share_networks class ShareNetworkAdminTest( base.BaseSharesAdminTest, test_share_networks.ShareNetworkListMixin): @classmethod def resource_setup(cls): super(ShareNetworkAdminTest, cls).resource_setup() ss_data = cls.generate_security_service_data() cls.ss_ldap = cls.create_security_service(**ss_data) cls.data_sn_with_ldap_ss = { 'name': 'sn_with_ldap_ss', 'neutron_net_id': '1111', 'neutron_subnet_id': '2222', 'created_at': '2002-02-02', 'updated_at': None, 'network_type': 'vlan', 'segmentation_id': 1000, 'cidr': '10.0.0.0/24', 'ip_version': 4, 'description': 'fake description', } cls.sn_with_ldap_ss = cls.create_share_network( cleanup_in_class=True, **cls.data_sn_with_ldap_ss) cls.shares_client.add_sec_service_to_share_network( cls.sn_with_ldap_ss["id"], cls.ss_ldap["id"]) cls.isolated_client = cls.get_client_with_isolated_creds( type_of_creds='alt') cls.data_sn_with_kerberos_ss = { 'name': 'sn_with_kerberos_ss', 'created_at': '2003-03-03', 'updated_at': None, 'neutron_net_id': 'test net id', 'neutron_subnet_id': 'test subnet id', 'network_type': 'local', 'segmentation_id': 2000, 'cidr': '10.0.0.0/13', 'ip_version': 6, 'description': 'fake description', } cls.ss_kerberos = cls.isolated_client.create_security_service( ss_type='kerberos', **cls.data_sn_with_ldap_ss) cls.sn_with_kerberos_ss = cls.isolated_client.create_share_network( cleanup_in_class=True, **cls.data_sn_with_kerberos_ss) cls.isolated_client.add_sec_service_to_share_network( cls.sn_with_kerberos_ss["id"], cls.ss_kerberos["id"]) @test.attr(type=["gate", "smoke", ]) def test_list_share_networks_all_tenants(self): listed = self.shares_client.list_share_networks_with_detail( {'all_tenants': 1}) self.assertTrue(any(self.sn_with_ldap_ss['id'] == sn['id'] for sn in listed)) self.assertTrue(any(self.sn_with_kerberos_ss['id'] == sn['id'] for sn in listed)) @test.attr(type=["gate", "smoke", ]) def test_list_share_networks_filter_by_project_id(self): listed = self.shares_client.list_share_networks_with_detail( {'project_id': self.sn_with_kerberos_ss['project_id']}) self.assertTrue(any(self.sn_with_kerberos_ss['id'] == sn['id'] for sn in listed)) self.assertTrue(all(self.sn_with_kerberos_ss['project_id'] == sn['project_id'] for sn in listed)) manila-2.0.0/manila_tempest_tests/tests/api/admin/test_shares_actions.py0000664000567000056710000003524112701407107027764 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. All Rights Reserved. # Copyright (c) 2015 Yogesh Kshirsagar. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from tempest import config # noqa from tempest.lib.common.utils import data_utils # noqa from tempest import test # noqa import testtools # noqa from manila_tempest_tests.tests.api import base CONF = config.CONF class SharesActionsAdminTest(base.BaseSharesAdminTest): """Covers share functionality, that doesn't related to share type.""" @classmethod def resource_setup(cls): super(SharesActionsAdminTest, cls).resource_setup() cls.shares = [] # create share type for share filtering purposes cls.st_name = data_utils.rand_name("tempest-st-name") cls.extra_specs = cls.add_required_extra_specs_to_dict( {'storage_protocol': CONF.share.capability_storage_protocol}) cls.st = cls.create_share_type( name=cls.st_name, cleanup_in_class=True, extra_specs=cls.extra_specs, ) # create share cls.share_name = data_utils.rand_name("tempest-share-name") cls.share_desc = data_utils.rand_name("tempest-share-description") cls.metadata = { 'foo_key_share_1': 'foo_value_share_1', 'bar_key_share_1': 'foo_value_share_1', } cls.share_size = 1 cls.shares.append(cls.create_share( name=cls.share_name, description=cls.share_desc, size=cls.share_size, metadata=cls.metadata, share_type_id=cls.st['share_type']['id'], )) if CONF.share.run_snapshot_tests: # create snapshot cls.snap_name = data_utils.rand_name("tempest-snapshot-name") cls.snap_desc = data_utils.rand_name( "tempest-snapshot-description") cls.snap = cls.create_snapshot_wait_for_active( cls.shares[0]["id"], cls.snap_name, cls.snap_desc) # create second share from snapshot for purposes of sorting and # snapshot filtering cls.share_name2 = data_utils.rand_name("tempest-share-name") cls.share_desc2 = data_utils.rand_name("tempest-share-description") cls.metadata2 = { 'foo_key_share_2': 'foo_value_share_2', 'bar_key_share_2': 'foo_value_share_2', } cls.shares.append(cls.create_share( name=cls.share_name2, description=cls.share_desc2, size=cls.share_size, metadata=cls.metadata2, snapshot_id=cls.snap['id'], )) @test.attr(type=["gate", ]) def test_get_share(self): # get share share = self.shares_client.get_share(self.shares[0]['id']) # verify keys expected_keys = ["status", "description", "links", "availability_zone", "created_at", "export_location", "share_proto", "name", "snapshot_id", "id", "size"] actual_keys = share.keys() [self.assertIn(key, actual_keys) for key in expected_keys] # verify values msg = "Expected name: '%s', actual name: '%s'" % (self.share_name, share["name"]) self.assertEqual(self.share_name, str(share["name"]), msg) msg = "Expected description: '%s', "\ "actual description: '%s'" % (self.share_desc, share["description"]) self.assertEqual(self.share_desc, str(share["description"]), msg) msg = "Expected size: '%s', actual size: '%s'" % (self.share_size, share["size"]) self.assertEqual(self.share_size, int(share["size"]), msg) @test.attr(type=["gate", ]) def test_list_shares(self): # list shares shares = self.shares_client.list_shares() # verify keys keys = ["name", "id", "links"] [self.assertIn(key, sh.keys()) for sh in shares for key in keys] # our share id in list and have no duplicates for share in self.shares: gen = [sid["id"] for sid in shares if sid["id"] in share["id"]] msg = "expected id lists %s times in share list" % (len(gen)) self.assertEqual(1, len(gen), msg) @test.attr(type=["gate", ]) def test_list_shares_with_detail(self): # list shares shares = self.shares_client.list_shares_with_detail() # verify keys keys = [ "status", "description", "links", "availability_zone", "created_at", "export_location", "share_proto", "host", "name", 
"snapshot_id", "id", "size", "project_id", ] [self.assertIn(key, sh.keys()) for sh in shares for key in keys] # our shares in list and have no duplicates for share in self.shares: gen = [sid["id"] for sid in shares if sid["id"] in share["id"]] msg = "expected id lists %s times in share list" % (len(gen)) self.assertEqual(1, len(gen), msg) @test.attr(type=["gate", ]) def test_list_shares_with_detail_filter_by_metadata(self): filters = {'metadata': self.metadata} # list shares shares = self.shares_client.list_shares_with_detail(params=filters) # verify response self.assertTrue(len(shares) > 0) for share in shares: self.assertDictContainsSubset( filters['metadata'], share['metadata']) if CONF.share.run_snapshot_tests: self.assertFalse(self.shares[1]['id'] in [s['id'] for s in shares]) @test.attr(type=["gate", ]) def test_list_shares_with_detail_filter_by_extra_specs(self): filters = { "extra_specs": { "storage_protocol": CONF.share.capability_storage_protocol, } } share_type_list = self.shares_client.list_share_types()["share_types"] # list shares shares = self.shares_client.list_shares_with_detail(params=filters) # verify response self.assertTrue(len(shares) > 0) shares_ids = [s["id"] for s in shares] for share in self.shares: self.assertTrue(share["id"] in shares_ids) for share in shares: # find its name or id, get id st_id = None for st in share_type_list: if share["share_type"] in (st["id"], st["name"]): st_id = st["id"] break if st_id is None: raise ValueError( "Share '%(s_id)s' listed with extra_specs filter has " "nonexistent share type '%(st)s'." % { "s_id": share["id"], "st": share["share_type"]} ) extra_specs = self.shares_client.get_share_type_extra_specs(st_id) self.assertDictContainsSubset(filters["extra_specs"], extra_specs) @test.attr(type=["gate", ]) def test_list_shares_with_detail_filter_by_share_type_id(self): filters = {'share_type_id': self.st['share_type']['id']} # list shares shares = self.shares_client.list_shares_with_detail(params=filters) # verify response self.assertTrue(len(shares) > 0) for share in shares: st_list = self.shares_client.list_share_types() # find its name or id, get id sts = st_list["share_types"] st_id = None for st in sts: if share["share_type"] in [st["id"], st["name"]]: st_id = st["id"] break if st_id is None: raise ValueError( "Share '%(s_id)s' listed with share_type_id filter has " "nonexistent share type '%(st)s'." 
% { "s_id": share["id"], "st": share["share_type"]} ) self.assertEqual( filters['share_type_id'], st_id) share_ids = [share['id'] for share in shares] for share in self.shares: self.assertTrue(share['id'] in share_ids) @test.attr(type=["gate", ]) def test_list_shares_with_detail_filter_by_host(self): base_share = self.shares_client.get_share(self.shares[0]['id']) filters = {'host': base_share['host']} # list shares shares = self.shares_client.list_shares_with_detail(params=filters) # verify response self.assertTrue(len(shares) > 0) for share in shares: self.assertEqual(filters['host'], share['host']) @test.attr(type=["gate", ]) @testtools.skipIf( not CONF.share.multitenancy_enabled, "Only for multitenancy.") def test_list_shares_with_detail_filter_by_share_network_id(self): base_share = self.shares_client.get_share(self.shares[0]['id']) filters = {'share_network_id': base_share['share_network_id']} # list shares shares = self.shares_client.list_shares_with_detail(params=filters) # verify response self.assertTrue(len(shares) > 0) for share in shares: self.assertEqual( filters['share_network_id'], share['share_network_id']) @test.attr(type=["gate", ]) @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_list_shares_with_detail_filter_by_snapshot_id(self): filters = {'snapshot_id': self.snap['id']} # list shares shares = self.shares_client.list_shares_with_detail(params=filters) # verify response self.assertTrue(len(shares) > 0) for share in shares: self.assertEqual(filters['snapshot_id'], share['snapshot_id']) self.assertFalse(self.shares[0]['id'] in [s['id'] for s in shares]) @test.attr(type=["gate", ]) def test_list_shares_with_detail_with_asc_sorting(self): filters = {'sort_key': 'created_at', 'sort_dir': 'asc'} # list shares shares = self.shares_client.list_shares_with_detail(params=filters) # verify response self.assertTrue(len(shares) > 0) sorted_list = [share['created_at'] for share in shares] self.assertEqual(sorted(sorted_list), sorted_list) @test.attr(type=["gate", ]) def test_list_shares_with_detail_filter_by_existed_name(self): # list shares by name, at least one share is expected params = {"name": self.share_name} shares = self.shares_client.list_shares_with_detail(params) self.assertEqual(self.share_name, shares[0]["name"]) @test.attr(type=["gate", ]) def test_list_shares_with_detail_filter_by_fake_name(self): # list shares by fake name, no shares are expected params = {"name": data_utils.rand_name("fake-nonexistent-name")} shares = self.shares_client.list_shares_with_detail(params) self.assertEqual(0, len(shares)) @test.attr(type=["gate", ]) def test_list_shares_with_detail_filter_by_active_status(self): # list shares by active status, at least one share is expected params = {"status": "available"} shares = self.shares_client.list_shares_with_detail(params) self.assertTrue(len(shares) > 0) for share in shares: self.assertEqual(params["status"], share["status"]) @test.attr(type=["gate", ]) def test_list_shares_with_detail_filter_by_fake_status(self): # list shares by fake status, no shares are expected params = {"status": 'fake'} shares = self.shares_client.list_shares_with_detail(params) self.assertEqual(0, len(shares)) @test.attr(type=["gate", ]) @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_get_snapshot(self): # get snapshot get = self.shares_client.get_snapshot(self.snap["id"]) # verify keys expected_keys = ["status", "links", "share_id", "name", "share_proto", "created_at", 
"description", "id", "share_size"] actual_keys = get.keys() [self.assertIn(key, actual_keys) for key in expected_keys] # verify data msg = "Expected name: '%s', actual name: '%s'" % (self.snap_name, get["name"]) self.assertEqual(self.snap_name, get["name"], msg) msg = "Expected description: '%s', "\ "actual description: '%s'" % (self.snap_desc, get["description"]) self.assertEqual(self.snap_desc, get["description"], msg) msg = "Expected share_id: '%s', "\ "actual share_id: '%s'" % (self.shares[0]["id"], get["share_id"]) self.assertEqual(self.shares[0]["id"], get["share_id"], msg) @test.attr(type=["gate", ]) @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_list_snapshots(self): # list share snapshots snaps = self.shares_client.list_snapshots() # verify keys keys = ["id", "name", "links"] [self.assertIn(key, sn.keys()) for sn in snaps for key in keys] # our share id in list and have no duplicates gen = [sid["id"] for sid in snaps if sid["id"] in self.snap["id"]] msg = "expected id lists %s times in share list" % (len(gen)) self.assertEqual(1, len(gen), msg) @test.attr(type=["gate", ]) @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_list_snapshots_with_detail(self): # list share snapshots snaps = self.shares_client.list_snapshots_with_detail() # verify keys keys = ["status", "links", "share_id", "name", "share_proto", "created_at", "description", "id", "share_size"] [self.assertIn(key, sn.keys()) for sn in snaps for key in keys] # our share id in list and have no duplicates gen = [sid["id"] for sid in snaps if sid["id"] in self.snap["id"]] msg = "expected id lists %s times in share list" % (len(gen)) self.assertEqual(1, len(gen), msg) manila-2.0.0/manila_tempest_tests/tests/api/admin/test_replication.py0000664000567000056710000001767112701407107027277 0ustar jenkinsjenkins00000000000000# Copyright 2015 Yogesh Kshirsagar # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from tempest import config from tempest.lib.common.utils import data_utils from tempest import test import testtools from manila_tempest_tests import clients_share as clients from manila_tempest_tests.common import constants from manila_tempest_tests import share_exceptions from manila_tempest_tests.tests.api import base CONF = config.CONF _MIN_SUPPORTED_MICROVERSION = '2.11' @testtools.skipUnless(CONF.share.run_replication_tests, 'Replication tests are disabled.') @base.skip_if_microversion_lt(_MIN_SUPPORTED_MICROVERSION) class ReplicationAdminTest(base.BaseSharesAdminTest): @classmethod def resource_setup(cls): super(ReplicationAdminTest, cls).resource_setup() # Create share_type name = data_utils.rand_name(constants.TEMPEST_MANILA_PREFIX) cls.admin_client = clients.AdminManager().shares_v2_client cls.replication_type = CONF.share.backend_replication_type if cls.replication_type not in constants.REPLICATION_TYPE_CHOICES: raise share_exceptions.ShareReplicationTypeException( replication_type=cls.replication_type ) cls.zones = cls.get_availability_zones(client=cls.admin_client) cls.share_zone = cls.zones[0] cls.replica_zone = cls.zones[-1] cls.extra_specs = cls.add_required_extra_specs_to_dict( {"replication_type": cls.replication_type}) share_type = cls.create_share_type( name, extra_specs=cls.extra_specs, client=cls.admin_client) cls.share_type = share_type["share_type"] # Create share with above share_type cls.share = cls.create_share(share_type_id=cls.share_type["id"], availability_zone=cls.share_zone,) cls.replica = cls.shares_v2_client.list_share_replicas( share_id=cls.share['id'])[0] @staticmethod def _filter_share_replica_list(replica_list, r_state): # Iterate through replica list to filter based on replica_state return [replica['id'] for replica in replica_list if replica['replica_state'] == r_state] @test.attr(type=["gate", ]) def test_promote_out_of_sync_share_replica(self): """Test promote 'out_of_sync' share replica to active state.""" if (self.replication_type not in constants.REPLICATION_PROMOTION_CHOICES): msg = "Option backend_replication_type should be one of (%s)!" raise self.skipException( msg % ','.join(constants.REPLICATION_PROMOTION_CHOICES)) share = self.create_share(share_type_id=self.share_type['id']) original_replica = self.shares_v2_client.list_share_replicas( share_id=share['id'])[0] # NOTE(Yogi1): Cleanup needs to be disabled for replica that is # being promoted since it will become the 'primary'/'active' replica. replica = self.create_share_replica(share["id"], self.replica_zone, cleanup=False) # List replicas replica_list = self.admin_client.list_share_replicas( share_id=share['id']) # Check if there is only 1 'active' replica before promotion. active_replicas = self._filter_share_replica_list( replica_list, constants.REPLICATION_STATE_ACTIVE) self.assertEqual(1, len(active_replicas)) # Set replica_state to 'out_of_sync' self.admin_client.reset_share_replica_state( replica['id'], constants.REPLICATION_STATE_OUT_OF_SYNC) self.shares_v2_client.wait_for_share_replica_status( replica['id'], constants.REPLICATION_STATE_OUT_OF_SYNC, status_attr='replica_state') # Promote 'out_of_sync' replica to 'active' state. self.promote_share_replica(replica['id'], self.admin_client) # Original replica will need to be cleaned up before the promoted # replica can be deleted. self.addCleanup(self.delete_share_replica, original_replica['id']) # Check if there is still only 1 'active' replica after promotion. 
replica_list = self.shares_v2_client.list_share_replicas( share_id=self.share["id"]) new_active_replicas = self._filter_share_replica_list( replica_list, constants.REPLICATION_STATE_ACTIVE) self.assertEqual(1, len(new_active_replicas)) @test.attr(type=["gate", ]) def test_force_delete_share_replica(self): """Test force deleting a replica that is in 'error_deleting' status.""" replica = self.create_share_replica(self.share['id'], self.replica_zone, cleanup_in_class=False) self.admin_client.reset_share_replica_status( replica['id'], constants.STATUS_ERROR_DELETING) self.shares_v2_client.wait_for_share_replica_status( replica['id'], constants.STATUS_ERROR_DELETING) self.admin_client.force_delete_share_replica(replica['id']) self.shares_v2_client.wait_for_resource_deletion( replica_id=replica['id']) @test.attr(type=["gate", ]) def test_reset_share_replica_status(self): """Test resetting a replica's 'status' attribute.""" replica = self.create_share_replica(self.share['id'], self.replica_zone, cleanup_in_class=False) self.admin_client.reset_share_replica_status(replica['id'], constants.STATUS_ERROR) self.shares_v2_client.wait_for_share_replica_status( replica['id'], constants.STATUS_ERROR) @test.attr(type=["gate", ]) def test_reset_share_replica_state(self): """Test resetting a replica's 'replica_state' attribute.""" replica = self.create_share_replica(self.share['id'], self.replica_zone, cleanup_in_class=False) self.admin_client.reset_share_replica_state(replica['id'], constants.STATUS_ERROR) self.shares_v2_client.wait_for_share_replica_status( replica['id'], constants.STATUS_ERROR, status_attr='replica_state') @test.attr(type=["gate", ]) def test_resync_share_replica(self): """Test resyncing a replica.""" replica = self.create_share_replica(self.share['id'], self.replica_zone, cleanup_in_class=False) self.shares_v2_client.wait_for_share_replica_status( replica['id'], constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state') # Set replica_state to 'out_of_sync'. self.admin_client.reset_share_replica_state( replica['id'], constants.REPLICATION_STATE_OUT_OF_SYNC) self.shares_v2_client.wait_for_share_replica_status( replica['id'], constants.REPLICATION_STATE_OUT_OF_SYNC, status_attr='replica_state') # Attempt resync self.admin_client.resync_share_replica(replica['id']) self.shares_v2_client.wait_for_share_replica_status( replica['id'], constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state') manila-2.0.0/manila_tempest_tests/tests/api/admin/test_admin_actions.py0000664000567000056710000001215312701407107027564 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
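# NOTE(editor): illustrative sketch, not part of the original module. The
# admin-only actions exercised below have rough CLI equivalents in
# python-manilaclient; the command and flag names here are assumptions and
# should be checked against `manila help`:
#
#   manila reset-state --state error <share-id>
#   manila force-delete <share-id>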
from tempest import config # noqa from tempest import test # noqa import testtools # noqa from manila_tempest_tests.tests.api import base CONF = config.CONF class AdminActionsTest(base.BaseSharesAdminTest): @classmethod def resource_setup(cls): super(AdminActionsTest, cls).resource_setup() cls.states = ["error", "available"] cls.task_states = ["migration_starting", "data_copying_in_progress", "migration_success"] cls.bad_status = "error_deleting" cls.sh = cls.create_share() cls.sh_instance = ( cls.shares_v2_client.get_instances_of_share(cls.sh["id"])[0] ) if CONF.share.run_snapshot_tests: cls.sn = cls.create_snapshot_wait_for_active(cls.sh["id"]) @test.attr(type=["gate", ]) def test_reset_share_state(self): for status in self.states: self.shares_v2_client.reset_state(self.sh["id"], status=status) self.shares_v2_client.wait_for_share_status(self.sh["id"], status) @test.attr(type=["gate", ]) def test_reset_share_instance_state(self): id = self.sh_instance["id"] for status in self.states: self.shares_v2_client.reset_state( id, s_type="share_instances", status=status) self.shares_v2_client.wait_for_share_instance_status(id, status) @test.attr(type=["gate", ]) @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_reset_snapshot_state_to_error(self): for status in self.states: self.shares_v2_client.reset_state( self.sn["id"], s_type="snapshots", status=status) self.shares_v2_client.wait_for_snapshot_status( self.sn["id"], status) @test.attr(type=["gate", ]) def test_force_delete_share(self): share = self.create_share() # Change status from 'available' to 'error_deleting' self.shares_v2_client.reset_state(share["id"], status=self.bad_status) # Check that status was changed check_status = self.shares_v2_client.get_share(share["id"]) self.assertEqual(self.bad_status, check_status["status"]) # Share with status 'error_deleting' should be deleted self.shares_v2_client.force_delete(share["id"]) self.shares_v2_client.wait_for_resource_deletion(share_id=share["id"]) @test.attr(type=["gate", ]) def test_force_delete_share_instance(self): share = self.create_share(cleanup_in_class=False) instances = self.shares_v2_client.get_instances_of_share(share["id"]) # Check that instance was created self.assertEqual(1, len(instances)) instance = instances[0] # Change status from 'available' to 'error_deleting' self.shares_v2_client.reset_state( instance["id"], s_type="share_instances", status=self.bad_status) # Check that status was changed check_status = self.shares_v2_client.get_share_instance(instance["id"]) self.assertEqual(self.bad_status, check_status["status"]) # Share with status 'error_deleting' should be deleted self.shares_v2_client.force_delete( instance["id"], s_type="share_instances") self.shares_v2_client.wait_for_resource_deletion( share_instance_id=instance["id"]) @test.attr(type=["gate", ]) @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_force_delete_snapshot(self): sn = self.create_snapshot_wait_for_active(self.sh["id"]) # Change status from 'available' to 'error_deleting' self.shares_v2_client.reset_state( sn["id"], s_type="snapshots", status=self.bad_status) # Check that status was changed check_status = self.shares_v2_client.get_snapshot(sn["id"]) self.assertEqual(self.bad_status, check_status["status"]) # Snapshot with status 'error_deleting' should be deleted self.shares_v2_client.force_delete(sn["id"], s_type="snapshots") self.shares_v2_client.wait_for_resource_deletion(snapshot_id=sn["id"]) 
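    # NOTE(editor): hypothetical helper, not part of the original class.
    # The force-delete tests above repeat the same reset/force-delete/wait
    # pattern; a sketch of a consolidated helper, using only client calls
    # already present in this module (the helper name and signature are
    # invented here for illustration):
    def _force_delete_resource(self, res_id, s_type=None, **wait_kwargs):
        # Only pass s_type when it is given, matching how the tests above
        # call the client for shares vs. share instances/snapshots.
        kwargs = {"s_type": s_type} if s_type else {}
        # Push the resource into 'error_deleting', then force-delete it and
        # wait until it disappears.
        self.shares_v2_client.reset_state(
            res_id, status=self.bad_status, **kwargs)
        self.shares_v2_client.force_delete(res_id, **kwargs)
        self.shares_v2_client.wait_for_resource_deletion(**wait_kwargs)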
@test.attr(type=["gate", ]) @base.skip_if_microversion_lt("2.15") def test_reset_share_task_state(self): for task_state in self.task_states: self.shares_v2_client.reset_task_state(self.sh["id"], task_state) self.shares_v2_client.wait_for_share_status( self.sh["id"], task_state, 'task_state') manila-2.0.0/manila_tempest_tests/tests/api/admin/test_admin_actions_negative.py0000664000567000056710000002020012701407107031436 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import config # noqa from tempest.lib import exceptions as lib_exc # noqa from tempest import test # noqa import testtools # noqa from manila_tempest_tests import clients_share as clients from manila_tempest_tests.tests.api import base CONF = config.CONF class AdminActionsNegativeTest(base.BaseSharesAdminTest): @classmethod def resource_setup(cls): super(AdminActionsNegativeTest, cls).resource_setup() cls.sh = cls.create_share() cls.sh_instance = ( cls.shares_v2_client.get_instances_of_share(cls.sh["id"])[0] ) if CONF.share.run_snapshot_tests: cls.sn = cls.create_snapshot_wait_for_active(cls.sh["id"]) cls.member_shares_client = clients.Manager().shares_client cls.member_shares_v2_client = clients.Manager().shares_v2_client @test.attr(type=["gate", "negative", ]) def test_reset_nonexistent_share_state(self): self.assertRaises(lib_exc.NotFound, self.shares_client.reset_state, "fake") @test.attr(type=["gate", "negative", ]) def test_reset_nonexistent_share_instance_state(self): self.assertRaises(lib_exc.NotFound, self.shares_v2_client.reset_state, "fake", s_type="share_instances") @test.attr(type=["gate", "negative", ]) @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_reset_nonexistent_snapshot_state(self): self.assertRaises(lib_exc.NotFound, self.shares_client.reset_state, "fake", s_type="snapshots") @test.attr(type=["gate", "negative", ]) def test_reset_share_state_to_unacceptable_state(self): self.assertRaises(lib_exc.BadRequest, self.shares_client.reset_state, self.sh["id"], status="fake") @test.attr(type=["gate", "negative", ]) def test_reset_share_instance_state_to_unacceptable_state(self): self.assertRaises( lib_exc.BadRequest, self.shares_v2_client.reset_state, self.sh_instance["id"], s_type="share_instances", status="fake" ) @test.attr(type=["gate", "negative", ]) @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_reset_snapshot_state_to_unacceptable_state(self): self.assertRaises(lib_exc.BadRequest, self.shares_client.reset_state, self.sn["id"], s_type="snapshots", status="fake") @test.attr(type=["gate", "negative", ]) def test_try_reset_share_state_with_member(self): # Even if member from another tenant, it should be unauthorized self.assertRaises(lib_exc.Forbidden, self.member_shares_client.reset_state, self.sh["id"]) @test.attr(type=["gate", "negative", ]) def test_try_reset_share_instance_state_with_member(self): # Even if member from another 
tenant, it should be unauthorized self.assertRaises(lib_exc.Forbidden, self.member_shares_v2_client.reset_state, self.sh_instance["id"], s_type="share_instances") @test.attr(type=["gate", "negative", ]) @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_try_reset_snapshot_state_with_member(self): # Even if member from another tenant, it should be unauthorized self.assertRaises(lib_exc.Forbidden, self.member_shares_client.reset_state, self.sn["id"], s_type="snapshots") @test.attr(type=["gate", "negative", ]) def test_force_delete_nonexistent_share(self): self.assertRaises(lib_exc.NotFound, self.shares_client.force_delete, "fake") @test.attr(type=["gate", "negative", ]) def test_force_delete_nonexistent_share_instance(self): self.assertRaises(lib_exc.NotFound, self.shares_v2_client.force_delete, "fake", s_type="share_instances") @test.attr(type=["gate", "negative", ]) @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_force_delete_nonexistent_snapshot(self): self.assertRaises(lib_exc.NotFound, self.shares_client.force_delete, "fake", s_type="snapshots") @test.attr(type=["gate", "negative", ]) def test_try_force_delete_share_with_member(self): # If a non-admin tries to do force_delete, it should be unauthorized self.assertRaises(lib_exc.Forbidden, self.member_shares_client.force_delete, self.sh["id"]) @test.attr(type=["gate", "negative", ]) def test_try_force_delete_share_instance_with_member(self): # If a non-admin tries to do force_delete, it should be unauthorized self.assertRaises(lib_exc.Forbidden, self.member_shares_v2_client.force_delete, self.sh_instance["id"], s_type="share_instances") @test.attr(type=["gate", "negative", ]) @testtools.skipUnless(CONF.share.run_snapshot_tests, "Snapshot tests are disabled.") def test_try_force_delete_snapshot_with_member(self): # If a non-admin tries to do force_delete, it should be unauthorized self.assertRaises(lib_exc.Forbidden, self.member_shares_client.force_delete, self.sn["id"], s_type="snapshots") @test.attr(type=["gate", "negative", ]) def test_try_get_share_instance_with_member(self): # If a non-admin tries to get instance, it should be unauthorized self.assertRaises(lib_exc.Forbidden, self.member_shares_v2_client.get_share_instance, self.sh_instance["id"]) @test.attr(type=["gate", "negative", ]) def test_try_list_share_instance_with_member(self): # If a non-admin tries to list instances, it should be unauthorized self.assertRaises(lib_exc.Forbidden, self.member_shares_v2_client.list_share_instances) @test.attr(type=["gate", "negative", ]) def test_try_get_instances_of_share_with_member(self): # If a non-admin tries to list instances of given share, it should be # unauthorized self.assertRaises(lib_exc.Forbidden, self.member_shares_v2_client.get_instances_of_share, self.sh['id']) @test.attr(type=["gate", "negative", ]) @base.skip_if_microversion_lt("2.15") def test_reset_task_state_share_not_found(self): self.assertRaises( lib_exc.NotFound, self.shares_v2_client.reset_task_state, 'fake_share', 'migration_error') @test.attr(type=["gate", "negative", ]) @base.skip_if_microversion_lt("2.15") def test_reset_task_state_empty(self): self.assertRaises( lib_exc.BadRequest, self.shares_v2_client.reset_task_state, self.sh['id'], None) @test.attr(type=["gate", "negative", ]) @base.skip_if_microversion_lt("2.15") def test_reset_task_state_invalid_state(self): self.assertRaises( lib_exc.BadRequest, self.shares_v2_client.reset_task_state, self.sh['id'], 'fake_state') 
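# NOTE(editor): illustrative summary, not part of the original module. The
# negative cases above consistently map the kind of bad request to an
# expected exception from tempest.lib:
EXPECTED_NEGATIVE_ERRORS = {
    "nonexistent resource id": lib_exc.NotFound,
    "unacceptable state value": lib_exc.BadRequest,
    "member (non-admin) caller": lib_exc.Forbidden,
}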
manila-2.0.0/manila_tempest_tests/tests/api/admin/test_consistency_groups_negative.py0000664000567000056710000003360112701407107032577 0ustar jenkinsjenkins00000000000000# Copyright 2015 Andrew Kerr # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import config from tempest.lib.common.utils import data_utils from tempest.lib import exceptions from tempest import test import testtools from manila_tempest_tests.tests.api import base CONF = config.CONF @testtools.skipUnless(CONF.share.run_consistency_group_tests, 'Consistency Group tests disabled.') class ConsistencyGroupsNegativeTest(base.BaseSharesAdminTest): @classmethod def resource_setup(cls): super(ConsistencyGroupsNegativeTest, cls).resource_setup() # Create share_type name = data_utils.rand_name("tempest-manila") extra_specs = cls.add_required_extra_specs_to_dict() share_type = cls.create_share_type(name, extra_specs=extra_specs) cls.share_type = share_type['share_type'] # Create a consistency group cls.consistency_group = cls.create_consistency_group( share_type_ids=[cls.share_type['id']]) # Create share inside consistency group cls.share_name = data_utils.rand_name("tempest-share-name") cls.share_desc = data_utils.rand_name("tempest-share-description") cls.share_size = 1 cls.share = cls.create_share( name=cls.share_name, description=cls.share_desc, size=cls.share_size, consistency_group_id=cls.consistency_group['id'], share_type_id=cls.share_type['id'], client=cls.shares_v2_client, ) # Create a cgsnapshot of the consistency group cls.cgsnap_name = data_utils.rand_name("tempest-cgsnap-name") cls.cgsnap_desc = data_utils.rand_name("tempest-cgsnap-description") cls.cgsnapshot = cls.create_cgsnapshot_wait_for_active( cls.consistency_group["id"], name=cls.cgsnap_name, description=cls.cgsnap_desc) @test.attr(type=["negative", "gate", ]) def test_delete_share_type_in_use_by_cg(self): # Attempt delete of share type self.assertRaises(exceptions.BadRequest, self.shares_client.delete_share_type, self.share_type['id']) @test.attr(type=["negative", "gate", ]) def test_create_share_of_unsupported_type_in_cg_v2_4(self): # Attempt to create share of default type in the cg self.assertRaises(exceptions.BadRequest, self.create_share, size=1, consistency_group_id=self.consistency_group['id'], client=self.shares_v2_client, version='2.4') @test.attr(type=["negative", "gate", ]) def test_create_share_in_cg_that_is_not_available_v2_4(self): consistency_group = self.create_consistency_group( cleanup_in_class=False, version='2.4') self.addCleanup(self.shares_v2_client.consistency_group_reset_state, consistency_group['id'], status='available', version='2.4') # creating self.shares_v2_client.consistency_group_reset_state( consistency_group['id'], status='creating', version='2.4') self.shares_v2_client.wait_for_consistency_group_status( consistency_group['id'], 'creating') self.assertRaises(exceptions.BadRequest, self.create_share, name=self.share_name, description=self.share_desc, size=self.share_size, 
consistency_group_id=consistency_group['id'], cleanup_in_class=False, client=self.shares_v2_client, version='2.4') # deleting self.shares_v2_client.consistency_group_reset_state( consistency_group['id'], status='deleting', version='2.4') self.shares_v2_client.wait_for_consistency_group_status( consistency_group['id'], 'deleting') self.assertRaises(exceptions.BadRequest, self.create_share, name=self.share_name, description=self.share_desc, size=self.share_size, consistency_group_id=consistency_group['id'], cleanup_in_class=False, client=self.shares_v2_client, version='2.4') # error self.shares_v2_client.consistency_group_reset_state( consistency_group['id'], status='error', version='2.4') self.shares_v2_client.wait_for_consistency_group_status( consistency_group['id'], 'error') self.assertRaises(exceptions.BadRequest, self.create_share, name=self.share_name, description=self.share_desc, size=self.share_size, consistency_group_id=consistency_group['id'], cleanup_in_class=False, client=self.shares_v2_client, version='2.4') @test.attr(type=["negative", "gate", ]) def test_create_cgsnapshot_of_cg_that_is_not_available_v2_4(self): consistency_group = self.create_consistency_group( cleanup_in_class=False, version='2.4') self.addCleanup(self.shares_v2_client.consistency_group_reset_state, consistency_group['id'], status='available', version='2.4') # creating self.shares_v2_client.consistency_group_reset_state( consistency_group['id'], status='creating', version='2.4') self.shares_v2_client.wait_for_consistency_group_status( consistency_group['id'], 'creating') self.assertRaises(exceptions.Conflict, self.create_cgsnapshot_wait_for_active, consistency_group['id'], cleanup_in_class=False, version='2.4') # deleting self.shares_v2_client.consistency_group_reset_state( consistency_group['id'], status='deleting', version='2.4') self.shares_v2_client.wait_for_consistency_group_status( consistency_group['id'], 'deleting') self.assertRaises(exceptions.Conflict, self.create_cgsnapshot_wait_for_active, consistency_group['id'], cleanup_in_class=False, version='2.4') # error self.shares_v2_client.consistency_group_reset_state( consistency_group['id'], status='error', version='2.4') self.shares_v2_client.wait_for_consistency_group_status( consistency_group['id'], 'error') self.assertRaises(exceptions.Conflict, self.create_cgsnapshot_wait_for_active, consistency_group['id'], cleanup_in_class=False, version='2.4') @test.attr(type=["negative", "gate", ]) def test_create_cgsnapshot_of_cg_with_share_in_error_state_v2_4(self): consistency_group = self.create_consistency_group(version='2.4') share_name = data_utils.rand_name("tempest-share-name") share_desc = data_utils.rand_name("tempest-share-description") share_size = 1 share = self.create_share( name=share_name, description=share_desc, size=share_size, consistency_group_id=consistency_group['id'], cleanup_in_class=False, client=self.shares_v2_client, version='2.4', ) self.shares_client.reset_state(s_id=share['id']) self.shares_client.wait_for_share_status(share['id'], 'error') self.assertRaises(exceptions.Conflict, self.create_cgsnapshot_wait_for_active, consistency_group['id'], cleanup_in_class=False, version='2.4') @test.attr(type=["negative", "gate", ]) def test_delete_cgsnapshot_not_in_available_or_error_v2_4(self): cgsnapshot = self.create_cgsnapshot_wait_for_active( self.consistency_group['id'], cleanup_in_class=False, version='2.4', ) self.addCleanup(self.shares_v2_client.cgsnapshot_reset_state, cgsnapshot['id'], status='available', version='2.4') # creating 
self.shares_v2_client.cgsnapshot_reset_state(cgsnapshot['id'], status='creating', version='2.4') self.shares_v2_client.wait_for_cgsnapshot_status(cgsnapshot['id'], 'creating') self.assertRaises(exceptions.Conflict, self.shares_v2_client.delete_cgsnapshot, cgsnapshot['id'], version='2.4') # deleting self.shares_v2_client.cgsnapshot_reset_state(cgsnapshot['id'], status='deleting', version='2.4') self.shares_v2_client.wait_for_cgsnapshot_status(cgsnapshot['id'], 'deleting') self.assertRaises(exceptions.Conflict, self.shares_v2_client.delete_cgsnapshot, cgsnapshot['id'], version='2.4') @test.attr(type=["negative", "gate", ]) def test_delete_cg_not_in_available_or_error_v2_4(self): consistency_group = self.create_consistency_group( cleanup_in_class=False, version='2.4') self.addCleanup(self.shares_v2_client.consistency_group_reset_state, consistency_group['id'], status='available', version='2.4') # creating self.shares_v2_client.consistency_group_reset_state( consistency_group['id'], status='creating', version='2.4') self.shares_v2_client.wait_for_consistency_group_status( consistency_group['id'], 'creating') self.assertRaises(exceptions.Conflict, self.shares_v2_client.delete_consistency_group, consistency_group['id'], version='2.4') # deleting self.shares_v2_client.consistency_group_reset_state( consistency_group['id'], status='deleting', version='2.4') self.shares_v2_client.wait_for_consistency_group_status( consistency_group['id'], 'deleting') self.assertRaises(exceptions.Conflict, self.shares_v2_client.delete_consistency_group, consistency_group['id'], version='2.4') @test.attr(type=["negative", "gate", ]) def test_create_cg_with_conflicting_share_types_v2_4(self): # Create conflicting share types name = data_utils.rand_name("tempest-manila") extra_specs = {"driver_handles_share_servers": False} share_type = self.create_share_type(name, extra_specs=extra_specs) single_tenant_share_type = share_type['share_type'] name = data_utils.rand_name("tempest-manila") extra_specs = {"driver_handles_share_servers": True} share_type = self.create_share_type(name, extra_specs=extra_specs) multi_tenant_share_type = share_type['share_type'] self.assertRaises(exceptions.BadRequest, self.create_consistency_group, share_type_ids=[single_tenant_share_type['id'], multi_tenant_share_type['id']], cleanup_in_class=False, version='2.4') @test.attr(type=["negative", "gate", ]) def test_create_cg_with_multi_tenant_share_type_and_no_share_network_v2_4( self): # Create multi tenant share type name = data_utils.rand_name("tempest-manila") extra_specs = {"driver_handles_share_servers": True} share_type = self.create_share_type(name, extra_specs=extra_specs) multi_tenant_share_type = share_type['share_type'] def create_cg(): cg = self.shares_v2_client.create_consistency_group( share_type_ids=[multi_tenant_share_type['id']], version='2.4' ) resource = { "type": "consistency_group", "id": cg["id"], "client": self.shares_client } self.method_resources.insert(0, resource) return cg self.assertRaises(exceptions.BadRequest, create_cg) @test.attr(type=["negative", "gate", ]) def test_update_cg_share_types(self): consistency_group = self.create_consistency_group( cleanup_in_class=False, version='2.4') self.assertRaises(exceptions.BadRequest, self.shares_v2_client.update_consistency_group, consistency_group['id'], share_types=[self.share_type['id']], version='2.4') manila-2.0.0/manila_tempest_tests/tests/api/admin/test_export_locations.py0000664000567000056710000002041012701407107030343 0ustar jenkinsjenkins00000000000000# 
Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from oslo_utils import timeutils from oslo_utils import uuidutils import six from tempest import config from tempest import test from manila_tempest_tests import clients_share as clients from manila_tempest_tests.tests.api import base from manila_tempest_tests import utils CONF = config.CONF LATEST_MICROVERSION = CONF.share.max_api_microversion @base.skip_if_microversion_not_supported("2.9") @ddt.ddt class ExportLocationsTest(base.BaseSharesAdminTest): @classmethod def resource_setup(cls): super(ExportLocationsTest, cls).resource_setup() cls.admin_client = cls.shares_v2_client cls.member_client = clients.Manager().shares_v2_client cls.share = cls.create_share() cls.share = cls.shares_v2_client.get_share(cls.share['id']) cls.share_instances = cls.shares_v2_client.get_instances_of_share( cls.share['id']) def _verify_export_location_structure( self, export_locations, role='admin', version=LATEST_MICROVERSION, format='summary'): # Determine which keys to expect based on role, version and format summary_keys = ['id', 'path'] if utils.is_microversion_ge(version, '2.14'): summary_keys += ['preferred'] admin_summary_keys = summary_keys + [ 'share_instance_id', 'is_admin_only'] detail_keys = summary_keys + ['created_at', 'updated_at'] admin_detail_keys = admin_summary_keys + ['created_at', 'updated_at'] if format == 'summary': if role == 'admin': expected_keys = admin_summary_keys else: expected_keys = summary_keys else: if role == 'admin': expected_keys = admin_detail_keys else: expected_keys = detail_keys if not isinstance(export_locations, (list, tuple, set)): export_locations = (export_locations, ) for export_location in export_locations: # Check that the correct keys are present self.assertEqual(len(expected_keys), len(export_location)) for key in expected_keys: self.assertIn(key, export_location) # Check the format of ever-present summary keys self.assertTrue(uuidutils.is_uuid_like(export_location['id'])) self.assertTrue(isinstance(export_location['path'], six.string_types)) if utils.is_microversion_ge(version, '2.14'): self.assertIn(export_location['preferred'], (True, False)) if role == 'admin': self.assertIn(export_location['is_admin_only'], (True, False)) self.assertTrue(uuidutils.is_uuid_like( export_location['share_instance_id'])) # Check the format of the detail keys if format == 'detail': for time in (export_location['created_at'], export_location['updated_at']): # If var 'time' has incorrect value then ValueError # exception is expected to be raised. So, just try parse # it making assertion that it has proper date value. 
timeutils.parse_strtime(time) @test.attr(type=["gate", ]) @utils.skip_if_microversion_not_supported('2.13') def test_list_share_export_locations(self): export_locations = self.admin_client.list_share_export_locations( self.share['id'], version='2.13') self._verify_export_location_structure(export_locations, version='2.13') @test.attr(type=["gate", ]) @utils.skip_if_microversion_not_supported('2.14') def test_list_share_export_locations_with_preferred_flag(self): export_locations = self.admin_client.list_share_export_locations( self.share['id'], version='2.14') self._verify_export_location_structure(export_locations, version='2.14') @test.attr(type=["gate", ]) def test_get_share_export_location(self): export_locations = self.admin_client.list_share_export_locations( self.share['id']) for export_location in export_locations: el = self.admin_client.get_share_export_location( self.share['id'], export_location['id']) self._verify_export_location_structure(el, format='detail') @test.attr(type=["gate", ]) def test_list_share_export_locations_by_member(self): export_locations = self.member_client.list_share_export_locations( self.share['id']) self._verify_export_location_structure(export_locations, role='member') @test.attr(type=["gate", ]) def test_get_share_export_location_by_member(self): export_locations = self.admin_client.list_share_export_locations( self.share['id']) for export_location in export_locations: if export_location['is_admin_only']: continue el = self.member_client.get_share_export_location( self.share['id'], export_location['id']) self._verify_export_location_structure(el, role='member', format='detail') @test.attr(type=["gate", ]) @utils.skip_if_microversion_not_supported('2.13') def test_list_share_instance_export_locations(self): for share_instance in self.share_instances: export_locations = ( self.admin_client.list_share_instance_export_locations( share_instance['id'], version='2.13')) self._verify_export_location_structure(export_locations, version='2.13') @test.attr(type=["gate", ]) @utils.skip_if_microversion_not_supported('2.14') def test_list_share_instance_export_locations_with_preferred_flag(self): for share_instance in self.share_instances: export_locations = ( self.admin_client.list_share_instance_export_locations( share_instance['id'], version='2.14')) self._verify_export_location_structure(export_locations, version='2.14') @test.attr(type=["gate", ]) def test_get_share_instance_export_location(self): for share_instance in self.share_instances: export_locations = ( self.admin_client.list_share_instance_export_locations( share_instance['id'])) for el in export_locations: el = self.admin_client.get_share_instance_export_location( share_instance['id'], el['id']) self._verify_export_location_structure(el, format='detail') @test.attr(type=["gate", ]) def test_share_contains_all_export_locations_of_all_share_instances(self): share_export_locations = self.admin_client.list_share_export_locations( self.share['id']) share_instances_export_locations = [] for share_instance in self.share_instances: share_instance_export_locations = ( self.admin_client.list_share_instance_export_locations( share_instance['id'])) share_instances_export_locations.extend( share_instance_export_locations) self.assertEqual( len(share_export_locations), len(share_instances_export_locations) ) self.assertEqual( sorted(share_export_locations, key=lambda el: el['id']), sorted(share_instances_export_locations, key=lambda el: el['id']) ) 
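# NOTE(editor): illustrative summary, not part of the original module. The
# key sets checked by _verify_export_location_structure() above, by role
# and response format, once microversion 2.14 (the 'preferred' flag) is
# available:
EXPECTED_EXPORT_LOCATION_KEYS = {
    ("member", "summary"): ["id", "path", "preferred"],
    ("member", "detail"): ["id", "path", "preferred",
                           "created_at", "updated_at"],
    ("admin", "summary"): ["id", "path", "preferred",
                           "share_instance_id", "is_admin_only"],
    ("admin", "detail"): ["id", "path", "preferred",
                          "share_instance_id", "is_admin_only",
                          "created_at", "updated_at"],
}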
manila-2.0.0/manila_tempest_tests/tests/api/admin/test_share_servers.py0000664000567000056710000002716412701407107027637 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re import six # noqa from tempest import config # noqa from tempest.lib import exceptions as lib_exc # noqa from tempest import test # noqa from manila_tempest_tests.tests.api import base CONF = config.CONF class ShareServersAdminTest(base.BaseSharesAdminTest): @classmethod def resource_setup(cls): super(ShareServersAdminTest, cls).resource_setup() if not CONF.share.multitenancy_enabled: msg = ("Share servers can be tested only with multitenant drivers." " Skipping.") raise cls.skipException(msg) cls.share = cls.create_share() cls.share_network = cls.shares_client.get_share_network( cls.shares_client.share_network_id) if not cls.share_network["name"]: sn_id = cls.share_network["id"] cls.share_network = cls.shares_client.update_share_network( sn_id, name="sn_%s" % sn_id) cls.sn_name_and_id = [ cls.share_network["name"], cls.share_network["id"], ] # Date should be like '2014-13-12T11:10:09.000000' cls.date_re = re.compile("^([0-9]{4}-[0-9]{2}-[0-9]{2}[A-Z]{1}" "[0-9]{2}:[0-9]{2}:[0-9]{2}).*$") @test.attr(type=["gate", "smoke", ]) def test_list_share_servers_without_filters(self): servers = self.shares_client.list_share_servers() self.assertTrue(len(servers) > 0) keys = [ "id", "host", "status", "share_network_name", "updated_at", "project_id", ] for server in servers: # All expected keys are present for key in keys: self.assertIn(key, server.keys()) # 'Updated at' is valid date if set if server["updated_at"]: self.assertTrue(self.date_re.match(server["updated_at"])) # Host is not empty self.assertTrue(len(server["host"]) > 0) # Id is not empty self.assertTrue(len(server["id"]) > 0) # Project id is not empty self.assertTrue(len(server["project_id"]) > 0) # Do not verify statuses because we get all share servers from whole # cluster and here can be servers with any state. # Server we used is present. any(s["share_network_name"] in self.sn_name_and_id for s in servers) @test.attr(type=["gate", "smoke", ]) def test_list_share_servers_with_host_filter(self): # Get list of share servers and remember 'host' name servers = self.shares_client.list_share_servers() # Remember name of server that was used by this test suite # to be sure it will be still existing. host = "" for server in servers: if server["share_network_name"] in self.sn_name_and_id: if not server["host"]: msg = ("Server '%s' has wrong value for host - " "'%s'.") % (server["id"], server["host"]) raise lib_exc.InvalidContentType(message=msg) host = server["host"] break if not host: msg = ("Appropriate server was not found. Its share_network_data" ": '%s'. 
List of servers: '%s'.") % (self.sn_name_and_id, str(servers)) raise lib_exc.NotFound(message=msg) search_opts = {"host": host} servers = self.shares_client.list_share_servers(search_opts) self.assertTrue(len(servers) > 0) for server in servers: self.assertEqual(server["host"], host) @test.attr(type=["gate", "smoke", ]) def test_list_share_servers_with_status_filter(self): # Get list of share servers servers = self.shares_client.list_share_servers() # Remember status of server that was used by this test suite # to be sure it will be still existing. status = "" for server in servers: if server["share_network_name"] in self.sn_name_and_id: if not server["status"]: msg = ("Server '%s' has wrong value for status - " "'%s'.") % (server["id"], server["host"]) raise lib_exc.InvalidContentType(message=msg) status = server["status"] break if not status: msg = ("Appropriate server was not found. Its share_network_data" ": '%s'. List of servers: '%s'.") % (self.sn_name_and_id, str(servers)) raise lib_exc.NotFound(message=msg) search_opts = {"status": status} servers = self.shares_client.list_share_servers(search_opts) self.assertTrue(len(servers) > 0) for server in servers: self.assertEqual(server["status"], status) @test.attr(type=["gate", "smoke", ]) def test_list_share_servers_with_project_id_filter(self): search_opts = {"project_id": self.share_network["project_id"]} servers = self.shares_client.list_share_servers(search_opts) # Should exist, at least, one share server, used by this test suite. self.assertTrue(len(servers) > 0) for server in servers: self.assertEqual(server["project_id"], self.share_network["project_id"]) @test.attr(type=["gate", "smoke", ]) def test_list_share_servers_with_share_network_name_filter(self): search_opts = {"share_network": self.share_network["name"]} servers = self.shares_client.list_share_servers(search_opts) # Should exist, at least, one share server, used by this test suite. self.assertTrue(len(servers) > 0) for server in servers: self.assertEqual(server["share_network_name"], self.share_network["name"]) @test.attr(type=["gate", "smoke", ]) def test_list_share_servers_with_share_network_id_filter(self): search_opts = {"share_network": self.share_network["id"]} servers = self.shares_client.list_share_servers(search_opts) # Should exist, at least, one share server, used by this test suite. 
        self.assertTrue(len(servers) > 0)
        for server in servers:
            self.assertIn(server["share_network_name"], self.sn_name_and_id)

    @test.attr(type=["gate", "smoke", ])
    def test_show_share_server(self):
        servers = self.shares_client.list_share_servers()
        server = self.shares_client.show_share_server(servers[0]["id"])
        keys = [
            "id",
            "host",
            "project_id",
            "status",
            "share_network_name",
            "created_at",
            "updated_at",
            "backend_details",
        ]
        # all expected keys are present
        for key in keys:
            self.assertIn(key, server.keys())
        # 'created_at' is valid date
        self.assertTrue(self.date_re.match(server["created_at"]))
        # 'updated_at' is valid date if set
        if server["updated_at"]:
            self.assertTrue(self.date_re.match(server["updated_at"]))
        # Host is not empty
        self.assertTrue(len(server["host"]) > 0)
        # Id is not empty
        self.assertTrue(len(server["id"]) > 0)
        # Project id is not empty
        self.assertTrue(len(server["project_id"]) > 0)
        # Status is not empty
        self.assertTrue(len(server["status"]) > 0)
        # share_network_name is not empty
        self.assertTrue(len(server["share_network_name"]) > 0)
        # backend_details should be a dict
        self.assertIsInstance(server["backend_details"], dict)

    @test.attr(type=["gate", "smoke", ])
    def test_show_share_server_details(self):
        servers = self.shares_client.list_share_servers()
        details = self.shares_client.show_share_server_details(
            servers[0]["id"])
        # If details are present they and their values should be only strings
        # six.iteritems() is used instead of dict.iteritems() so the check
        # also works on Python 3.
        for k, v in six.iteritems(details):
            self.assertIsInstance(k, six.string_types)
            self.assertIsInstance(v, six.string_types)

    @test.attr(type=["gate", "smoke", ])
    def _delete_share_server(self, delete_share_network):
        # Get network and subnet from existing share_network and reuse it
        # to be able to delete share_server after test ends.
        # TODO(vponomaryov): attach security-services too. If any exist from
        # donor share-network.
        new_sn = self.create_share_network(
            neutron_net_id=self.share_network['neutron_net_id'],
            neutron_subnet_id=self.share_network['neutron_subnet_id'])

        # Create server with share
        self.create_share(share_network_id=new_sn['id'])

        # List share servers, filtered by share_network_id
        search_opts = {"share_network": new_sn["id"]}
        servers = self.shares_client.list_share_servers(search_opts)

        # There can be more than one share server for share network when retry
        # was used and share was created successfully not from first time.
        # So, iterate all share-servers, release all created resources. It
        # will allow share network to be deleted in cleanup.
        for serv in servers:
            # Verify that filtering worked as expected.
self.assertEqual(new_sn["id"], serv["share_network_id"]) # List shares by share server id params = {"share_server_id": serv["id"]} shares = self.shares_client.list_shares_with_detail(params) for s in shares: self.assertEqual(new_sn["id"], s["share_network_id"]) # Delete shares, so we will have share server without shares for s in shares: self.shares_client.delete_share(s["id"]) # Wait for shares deletion for s in shares: self.shares_client.wait_for_resource_deletion(share_id=s["id"]) # List shares by share server id, we expect empty list params = {"share_server_id": serv["id"]} empty = self.shares_client.list_shares_with_detail(params) self.assertEqual(0, len(empty)) if delete_share_network: # Delete share network, it should trigger share server deletion self.shares_client.delete_share_network(new_sn["id"]) else: # Delete share server self.shares_client.delete_share_server(serv["id"]) # Wait for share server deletion self.shares_client.wait_for_resource_deletion(server_id=serv["id"]) if delete_share_network: self.shares_client.wait_for_resource_deletion( sn_id=new_sn["id"]) @test.attr(type=["gate", "smoke", ]) def test_delete_share_server(self): self._delete_share_server(False) @test.attr(type=["gate", "smoke", ]) def test_delete_share_server_by_deletion_of_share_network(self): self._delete_share_server(True) manila-2.0.0/manila_tempest_tests/tests/api/test_replication_snapshots.py0000664000567000056710000002131512701407107030277 0ustar jenkinsjenkins00000000000000# Copyright 2016 Yogesh Kshirsagar # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from tempest import config from tempest.lib.common.utils import data_utils from tempest import test import testtools from manila_tempest_tests import clients_share as clients from manila_tempest_tests.common import constants from manila_tempest_tests import share_exceptions from manila_tempest_tests.tests.api import base CONF = config.CONF _MIN_SUPPORTED_MICROVERSION = '2.11' @testtools.skipUnless(CONF.share.run_replication_tests, 'Replication tests are disabled.') @testtools.skipUnless(CONF.share.run_snapshot_tests, 'Snapshot tests disabled.') @base.skip_if_microversion_lt(_MIN_SUPPORTED_MICROVERSION) class ReplicationSnapshotTest(base.BaseSharesTest): @classmethod def resource_setup(cls): super(ReplicationSnapshotTest, cls).resource_setup() # Create share_type name = data_utils.rand_name(constants.TEMPEST_MANILA_PREFIX) cls.admin_client = clients.AdminManager().shares_v2_client cls.replication_type = CONF.share.backend_replication_type if cls.replication_type not in constants.REPLICATION_TYPE_CHOICES: raise share_exceptions.ShareReplicationTypeException( replication_type=cls.replication_type ) cls.zones = cls.get_availability_zones(client=cls.admin_client) cls.share_zone = cls.zones[0] cls.replica_zone = cls.zones[-1] cls.extra_specs = cls.add_required_extra_specs_to_dict( {"replication_type": cls.replication_type}) share_type = cls.create_share_type( name, extra_specs=cls.extra_specs, client=cls.admin_client) cls.share_type = share_type["share_type"] # Create share with above share_type cls.creation_data = {'kwargs': { 'share_type_id': cls.share_type['id'], 'availability_zone': cls.share_zone, }} @test.attr(type=["gate", ]) def test_snapshot_after_share_replica(self): """Test the snapshot for replicated share. Create replica first and then create a snapshot. Verify that the snapshot is properly created under replica by creating a share from that snapshot. """ share = self.create_share(share_type_id=self.share_type['id'], availability_zone=self.share_zone) original_replica = self.shares_v2_client.list_share_replicas( share["id"])[0] share_replica = self.create_share_replica(share["id"], self.replica_zone, cleanup=False) self.addCleanup(self.delete_share_replica, original_replica['id']) self.shares_v2_client.wait_for_share_replica_status( share_replica['id'], constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state') snapshot = self.create_snapshot_wait_for_active(share["id"]) self.promote_share_replica(share_replica['id']) self.delete_share_replica(original_replica['id']) self.create_share(snapshot_id=snapshot['id']) @test.attr(type=["gate", ]) def test_snapshot_before_share_replica(self): """Test the snapshot for replicated share. Create snapshot before creating share replica for the same share. Verify snapshot by creating share from the snapshot. 
""" share = self.create_share(share_type_id=self.share_type['id'], availability_zone=self.share_zone) snapshot = self.create_snapshot_wait_for_active(share["id"]) original_replica = self.shares_v2_client.list_share_replicas( share["id"])[0] share_replica = self.create_share_replica(share["id"], self.replica_zone, cleanup=False) self.addCleanup(self.delete_share_replica, original_replica['id']) self.shares_v2_client.wait_for_share_replica_status( share_replica['id'], constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state') # Wait for snapshot1 to become available self.shares_v2_client.wait_for_snapshot_status( snapshot['id'], "available") self.promote_share_replica(share_replica['id']) self.delete_share_replica(original_replica['id']) self.create_share(snapshot_id=snapshot['id']) @test.attr(type=["gate", ]) def test_snapshot_before_and_after_share_replica(self): """Test the snapshot for replicated share. Verify that snapshot can be created before and after share replica being created. Verify snapshots by creating share from the snapshots. """ share = self.create_share(share_type_id=self.share_type['id'], availability_zone=self.share_zone) snapshot1 = self.create_snapshot_wait_for_active(share["id"]) original_replica = self.shares_v2_client.list_share_replicas( share["id"])[0] share_replica = self.create_share_replica(share["id"], self.replica_zone, cleanup=False) self.addCleanup(self.delete_share_replica, original_replica['id']) self.shares_v2_client.wait_for_share_replica_status( share_replica['id'], constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state') snapshot2 = self.create_snapshot_wait_for_active(share["id"]) # Wait for snapshot1 to become available self.shares_v2_client.wait_for_snapshot_status( snapshot1['id'], "available") self.promote_share_replica(share_replica['id']) # Remove the original active replica to ensure that snapshot is # still being created successfully. self.delete_share_replica(original_replica['id']) self.create_share(snapshot_id=snapshot1['id']) self.create_share(snapshot_id=snapshot2['id']) @test.attr(type=["gate", ]) def test_delete_snapshot_after_adding_replica(self): """Verify the snapshot delete. Ensure that deleting the original snapshot also deletes the snapshot from replica. 
""" share = self.create_share(share_type_id=self.share_type['id'], availability_zone=self.share_zone) share_replica = self.create_share_replica(share["id"], self.replica_zone) self.shares_v2_client.wait_for_share_replica_status( share_replica['id'], constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state') snapshot = self.create_snapshot_wait_for_active(share["id"]) self.shares_v2_client.delete_snapshot(snapshot['id']) self.shares_v2_client.wait_for_resource_deletion( snapshot_id=snapshot["id"]) @test.attr(type=["gate", ]) def test_create_replica_from_snapshot_share(self): """Test replica for a share that was created from snapshot.""" share = self.create_share(share_type_id=self.share_type['id'], availability_zone=self.share_zone) orig_snapshot = self.create_snapshot_wait_for_active(share["id"]) snap_share = self.create_share(snapshot_id=orig_snapshot['id']) original_replica = self.shares_v2_client.list_share_replicas( snap_share["id"])[0] share_replica = self.create_share_replica(snap_share["id"], self.replica_zone, cleanup=False) self.addCleanup(self.delete_share_replica, original_replica['id']) self.shares_v2_client.wait_for_share_replica_status( share_replica['id'], constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state') self.promote_share_replica(share_replica['id']) # Delete the demoted replica so promoted replica can be cleaned # during the cleanup self.delete_share_replica(original_replica['id']) manila-2.0.0/manila_tempest_tests/tests/api/test_share_networks_negative.py0000664000567000056710000001270212701407107030604 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from tempest import config # noqa from tempest.lib import exceptions as lib_exc # noqa from tempest import test # noqa import testtools # noqa from manila_tempest_tests.tests.api import base CONF = config.CONF class ShareNetworksNegativeTest(base.BaseSharesTest): @test.attr(type=["gate", "smoke", "negative"]) def test_try_get_share_network_without_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.get_share_network, "") @test.attr(type=["gate", "smoke", "negative"]) def test_try_get_share_network_with_wrong_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.get_share_network, "wrong_id") @test.attr(type=["gate", "smoke", "negative"]) def test_try_delete_share_network_without_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.delete_share_network, "") @test.attr(type=["gate", "smoke", "negative"]) def test_try_delete_share_network_with_wrong_type(self): self.assertRaises(lib_exc.NotFound, self.shares_client.delete_share_network, "wrong_id") @test.attr(type=["gate", "smoke", "negative"]) def test_try_update_nonexistant_share_network(self): self.assertRaises(lib_exc.NotFound, self.shares_client.update_share_network, "wrong_id", name="name") @test.attr(type=["gate", "smoke", "negative"]) def test_try_update_share_network_with_empty_id(self): self.assertRaises(lib_exc.NotFound, self.shares_client.update_share_network, "", name="name") @test.attr(type=["gate", "smoke", "negative"]) @testtools.skipIf( not CONF.share.multitenancy_enabled, "Only for multitenancy.") def test_try_update_invalid_keys_sh_server_exists(self): self.create_share(cleanup_in_class=False) self.assertRaises(lib_exc.Forbidden, self.shares_client.update_share_network, self.shares_client.share_network_id, neutron_net_id="new_net_id") @test.attr(type=["gate", "smoke", "negative"]) def test_try_get_deleted_share_network(self): data = self.generate_share_network_data() sn = self.create_share_network(**data) self.assertDictContainsSubset(data, sn) self.shares_client.delete_share_network(sn["id"]) # try get deleted share network entity self.assertRaises(lib_exc.NotFound, self.shares_client.get_security_service, sn["id"]) @test.attr(type=["gate", "smoke", "negative"]) def test_try_list_share_networks_all_tenants(self): self.assertRaises(lib_exc.Forbidden, self.shares_client.list_share_networks_with_detail, params={'all_tenants': 1}) @test.attr(type=["gate", "smoke", "negative"]) def test_try_list_share_networks_project_id(self): self.assertRaises(lib_exc.Forbidden, self.shares_client.list_share_networks_with_detail, params={'project_id': 'some_project'}) @test.attr(type=["gate", "smoke", "negative"]) def test_try_list_share_networks_wrong_created_since_value(self): self.assertRaises( lib_exc.BadRequest, self.shares_client.list_share_networks_with_detail, params={'created_since': '2014-10-23T08:31:58.000000'}) @test.attr(type=["gate", "smoke", "negative"]) def test_try_list_share_networks_wrong_created_before_value(self): self.assertRaises( lib_exc.BadRequest, self.shares_client.list_share_networks_with_detail, params={'created_before': '2014-10-23T08:31:58.000000'}) @test.attr(type=["gate", "smoke", "negative"]) @testtools.skipIf(not CONF.share.multitenancy_enabled, 'Can run only with drivers that do handle share servers ' 'creation. 
Skipping.') def test_try_delete_share_network_with_existing_shares(self): # Get valid network data for successful share creation share_network = self.shares_client.get_share_network( self.shares_client.share_network_id) new_sn = self.create_share_network( neutron_net_id=share_network['neutron_net_id'], neutron_subnet_id=share_network['neutron_subnet_id'], nova_net_id=share_network['nova_net_id'], cleanup_in_class=False) # Create share with share network self.create_share( share_network_id=new_sn['id'], cleanup_in_class=False) # Try delete share network self.assertRaises( lib_exc.Conflict, self.shares_client.delete_share_network, new_sn['id']) manila-2.0.0/manila_tempest_tests/tests/api/test_replication.py0000664000567000056710000004266712701407107026212 0ustar jenkinsjenkins00000000000000# Copyright 2015 Yogesh Kshirsagar # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import config from tempest.lib.common.utils import data_utils from tempest import test import testtools from manila_tempest_tests import clients_share as clients from manila_tempest_tests.common import constants from manila_tempest_tests import share_exceptions from manila_tempest_tests.tests.api import base from manila_tempest_tests import utils CONF = config.CONF _MIN_SUPPORTED_MICROVERSION = '2.11' SUMMARY_KEYS = ['share_id', 'id', 'replica_state', 'status'] DETAIL_KEYS = SUMMARY_KEYS + ['availability_zone', 'host', 'updated_at', 'share_network_id', 'created_at'] @testtools.skipUnless(CONF.share.run_replication_tests, 'Replication tests are disabled.') @base.skip_if_microversion_lt(_MIN_SUPPORTED_MICROVERSION) class ReplicationTest(base.BaseSharesTest): @classmethod def resource_setup(cls): super(ReplicationTest, cls).resource_setup() # Create share_type name = data_utils.rand_name(constants.TEMPEST_MANILA_PREFIX) cls.admin_client = clients.AdminManager().shares_v2_client cls.replication_type = CONF.share.backend_replication_type if cls.replication_type not in constants.REPLICATION_TYPE_CHOICES: raise share_exceptions.ShareReplicationTypeException( replication_type=cls.replication_type ) cls.zones = cls.get_availability_zones(client=cls.admin_client) cls.share_zone = cls.zones[0] cls.replica_zone = cls.zones[-1] cls.extra_specs = cls.add_required_extra_specs_to_dict( {"replication_type": cls.replication_type}) share_type = cls.create_share_type( name, extra_specs=cls.extra_specs, client=cls.admin_client) cls.share_type = share_type["share_type"] # Create share with above share_type cls.creation_data = {'kwargs': { 'share_type_id': cls.share_type['id'], 'availability_zone': cls.share_zone, }} # Data for creating shares in parallel data = [cls.creation_data, cls.creation_data] cls.shares = cls.create_shares(data) cls.shares = [cls.shares_v2_client.get_share(s['id']) for s in cls.shares] cls.instance_id1 = cls._get_instance(cls.shares[0]) cls.instance_id2 = cls._get_instance(cls.shares[1]) cls.access_type = "ip" cls.access_to = utils.rand_ip() @classmethod def _get_instance(cls, share): share_instances = 
cls.admin_client.get_instances_of_share(share["id"]) return share_instances[0]["id"] def _verify_create_replica(self): # Create the replica share_replica = self.create_share_replica(self.shares[0]["id"], self.replica_zone, cleanup_in_class=False) share_replicas = self.shares_v2_client.list_share_replicas( share_id=self.shares[0]["id"]) # Ensure replica is created successfully. replica_ids = [replica["id"] for replica in share_replicas] self.assertIn(share_replica["id"], replica_ids) return share_replica def _verify_active_replica_count(self, share_id): # List replicas replica_list = self.shares_v2_client.list_share_replicas( share_id=share_id) # Check if there is only 1 'active' replica before promotion. active_replicas = self._filter_replica_list( replica_list, constants.REPLICATION_STATE_ACTIVE) self.assertEqual(1, len(active_replicas)) def _filter_replica_list(self, replica_list, r_state): # Iterate through replica list to filter based on replica_state return [replica for replica in replica_list if replica['replica_state'] == r_state] def _verify_config_and_set_access_rule_data(self): """Verify the access rule configuration is enabled for NFS. Set the data after verification. """ protocol = self.shares_v2_client.share_protocol # TODO(Yogi1): Add access rules for other protocols. if not ((protocol.lower() == 'nfs') and (protocol in CONF.share.enable_ip_rules_for_protocols) and CONF.share.enable_ip_rules_for_protocols): message = "IP access rules are not supported for this protocol." raise self.skipException(message) access_type = "ip" access_to = utils.rand_ip() return access_type, access_to @test.attr(type=["gate", ]) def test_add_delete_share_replica(self): # Create the replica share_replica = self._verify_create_replica() # Delete the replica self.delete_share_replica(share_replica["id"]) @test.attr(type=["gate", ]) def test_add_access_rule_create_replica_delete_rule(self): # Add access rule to the share access_type, access_to = self._verify_config_and_set_access_rule_data() rule = self.shares_v2_client.create_access_rule( self.shares[0]["id"], access_type, access_to, 'ro') self.shares_v2_client.wait_for_access_rule_status( self.shares[0]["id"], rule["id"], constants.RULE_STATE_ACTIVE) # Create the replica self._verify_create_replica() # Verify access_rules_status transitions to 'active' state. self.shares_v2_client.wait_for_share_status( self.shares[0]["id"], constants.RULE_STATE_ACTIVE, status_attr='access_rules_status') # Delete rule and wait for deletion self.shares_v2_client.delete_access_rule(self.shares[0]["id"], rule["id"]) self.shares_v2_client.wait_for_resource_deletion( rule_id=rule["id"], share_id=self.shares[0]['id']) @test.attr(type=["gate", ]) def test_create_replica_add_access_rule_delete_replica(self): access_type, access_to = self._verify_config_and_set_access_rule_data() # Create the replica share_replica = self._verify_create_replica() # Add access rule self.shares_v2_client.create_access_rule( self.shares[0]["id"], access_type, access_to, 'ro') self.shares_v2_client.wait_for_share_status( self.shares[0]["id"], constants.RULE_STATE_ACTIVE, status_attr='access_rules_status') # Delete the replica self.delete_share_replica(share_replica["id"]) @test.attr(type=["gate", ]) def test_add_multiple_share_replicas(self): rep_domain, pools = self.get_pools_for_replication_domain() if len(pools) < 3: msg = ("Replication domain %(domain)s has only %(count)s pools. " "Need at least 3 pools to run this test." 
% {"domain": rep_domain, "count": len(pools)}) raise self.skipException(msg) # Add the replicas share_replica1 = self.create_share_replica(self.shares[0]["id"], self.replica_zone, cleanup_in_class=False) share_replica2 = self.create_share_replica(self.shares[0]["id"], self.replica_zone, cleanup_in_class=False) self.shares_v2_client.get_share_replica(share_replica2['id']) share_replicas = self.shares_v2_client.list_share_replicas( share_id=self.shares[0]["id"]) replica_host_set = {r['host'] for r in share_replicas} # Assert that replicas are created on different pools. msg = "More than one replica is created on the same pool." self.assertEqual(3, len(replica_host_set), msg) # Verify replicas are in the replica list replica_ids = [replica["id"] for replica in share_replicas] self.assertIn(share_replica1["id"], replica_ids) self.assertIn(share_replica2["id"], replica_ids) @test.attr(type=["gate", ]) def test_promote_in_sync_share_replica(self): # Test promote 'in_sync' share_replica to 'active' state if (self.replication_type not in constants.REPLICATION_PROMOTION_CHOICES): msg = "Option backend_replication_type should be one of (%s)!" raise self.skipException( msg % ','.join(constants.REPLICATION_PROMOTION_CHOICES)) share = self.create_shares([self.creation_data])[0] original_replica = self.shares_v2_client.list_share_replicas( share["id"])[0] # NOTE(Yogi1): Cleanup needs to be disabled for replica that is # being promoted since it will become the 'primary'/'active' replica. replica = self.create_share_replica(share["id"], self.replica_zone, cleanup=False) # Wait for replica state to update after creation self.shares_v2_client.wait_for_share_replica_status( replica['id'], constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state') # Promote the first in_sync replica to active state promoted_replica = self.promote_share_replica(replica['id']) # Delete the demoted replica so promoted replica can be cleaned # during the cleanup of the share. self.addCleanup(self.delete_share_replica, original_replica['id']) self._verify_active_replica_count(share["id"]) # Verify the replica_state for promoted replica promoted_replica = self.shares_v2_client.get_share_replica( promoted_replica["id"]) self.assertEqual(constants.REPLICATION_STATE_ACTIVE, promoted_replica["replica_state"]) @test.attr(type=["gate", ]) def test_promote_and_promote_back(self): # Test promote back and forth between 2 share replicas if (self.replication_type not in constants.REPLICATION_PROMOTION_CHOICES): msg = "Option backend_replication_type should be one of (%s)!" 
raise self.skipException( msg % ','.join(constants.REPLICATION_PROMOTION_CHOICES)) # Create a new share share = self.create_shares([self.creation_data])[0] # Discover the original replica initial_replicas = self.shares_v2_client.list_share_replicas( share_id=share['id']) self.assertEqual(1, len(initial_replicas), '%s replicas initially created for share %s' % (len(initial_replicas), share['id'])) original_replica = initial_replicas[0] # Create a new replica new_replica = self.create_share_replica(share["id"], self.replica_zone, cleanup_in_class=False) self.shares_v2_client.wait_for_share_replica_status( new_replica['id'], constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state') # Promote the new replica to active and verify the replica states self.promote_share_replica(new_replica['id']) self._verify_active_replica_count(share["id"]) self.shares_v2_client.wait_for_share_replica_status( original_replica['id'], constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state') # Promote the original replica back to active self.promote_share_replica(original_replica['id']) self._verify_active_replica_count(share["id"]) self.shares_v2_client.wait_for_share_replica_status( new_replica['id'], constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state') @test.attr(type=["gate", ]) def test_active_replication_state(self): # Verify the replica_state of first instance is set to active. replica = self.shares_v2_client.get_share_replica(self.instance_id1) self.assertEqual( constants.REPLICATION_STATE_ACTIVE, replica['replica_state']) @testtools.skipUnless(CONF.share.run_replication_tests, 'Replication tests are disabled.') @base.skip_if_microversion_lt(_MIN_SUPPORTED_MICROVERSION) class ReplicationActionsTest(base.BaseSharesTest): @classmethod def resource_setup(cls): super(ReplicationActionsTest, cls).resource_setup() # Create share_type name = data_utils.rand_name(constants.TEMPEST_MANILA_PREFIX) cls.admin_client = clients.AdminManager().shares_v2_client cls.replication_type = CONF.share.backend_replication_type if cls.replication_type not in constants.REPLICATION_TYPE_CHOICES: raise share_exceptions.ShareReplicationTypeException( replication_type=cls.replication_type ) cls.zones = cls.get_availability_zones(client=cls.admin_client) cls.share_zone = cls.zones[0] cls.replica_zone = cls.zones[-1] cls.extra_specs = cls.add_required_extra_specs_to_dict( {"replication_type": cls.replication_type}) share_type = cls.create_share_type( name, extra_specs=cls.extra_specs, client=cls.admin_client) cls.share_type = share_type["share_type"] # Create share with above share_type cls.creation_data = {'kwargs': { 'share_type_id': cls.share_type['id'], 'availability_zone': cls.share_zone, }} # Data for creating shares in parallel data = [cls.creation_data, cls.creation_data] cls.shares = cls.create_shares(data) cls.shares = [cls.shares_v2_client.get_share(s['id']) for s in cls.shares] cls.instance_id1 = cls._get_instance(cls.shares[0]) cls.instance_id2 = cls._get_instance(cls.shares[1]) # Create replicas to 2 shares cls.replica1 = cls.create_share_replica(cls.shares[0]["id"], cls.replica_zone, cleanup_in_class=True) cls.replica2 = cls.create_share_replica(cls.shares[1]["id"], cls.replica_zone, cleanup_in_class=True) @classmethod def _get_instance(cls, share): share_instances = cls.admin_client.get_instances_of_share(share["id"]) return share_instances[0]["id"] def _validate_replica_list(self, replica_list, detail=True): # Verify keys if detail: keys = DETAIL_KEYS else: keys = SUMMARY_KEYS for replica in 
replica_list: self.assertEqual(sorted(keys), sorted(replica.keys())) # Check for duplicates replica_id_list = [sr["id"] for sr in replica_list if sr["id"] == replica["id"]] msg = "Replica %s appears %s times in replica list." % ( replica['id'], len(replica_id_list)) self.assertEqual(1, len(replica_id_list), msg) @test.attr(type=["gate", ]) def test_show_share_replica(self): replica = self.shares_v2_client.get_share_replica(self.replica1["id"]) actual_keys = sorted(list(replica.keys())) detail_keys = sorted(DETAIL_KEYS) self.assertEqual(detail_keys, actual_keys, 'Share Replica %s has incorrect keys; ' 'expected %s, got %s.' % (replica["id"], detail_keys, actual_keys)) @test.attr(type=["gate", ]) def test_detail_list_share_replicas_for_share(self): # List replicas for share replica_list = self.shares_v2_client.list_share_replicas( share_id=self.shares[0]["id"]) replica_ids_list = [rep['id'] for rep in replica_list] self.assertIn(self.replica1['id'], replica_ids_list, 'Replica %s was not returned in the list of replicas: %s' % (self.replica1['id'], replica_list)) # Verify keys self._validate_replica_list(replica_list) @test.attr(type=["gate", ]) def test_detail_list_share_replicas_for_all_shares(self): # List replicas for all available shares replica_list = self.shares_v2_client.list_share_replicas() replica_ids_list = [rep['id'] for rep in replica_list] for replica in [self.replica1, self.replica2]: self.assertIn(replica['id'], replica_ids_list, 'Replica %s was not returned in the list of ' 'replicas: %s' % (replica['id'], replica_list)) # Verify keys self._validate_replica_list(replica_list) @test.attr(type=["gate", ]) def test_summary_list_share_replicas_for_all_shares(self): # List replicas replica_list = self.shares_v2_client.list_share_replicas_summary() # Verify keys self._validate_replica_list(replica_list, detail=False) manila-2.0.0/manila_tempest_tests/tests/api/test_consistency_groups_negative.py0000664000567000056710000002332412701407107031510 0ustar jenkinsjenkins00000000000000# Copyright 2015 Andrew Kerr # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
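# NOTE: a minimal, self-contained sketch of the replica key validation used by
# ReplicationActionsTest above. The helper name and sample data are
# hypothetical and not part of the shipped tests.
def _check_replica_keys(replica, detail=True):
    summary_keys = ['share_id', 'id', 'replica_state', 'status']
    detail_keys = summary_keys + ['availability_zone', 'host', 'updated_at',
                                  'share_network_id', 'created_at']
    expected = detail_keys if detail else summary_keys
    # Each replica entry must expose exactly the expected set of keys.
    assert sorted(expected) == sorted(replica.keys())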
from tempest import config from tempest.lib.common.utils import data_utils from tempest.lib import exceptions as lib_exc from tempest import test import testtools from manila_tempest_tests.tests.api import base CONF = config.CONF @testtools.skipUnless(CONF.share.run_consistency_group_tests, 'Consistency Group tests disabled.') class ConsistencyGroupsNegativeTest(base.BaseSharesTest): @classmethod def resource_setup(cls): super(ConsistencyGroupsNegativeTest, cls).resource_setup() # Create a consistency group cls.cg_name = data_utils.rand_name("tempest-cg-name") cls.cg_desc = data_utils.rand_name("tempest-cg-description") cls.consistency_group = cls.create_consistency_group( name=cls.cg_name, description=cls.cg_desc ) # Create a share in the consistency group cls.share_name = data_utils.rand_name("tempest-share-name") cls.share_desc = data_utils.rand_name("tempest-share-description") cls.share_size = 1 cls.share = cls.create_share( name=cls.share_name, description=cls.share_desc, size=cls.share_size, consistency_group_id=cls.consistency_group['id'], client=cls.shares_v2_client ) # Create a cgsnapshot of the consistency group cls.cgsnap_name = data_utils.rand_name("tempest-cgsnap-name") cls.cgsnap_desc = data_utils.rand_name("tempest-cgsnap-description") cls.cgsnapshot = cls.create_cgsnapshot_wait_for_active( cls.consistency_group["id"], name=cls.cgsnap_name, description=cls.cgsnap_desc) @test.attr(type=["negative", "smoke", "gate", ]) def test_create_cg_with_invalid_source_cgsnapshot_id_value_v2_4( self): self.assertRaises(lib_exc.BadRequest, self.create_consistency_group, source_cgsnapshot_id='foobar', cleanup_in_class=False, version='2.4') @test.attr(type=["negative", "smoke", "gate", ]) def test_create_cg_with_nonexistent_source_cgsnapshot_id_value_v2_4(self): self.assertRaises(lib_exc.BadRequest, self.create_consistency_group, source_cgsnapshot_id=self.share['id'], cleanup_in_class=False, version='2.4') @test.attr(type=["negative", "smoke", "gate", ]) def test_create_cg_with_invalid_share_network_id_value_v2_4(self): self.assertRaises(lib_exc.BadRequest, self.create_consistency_group, share_network_id='foobar', cleanup_in_class=False, version='2.4') @test.attr(type=["negative", "smoke", "gate", ]) def test_create_cg_with_nonexistent_share_network_id_value_v2_4(self): self.assertRaises(lib_exc.BadRequest, self.create_consistency_group, share_network_id=self.share['id'], cleanup_in_class=False, version='2.4') @test.attr(type=["negative", "smoke", "gate", ]) def test_create_cg_with_invalid_share_type_id_value_v2_4(self): self.assertRaises(lib_exc.BadRequest, self.create_consistency_group, share_type_ids=['foobar'], cleanup_in_class=False, version='2.4') @test.attr(type=["negative", "smoke", "gate", ]) def test_create_cg_with_nonexistent_share_type_id_value_v2_4(self): self.assertRaises(lib_exc.BadRequest, self.create_consistency_group, share_type_ids=[self.share['id']], cleanup_in_class=False, version='2.4') @test.attr(type=["negative", "smoke", "gate", ]) def test_create_cgsnapshot_with_invalid_cg_id_value_v2_4(self): self.assertRaises(lib_exc.BadRequest, self.create_cgsnapshot_wait_for_active, 'foobar', cleanup_in_class=False, version='2.4') @test.attr(type=["negative", "smoke", "gate", ]) def test_create_cgsnapshot_with_nonexistent_cg_id_value_v2_4(self): self.assertRaises(lib_exc.BadRequest, self.create_cgsnapshot_wait_for_active, self.share['id'], cleanup_in_class=False, version='2.4') @test.attr(type=["negative", "smoke", "gate", ]) def test_get_cg_with_wrong_id_v2_4(self): 
self.assertRaises(lib_exc.NotFound, self.shares_v2_client.get_consistency_group, "wrong_consistency_group_id", version='2.4') @test.attr(type=["negative", "smoke", "gate", ]) def test_get_cg_without_passing_cg_id_v2_4(self): self.assertRaises(lib_exc.NotFound, self.shares_v2_client.get_consistency_group, '', version='2.4') @test.attr(type=["negative", "smoke", "gate", ]) def test_update_cg_with_wrong_id_v2_4(self): self.assertRaises(lib_exc.NotFound, self.shares_v2_client.update_consistency_group, 'wrong_consistency_group_id', name='new_name', description='new_description', version='2.4') @test.attr(type=["negative", "smoke", "gate", ]) def test_delete_cg_with_wrong_id_v2_4(self): self.assertRaises(lib_exc.NotFound, self.shares_v2_client.delete_consistency_group, "wrong_consistency_group_id", version='2.4') @test.attr(type=["negative", "smoke", "gate", ]) def test_delete_cg_without_passing_cg_id_v2_4(self): self.assertRaises(lib_exc.NotFound, self.shares_v2_client.delete_consistency_group, '', version='2.4') @test.attr(type=["negative", "gate", ]) def test_delete_cg_in_use_by_cgsnapshot_v2_4(self): # Attempt delete of share type self.assertRaises(lib_exc.Conflict, self.shares_v2_client.delete_consistency_group, self.consistency_group['id'], version='2.4') @test.attr(type=["negative", "gate", ]) def test_delete_share_in_use_by_cgsnapshot_v2_4(self): # Attempt delete of share type params = {'consistency_group_id': self.share['consistency_group_id']} self.assertRaises(lib_exc.Forbidden, self.shares_v2_client.delete_share, self.share['id'], params=params, version='2.4') @test.attr(type=["negative", "smoke", "gate", ]) def test_delete_cg_containing_a_share_v2_4(self): self.assertRaises(lib_exc.Conflict, self.shares_v2_client.delete_consistency_group, self.consistency_group['id'], version='2.4') # Verify consistency group is not put into error state from conflict cg = self.shares_v2_client.get_consistency_group( self.consistency_group['id'], version='2.4') self.assertEqual('available', cg['status']) @test.attr(type=["negative", "smoke", "gate", ]) def test_filter_shares_on_invalid_cg_id_v2_4(self): shares = self.shares_v2_client.list_shares( detailed=True, params={'consistency_group_id': 'foobar'}, version='2.4' ) self.assertEqual(0, len(shares), 'Incorrect number of shares returned. Expected 0, ' 'got %s.' % len(shares)) @test.attr(type=["negative", "smoke", "gate", ]) def test_filter_shares_on_nonexistent_cg_id_v2_4(self): shares = self.shares_v2_client.list_shares( detailed=True, params={'consistency_group_id': self.share['id']}, version='2.4' ) self.assertEqual(0, len(shares), 'Incorrect number of shares returned. Expected 0, ' 'got %s.' % len(shares)) @test.attr(type=["negative", "smoke", "gate", ]) def test_filter_shares_on_empty_cg_id_v2_4(self): consistency_group = self.create_consistency_group( name='tempest_cg', description='tempest_cg_desc', cleanup_in_class=False, version='2.4', ) shares = self.shares_v2_client.list_shares( detailed=True, params={'consistency_group_id': consistency_group['id']}, version='2.4', ) self.assertEqual(0, len(shares), 'Incorrect number of shares returned. Expected 0, ' 'got %s.' % len(shares)) manila-2.0.0/manila_tempest_tests/tests/api/test_extensions.py0000664000567000056710000000206412701407107026063 0ustar jenkinsjenkins00000000000000# Copyright 2014 mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import test # noqa from manila_tempest_tests.tests.api import base class ExtensionsTest(base.BaseSharesTest): @test.attr(type=["smoke", "gate"]) def test_extensions(self): # get extensions extensions = self.shares_client.list_extensions() # verify response keys = ["alias", "updated", "name", "description"] [self.assertIn(key, ext.keys()) for ext in extensions for key in keys] manila-2.0.0/manila_tempest_tests/tests/scenario/0000775000567000056710000000000012701407265023310 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila_tempest_tests/tests/scenario/__init__.py0000664000567000056710000000000012701407107025402 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila_tempest_tests/tests/scenario/manager_share.py0000664000567000056710000001762712701407107026466 0ustar jenkinsjenkins00000000000000# Copyright 2015 Deutsche Telekom AG # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
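# NOTE: the key check in ExtensionsTest above uses a list comprehension purely
# for its side effects. An equivalent, more conventional loop (illustrative
# only, not part of the shipped test) would be:
#
#     for ext in extensions:
#         for key in ("alias", "updated", "name", "description"):
#             self.assertIn(key, ext)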
from oslo_log import log import six from tempest.common.utils.linux import remote_client # noqa from tempest import config # noqa from tempest.lib.common.utils import data_utils from tempest.scenario import manager # noqa from manila_tempest_tests import clients_share CONF = config.CONF LOG = log.getLogger(__name__) class ShareScenarioTest(manager.NetworkScenarioTest): """Provide harness to do Manila scenario tests.""" @classmethod def resource_setup(cls): cls.set_network_resources() super(ShareScenarioTest, cls).resource_setup() # Manila clients cls.shares_client = clients_share.Manager().shares_client cls.shares_v2_client = clients_share.Manager().shares_v2_client cls.shares_admin_client = clients_share.AdminManager().shares_client cls.shares_admin_v2_client = ( clients_share.AdminManager().shares_v2_client) def _create_share(self, share_protocol=None, size=1, name=None, snapshot_id=None, description=None, metadata=None, share_network_id=None, share_type_id=None, client=None, cleanup_in_class=True): """Create a share :param share_protocol: NFS or CIFS :param size: size in GB :param name: name of the share (otherwise random) :param snapshot_id: snapshot as basis for the share :param description: description of the share :param metadata: adds additional metadata :param share_network_id: id of network to be used :param share_type_id: type of the share to be created :param client: client object :param cleanup_in_class: default: True :returns: a created share """ client = client or self.shares_client description = description or "Tempest's share" if not name: name = data_utils.rand_name("manila-scenario") if CONF.share.multitenancy_enabled: share_network_id = (share_network_id or client.share_network_id) else: share_network_id = None metadata = metadata or {} kwargs = { 'share_protocol': share_protocol, 'size': size, 'name': name, 'snapshot_id': snapshot_id, 'description': description, 'metadata': metadata, 'share_network_id': share_network_id, 'share_type_id': share_type_id, } share = self.shares_client.create_share(**kwargs) self.addCleanup(client.wait_for_resource_deletion, share_id=share['id']) self.addCleanup(client.delete_share, share['id']) client.wait_for_share_status(share['id'], 'available') return share def _wait_for_share_server_deletion(self, sn_id, client=None): """Wait for a share server to be deleted :param sn_id: shared network id :param client: client object """ client = client or self.shares_admin_client servers = client.list_share_servers( search_opts={"share_network": sn_id}) for server in servers: client.delete_share_server(server['id']) client.wait_for_resource_deletion(server_id=server['id']) def _create_share_network(self, client=None, **kwargs): """Create a share network :param client: client object :returns: a created share network """ client = client or self.shares_client sn = client.create_share_network(**kwargs) self.addCleanup(client.wait_for_resource_deletion, sn_id=sn['id']) self.addCleanup(client.delete_share_network, sn['id']) self.addCleanup(self._wait_for_share_server_deletion, sn['id']) return sn def _allow_access(self, share_id, client=None, access_type="ip", access_to="0.0.0.0", cleanup=True): """Allow share access :param share_id: id of the share :param client: client object :param access_type: "ip", "user" or "cert" :param access_to :returns: access object """ client = client or self.shares_client access = client.create_access_rule(share_id, access_type, access_to) # NOTE(u_glide): Ignore provided client, because we always need v2 # client to make 
this call self.shares_v2_client.wait_for_share_status( share_id, "active", status_attr='access_rules_status') if cleanup: self.addCleanup(client.delete_access_rule, share_id, access['id']) return access def _create_router_interface(self, subnet_id, client=None, tenant_id=None, router_id=None): """Create a router interface :param subnet_id: id of the subnet :param client: client object :param tenant_id """ if not client: client = self.network_client if not tenant_id: tenant_id = client.tenant_id if not router_id: router_id = self._get_router()['id'] client.add_router_interface(router_id, subnet_id=subnet_id) self.addCleanup( client.remove_router_interface, router_id, subnet_id=subnet_id) def get_remote_client(self, *args, **kwargs): if not CONF.share.image_with_share_tools: return super(ShareScenarioTest, self).get_remote_client(*args, **kwargs) # NOTE(u_glide): We need custom implementation of this method until # original implementation depends on CONF.compute.ssh_auth_method # option. server_or_ip = kwargs['server_or_ip'] if isinstance(server_or_ip, six.string_types): ip = server_or_ip else: addr = server_or_ip['addresses'][CONF.compute.network_for_ssh][0] ip = addr['addr'] # NOTE(u_glide): Both options (pkey and password) are required here to # support service images without Nova metadata support client_params = { 'username': kwargs['username'], 'password': CONF.share.image_password, 'pkey': kwargs.get('private_key'), } linux_client = remote_client.RemoteClient(ip, **client_params) try: linux_client.validate_authentication() except Exception: LOG.exception('Initializing SSH connection to %s failed' % ip) self._log_console_output() raise return linux_client def _migrate_share(self, share_id, dest_host, client=None): client = client or self.shares_admin_v2_client client.migrate_share(share_id, dest_host, True) share = client.wait_for_migration_status(share_id, dest_host, 'migration_success') return share def _create_share_type(self, name, is_public=True, **kwargs): share_type = self.shares_admin_v2_client.create_share_type(name, is_public, **kwargs) self.addCleanup(self.shares_admin_v2_client.delete_share_type, share_type['share_type']['id']) return share_type manila-2.0.0/manila_tempest_tests/tests/scenario/test_share_basic_ops.py0000664000567000056710000003241412701407107030044 0ustar jenkinsjenkins00000000000000# Copyright 2015 Deutsche Telekom AG # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
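# NOTE: typical flow through the ShareScenarioTest helpers defined above, as
# the scenario tests below exercise it. This is a sketch only; the concrete
# identifiers and the sample IP are hypothetical.
#
#     share_net = self._create_share_network(neutron_net_id=net_id,
#                                            neutron_subnet_id=subnet_id)
#     share = self._create_share(share_protocol='NFS',
#                                share_network_id=share_net['id'])
#     self._allow_access(share['id'], access_type='ip',
#                        access_to='203.0.113.1')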
from oslo_log import log as logging from tempest import config # noqa from tempest.lib.common.utils import data_utils from tempest.lib import exceptions from tempest import test # noqa from manila_tempest_tests.tests.scenario import manager_share as manager from manila_tempest_tests import utils CONF = config.CONF LOG = logging.getLogger(__name__) class ShareBasicOpsBase(manager.ShareScenarioTest): """This smoke test case follows this basic set of operations: * Create share network * Create share * Launch an instance * Allow access * Perform ssh to instance * Mount share * Terminate the instance """ protocol = None def setUp(self): super(ShareBasicOpsBase, self).setUp() # Setup image and flavor the test instance # Support both configured and injected values if not hasattr(self, 'flavor_ref'): self.flavor_ref = CONF.share.client_vm_flavor_ref if CONF.share.image_with_share_tools: images = self.compute_images_client.list_images()["images"] for img in images: if img["name"] == CONF.share.image_with_share_tools: self.image_ref = img['id'] break if not self.image_ref: msg = ("Image %s not found" % CONF.share.image_with_share_tools) raise exceptions.InvalidConfiguration(message=msg) self.ssh_user = CONF.share.image_username LOG.debug('Starting test for i:{image}, f:{flavor}. ' 'user: {ssh_user}'.format( image=self.image_ref, flavor=self.flavor_ref, ssh_user=self.ssh_user)) def boot_instance(self): self.keypair = self.create_keypair() security_groups = [{'name': self.security_group['name']}] create_kwargs = { 'key_name': self.keypair['name'], 'security_groups': security_groups, 'wait_until': 'ACTIVE', } if CONF.share.multitenancy_enabled: create_kwargs['networks'] = [{'uuid': self.net['id']}, ] instance = self.create_server( image_id=self.image_ref, flavor=self.flavor_ref, **create_kwargs) return instance def init_ssh(self, instance, do_ping=False): # Obtain a floating IP floating_ip = (self.compute_floating_ips_client.create_floating_ip() ['floating_ip']) self.addCleanup(self.delete_wrapper, self.compute_floating_ips_client.delete_floating_ip, floating_ip['id']) # Attach a floating IP self.compute_floating_ips_client.associate_floating_ip_to_server( floating_ip['ip'], instance['id']) # Check ssh ssh_client = self.get_remote_client( server_or_ip=floating_ip['ip'], username=self.ssh_user, private_key=self.keypair['private_key']) # NOTE(u_glide): Workaround for bug #1465682 ssh_client = ssh_client.ssh_client self.share = self.shares_client.get_share(self.share['id']) if do_ping: server_ip = self.share['export_location'].split(":")[0] ssh_client.exec_command("ping -c 1 %s" % server_ip) return ssh_client def mount_share(self, location, ssh_client): raise NotImplementedError def umount_share(self, ssh_client): ssh_client.exec_command("sudo umount /mnt") def write_data(self, data, ssh_client): ssh_client.exec_command("echo \"%s\" | sudo tee /mnt/t1 && sudo sync" % data) def read_data(self, ssh_client): data = ssh_client.exec_command("sudo cat /mnt/t1") return data.rstrip() def migrate_share(self, share_id, dest_host): share = self._migrate_share(share_id, dest_host, self.shares_admin_v2_client) return share def create_share_network(self): self.net = self._create_network(namestart="manila-share") self.subnet = self._create_subnet(network=self.net, namestart="manila-share-sub") router = self._get_router() self._create_router_interface(subnet_id=self.subnet['id'], router_id=router['id']) self.share_net = self._create_share_network( neutron_net_id=self.net['id'], neutron_subnet_id=self.subnet['id'], 
name=data_utils.rand_name("sn-name")) def _get_share_type(self): if CONF.share.default_share_type_name: return self.shares_client.get_share_type( CONF.share.default_share_type_name)['share_type'] return self._create_share_type( data_utils.rand_name("share_type"), extra_specs={ 'driver_handles_share_servers': CONF.share.multitenancy_enabled },)['share_type'] def create_share(self): kwargs = { 'share_protocol': self.protocol, 'share_type_id': self._get_share_type()['id'], } if CONF.share.multitenancy_enabled: self.create_share_network() kwargs.update({'share_network_id': self.share_net['id']}) self.share = self._create_share(**kwargs) def allow_access_ip(self, share_id, ip=None, instance=None, cleanup=True): if instance and not ip: try: net_addresses = instance['addresses'] first_address = net_addresses.values()[0][0] ip = first_address['addr'] except Exception: # In case on an error ip will be still none LOG.exception("Instance does not have a valid IP address." "Falling back to default") if not ip: ip = '0.0.0.0/0' self._allow_access(share_id, access_type='ip', access_to=ip, cleanup=cleanup) @test.services('compute', 'network') def test_mount_share_one_vm(self): self.security_group = self._create_security_group() self.create_share() instance = self.boot_instance() self.allow_access_ip(self.share['id'], instance=instance, cleanup=False) ssh_client = self.init_ssh(instance) if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"): locations = self.share['export_locations'] else: exports = self.shares_v2_client.list_share_export_locations( self.share['id']) locations = [x['path'] for x in exports] for location in locations: self.mount_share(location, ssh_client) self.umount_share(ssh_client) self.servers_client.delete_server(instance['id']) @test.services('compute', 'network') def test_read_write_two_vms(self): """Boots two vms and writes/reads data on it.""" test_data = "Some test data to write" self.security_group = self._create_security_group() self.create_share() # boot first VM and write data instance1 = self.boot_instance() self.allow_access_ip(self.share['id'], instance=instance1, cleanup=False) ssh_client_inst1 = self.init_ssh(instance1) if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"): locations = self.share['export_locations'] else: exports = self.shares_v2_client.list_share_export_locations( self.share['id']) locations = [x['path'] for x in exports] self.mount_share(locations[0], ssh_client_inst1) self.addCleanup(self.umount_share, ssh_client_inst1) self.write_data(test_data, ssh_client_inst1) # boot second VM and read instance2 = self.boot_instance() self.allow_access_ip(self.share['id'], instance=instance2) ssh_client_inst2 = self.init_ssh(instance2) self.mount_share(locations[0], ssh_client_inst2) self.addCleanup(self.umount_share, ssh_client_inst2) data = self.read_data(ssh_client_inst2) self.assertEqual(test_data, data) @test.services('compute', 'network') def test_migration_files(self): if self.protocol == "CIFS": raise self.skipException("Test for CIFS protocol not supported " "at this moment. Skipping.") if not CONF.share.run_migration_tests: raise self.skipException("Migration tests disabled. Skipping.") pools = self.shares_admin_client.list_pools()['pools'] if len(pools) < 2: raise self.skipException("At least two different pool entries " "are needed to run migration tests. 
" "Skipping.") self.security_group = self._create_security_group() self.create_share() share = self.shares_client.get_share(self.share['id']) dest_pool = next((x for x in pools if x['name'] != share['host']), None) self.assertIsNotNone(dest_pool) self.assertIsNotNone(dest_pool.get('name')) dest_pool = dest_pool['name'] instance1 = self.boot_instance() self.allow_access_ip(self.share['id'], instance=instance1, cleanup=False) ssh_client = self.init_ssh(instance1) if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"): locations = self.share['export_locations'] else: exports = self.shares_v2_client.list_share_export_locations( self.share['id']) locations = [x['path'] for x in exports] self.mount_share(locations[0], ssh_client) ssh_client.exec_command("mkdir -p /mnt/f1") ssh_client.exec_command("mkdir -p /mnt/f2") ssh_client.exec_command("mkdir -p /mnt/f3") ssh_client.exec_command("mkdir -p /mnt/f4") ssh_client.exec_command("mkdir -p /mnt/f1/ff1") ssh_client.exec_command("sleep 1") ssh_client.exec_command("dd if=/dev/zero of=/mnt/f1/1m1.bin bs=1M" " count=1") ssh_client.exec_command("dd if=/dev/zero of=/mnt/f2/1m2.bin bs=1M" " count=1") ssh_client.exec_command("dd if=/dev/zero of=/mnt/f3/1m3.bin bs=1M" " count=1") ssh_client.exec_command("dd if=/dev/zero of=/mnt/f4/1m4.bin bs=1M" " count=1") ssh_client.exec_command("dd if=/dev/zero of=/mnt/f1/ff1/1m5.bin bs=1M" " count=1") ssh_client.exec_command("chmod -R 555 /mnt/f3") ssh_client.exec_command("chmod -R 777 /mnt/f4") self.umount_share(ssh_client) share = self.migrate_share(share['id'], dest_pool) if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"): new_locations = self.share['export_locations'] else: new_exports = self.shares_v2_client.list_share_export_locations( self.share['id']) new_locations = [x['path'] for x in new_exports] self.assertEqual(dest_pool, share['host']) locations.sort() new_locations.sort() self.assertNotEqual(locations, new_locations) self.assertEqual('migration_success', share['task_state']) self.mount_share(new_locations[0], ssh_client) output = ssh_client.exec_command("ls -lRA --ignore=lost+found /mnt") self.umount_share(ssh_client) self.assertTrue('1m1.bin' in output) self.assertTrue('1m2.bin' in output) self.assertTrue('1m3.bin' in output) self.assertTrue('1m4.bin' in output) self.assertTrue('1m5.bin' in output) class TestShareBasicOpsNFS(ShareBasicOpsBase): protocol = "NFS" def mount_share(self, location, ssh_client): ssh_client.exec_command("sudo mount -vt nfs \"%s\" /mnt" % location) class TestShareBasicOpsCIFS(ShareBasicOpsBase): protocol = "CIFS" def mount_share(self, location, ssh_client): location = location.replace("\\", "/") ssh_client.exec_command( "sudo mount.cifs \"%s\" /mnt -o guest" % location ) # NOTE(u_glide): this function is required to exclude ShareBasicOpsBase from # executed test cases. # See: https://docs.python.org/2/library/unittest.html#load-tests-protocol # for details. def load_tests(loader, tests, _): result = [] for test_case in tests: if type(test_case._tests[0]) is ShareBasicOpsBase: continue result.append(test_case) return loader.suiteClass(result) manila-2.0.0/manila_tempest_tests/utils.py0000664000567000056710000000707412701407107022060 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import random import re import six from tempest import config import testtools CONF = config.CONF def get_microversion_as_tuple(microversion_str): """Transforms string-like microversion to two-value tuple of integers. Tuple of integers useful for microversion comparisons. """ regex = r"^([1-9]\d*)\.([1-9]\d*|0)$" match = re.match(regex, microversion_str) if not match: raise ValueError( "Microversion does not fit template 'x.y' - %s" % microversion_str) return int(match.group(1)), int(match.group(2)) def is_microversion_gt(left, right): """Is microversion for left is greater than the right one.""" return get_microversion_as_tuple(left) > get_microversion_as_tuple(right) def is_microversion_ge(left, right): """Is microversion for left is greater than or equal to the right one.""" return get_microversion_as_tuple(left) >= get_microversion_as_tuple(right) def is_microversion_eq(left, right): """Is microversion for left is equal to the right one.""" return get_microversion_as_tuple(left) == get_microversion_as_tuple(right) def is_microversion_ne(left, right): """Is microversion for left is not equal to the right one.""" return get_microversion_as_tuple(left) != get_microversion_as_tuple(right) def is_microversion_le(left, right): """Is microversion for left is less than or equal to the right one.""" return get_microversion_as_tuple(left) <= get_microversion_as_tuple(right) def is_microversion_lt(left, right): """Is microversion for left is less than the right one.""" return get_microversion_as_tuple(left) < get_microversion_as_tuple(right) def is_microversion_supported(microversion): bottom = get_microversion_as_tuple(CONF.share.min_api_microversion) microversion = get_microversion_as_tuple(microversion) top = get_microversion_as_tuple(CONF.share.max_api_microversion) return bottom <= microversion <= top def skip_if_microversion_not_supported(microversion): """Decorator for tests that are microversion-specific.""" if not is_microversion_supported(microversion): reason = ("Skipped. Test requires microversion '%s'." % microversion) return testtools.skip(reason) return lambda f: f def skip_if_microversion_lt(microversion): """Decorator for tests that are microversion-specific.""" if is_microversion_lt(CONF.share.max_api_microversion, microversion): reason = ("Skipped. Test requires microversion greater than or " "equal to '%s'." % microversion) return testtools.skip(reason) return lambda f: f def rand_ip(): """This uses the TEST-NET-3 range of reserved IP addresses. Using this range, which are reserved solely for use in documentation and example source code, should avoid any potential conflicts in real-world testing. """ TEST_NET_3 = '203.0.113.' 
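# NOTE: illustrative behaviour of the microversion helpers defined above
# (input values chosen only for demonstration):
#
#     get_microversion_as_tuple('2.11')  -> (2, 11)
#     is_microversion_lt('2.9', '2.11')  -> True, since (2, 9) < (2, 11)
#     is_microversion_supported('2.5')   -> depends on the [share]
#                                           min/max api microversion settings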
final_octet = six.text_type(random.randint(0, 255)) return TEST_NET_3 + final_octet manila-2.0.0/manila_tempest_tests/__init__.py0000664000567000056710000000000012701407107022435 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila_tempest_tests/services/0000775000567000056710000000000012701407265022166 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila_tempest_tests/services/share/0000775000567000056710000000000012701407265023270 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila_tempest_tests/services/share/__init__.py0000664000567000056710000000000012701407107025362 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila_tempest_tests/services/share/json/0000775000567000056710000000000012701407265024241 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila_tempest_tests/services/share/json/__init__.py0000664000567000056710000000000012701407107026333 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila_tempest_tests/services/share/json/shares_client.py0000664000567000056710000007115612701407107027443 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import six from six.moves.urllib import parse as urlparse from tempest import config # noqa from tempest.lib.common import rest_client from tempest.lib.common.utils import data_utils from tempest.lib import exceptions from manila_tempest_tests import share_exceptions CONF = config.CONF class SharesClient(rest_client.RestClient): """Tempest REST client for Manila. It handles shares and access to it in OpenStack. 
""" def __init__(self, auth_provider): super(SharesClient, self).__init__( auth_provider, CONF.share.catalog_type, CONF.share.region or CONF.identity.region, endpoint_type=CONF.share.endpoint_type) self.share_protocol = None if CONF.share.enable_protocols: self.share_protocol = CONF.share.enable_protocols[0] self.share_network_id = CONF.share.share_network_id self.build_interval = CONF.share.build_interval self.build_timeout = CONF.share.build_timeout def create_share(self, share_protocol=None, size=1, name=None, snapshot_id=None, description=None, metadata=None, share_network_id=None, share_type_id=None, is_public=False): metadata = metadata or {} if name is None: name = data_utils.rand_name("tempest-created-share") if description is None: description = data_utils.rand_name("tempest-created-share-desc") if share_protocol is None: share_protocol = self.share_protocol if share_protocol is None: raise share_exceptions.ShareProtocolNotSpecified() post_body = { "share": { "share_proto": share_protocol, "description": description, "snapshot_id": snapshot_id, "name": name, "size": size, "metadata": metadata, "is_public": is_public, } } if share_network_id: post_body["share"]["share_network_id"] = share_network_id if share_type_id: post_body["share"]["share_type"] = share_type_id body = json.dumps(post_body) resp, body = self.post("shares", body) self.expected_success(200, resp.status) return self._parse_resp(body) def delete_share(self, share_id): resp, body = self.delete("shares/%s" % share_id) self.expected_success(202, resp.status) return body def manage_share(self, service_host, protocol, export_path, share_type_id, name=None, description=None): post_body = { "share": { "export_path": export_path, "service_host": service_host, "protocol": protocol, "share_type": share_type_id, "name": name, "description": description, } } body = json.dumps(post_body) resp, body = self.post("os-share-manage", body) self.expected_success(200, resp.status) return self._parse_resp(body) def unmanage_share(self, share_id): resp, body = self.post( "os-share-unmanage/%s/unmanage" % share_id, None) self.expected_success(202, resp.status) return body def list_shares(self, detailed=False, params=None): """Get list of shares w/o filters.""" uri = 'shares/detail' if detailed else 'shares' uri += '?%s' % urlparse.urlencode(params) if params else '' resp, body = self.get(uri) self.expected_success(200, resp.status) return self._parse_resp(body) def list_shares_with_detail(self, params=None): """Get detailed list of shares w/o filters.""" return self.list_shares(detailed=True, params=params) def get_share(self, share_id): resp, body = self.get("shares/%s" % share_id) self.expected_success(200, resp.status) return self._parse_resp(body) def create_access_rule(self, share_id, access_type="ip", access_to="0.0.0.0", access_level=None): post_body = { "os-allow_access": { "access_type": access_type, "access_to": access_to, "access_level": access_level, } } body = json.dumps(post_body) resp, body = self.post("shares/%s/action" % share_id, body) self.expected_success(200, resp.status) return self._parse_resp(body) def list_access_rules(self, share_id): body = {"os-access_list": None} resp, body = self.post("shares/%s/action" % share_id, json.dumps(body)) self.expected_success(200, resp.status) return self._parse_resp(body) def delete_access_rule(self, share_id, rule_id): post_body = { "os-deny_access": { "access_id": rule_id, } } body = json.dumps(post_body) resp, body = self.post("shares/%s/action" % share_id, body) 
self.expected_success(202, resp.status) return body def extend_share(self, share_id, new_size): post_body = { "os-extend": { "new_size": new_size, } } body = json.dumps(post_body) resp, body = self.post("shares/%s/action" % share_id, body) self.expected_success(202, resp.status) return body def shrink_share(self, share_id, new_size): post_body = { "os-shrink": { "new_size": new_size, } } body = json.dumps(post_body) resp, body = self.post("shares/%s/action" % share_id, body) self.expected_success(202, resp.status) return body def create_snapshot(self, share_id, name=None, description=None, force=False): if name is None: name = data_utils.rand_name("tempest-created-share-snap") if description is None: description = data_utils.rand_name( "tempest-created-share-snap-desc") post_body = { "snapshot": { "name": name, "force": force, "description": description, "share_id": share_id, } } body = json.dumps(post_body) resp, body = self.post("snapshots", body) self.expected_success(202, resp.status) return self._parse_resp(body) def get_snapshot(self, snapshot_id): resp, body = self.get("snapshots/%s" % snapshot_id) self.expected_success(200, resp.status) return self._parse_resp(body) def list_snapshots(self, detailed=False, params=None): """Get list of share snapshots w/o filters.""" uri = 'snapshots/detail' if detailed else 'snapshots' uri += '?%s' % urlparse.urlencode(params) if params else '' resp, body = self.get(uri) self.expected_success(200, resp.status) return self._parse_resp(body) def list_snapshots_with_detail(self, params=None): """Get detailed list of share snapshots w/o filters.""" return self.list_snapshots(detailed=True, params=params) def delete_snapshot(self, snap_id): resp, body = self.delete("snapshots/%s" % snap_id) self.expected_success(202, resp.status) return body def wait_for_share_status(self, share_id, status): """Waits for a share to reach a given status.""" body = self.get_share(share_id) share_name = body['name'] share_status = body['status'] start = int(time.time()) while share_status != status: time.sleep(self.build_interval) body = self.get_share(share_id) share_status = body['status'] if share_status == status: return elif 'error' in share_status.lower(): raise share_exceptions.\ ShareBuildErrorException(share_id=share_id) if int(time.time()) - start >= self.build_timeout: message = ('Share %s failed to reach %s status within ' 'the required time (%s s).' % (share_name, status, self.build_timeout)) raise exceptions.TimeoutException(message) def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a snapshot to reach a given status.""" body = self.get_snapshot(snapshot_id) snapshot_name = body['name'] snapshot_status = body['status'] start = int(time.time()) while snapshot_status != status: time.sleep(self.build_interval) body = self.get_snapshot(snapshot_id) snapshot_status = body['status'] if 'error' in snapshot_status: raise share_exceptions.\ SnapshotBuildErrorException(snapshot_id=snapshot_id) if int(time.time()) - start >= self.build_timeout: message = ('Share Snapshot %s failed to reach %s status ' 'within the required time (%s s).' 
% (snapshot_name, status, self.build_timeout)) raise exceptions.TimeoutException(message) def wait_for_access_rule_status(self, share_id, rule_id, status): """Waits for an access rule to reach a given status.""" rule_status = "new" start = int(time.time()) while rule_status != status: time.sleep(self.build_interval) rules = self.list_access_rules(share_id) for rule in rules: if rule["id"] in rule_id: rule_status = rule['state'] break if 'error' in rule_status: raise share_exceptions.\ AccessRuleBuildErrorException(rule_id=rule_id) if int(time.time()) - start >= self.build_timeout: message = ('Share Access Rule %s failed to reach %s status ' 'within the required time (%s s).' % (rule_id, status, self.build_timeout)) raise exceptions.TimeoutException(message) def default_quotas(self, tenant_id): resp, body = self.get("os-quota-sets/%s/defaults" % tenant_id) self.expected_success(200, resp.status) return self._parse_resp(body) def show_quotas(self, tenant_id, user_id=None): uri = "os-quota-sets/%s" % tenant_id if user_id is not None: uri += "?user_id=%s" % user_id resp, body = self.get(uri) self.expected_success(200, resp.status) return self._parse_resp(body) def reset_quotas(self, tenant_id, user_id=None): uri = "os-quota-sets/%s" % tenant_id if user_id is not None: uri += "?user_id=%s" % user_id resp, body = self.delete(uri) self.expected_success(202, resp.status) return body def update_quotas(self, tenant_id, user_id=None, shares=None, snapshots=None, gigabytes=None, snapshot_gigabytes=None, share_networks=None, force=True): uri = "os-quota-sets/%s" % tenant_id if user_id is not None: uri += "?user_id=%s" % user_id put_body = {"tenant_id": tenant_id} if force: put_body["force"] = "true" if shares is not None: put_body["shares"] = shares if snapshots is not None: put_body["snapshots"] = snapshots if gigabytes is not None: put_body["gigabytes"] = gigabytes if snapshot_gigabytes is not None: put_body["snapshot_gigabytes"] = snapshot_gigabytes if share_networks is not None: put_body["share_networks"] = share_networks put_body = json.dumps({"quota_set": put_body}) resp, body = self.put(uri, put_body) self.expected_success(200, resp.status) return self._parse_resp(body) def get_limits(self): resp, body = self.get("limits") self.expected_success(200, resp.status) return self._parse_resp(body) def is_resource_deleted(self, *args, **kwargs): """Verifies whether provided resource deleted or not. 
:param kwargs: dict with expected keys 'share_id', 'snapshot_id', :param kwargs: 'sn_id', 'ss_id', 'vt_id' and 'server_id' :raises share_exceptions.InvalidResource """ if "share_id" in kwargs: if "rule_id" in kwargs: rule_id = kwargs.get("rule_id") share_id = kwargs.get("share_id") rules = self.list_access_rules(share_id) for rule in rules: if rule["id"] == rule_id: return False return True else: return self._is_resource_deleted( self.get_share, kwargs.get("share_id")) elif "snapshot_id" in kwargs: return self._is_resource_deleted( self.get_snapshot, kwargs.get("snapshot_id")) elif "sn_id" in kwargs: return self._is_resource_deleted( self.get_share_network, kwargs.get("sn_id")) elif "ss_id" in kwargs: return self._is_resource_deleted( self.get_security_service, kwargs.get("ss_id")) elif "vt_id" in kwargs: return self._is_resource_deleted( self.get_volume_type, kwargs.get("vt_id")) elif "st_id" in kwargs: return self._is_resource_deleted( self.get_share_type, kwargs.get("st_id")) elif "server_id" in kwargs: return self._is_resource_deleted( self.show_share_server, kwargs.get("server_id")) else: raise share_exceptions.InvalidResource( message=six.text_type(kwargs)) def _is_resource_deleted(self, func, res_id): try: res = func(res_id) except exceptions.NotFound: return True if res.get('status') in ['error_deleting', 'error']: # Resource has "error_deleting" status and can not be deleted. resource_type = func.__name__.split('_', 1)[-1] raise share_exceptions.ResourceReleaseFailed( res_type=resource_type, res_id=res_id) return False def wait_for_resource_deletion(self, *args, **kwargs): """Waits for a resource to be deleted.""" start_time = int(time.time()) while True: if self.is_resource_deleted(*args, **kwargs): return if int(time.time()) - start_time >= self.build_timeout: raise exceptions.TimeoutException time.sleep(self.build_interval) def list_extensions(self): resp, extensions = self.get("extensions") self.expected_success(200, resp.status) return self._parse_resp(extensions) def update_share(self, share_id, name=None, desc=None, is_public=None): body = {"share": {}} if name is not None: body["share"].update({"display_name": name}) if desc is not None: body["share"].update({"display_description": desc}) if is_public is not None: body["share"].update({"is_public": is_public}) body = json.dumps(body) resp, body = self.put("shares/%s" % share_id, body) self.expected_success(200, resp.status) return self._parse_resp(body) def rename_snapshot(self, snapshot_id, name, desc=None): body = {"snapshot": {"display_name": name}} if desc is not None: body["snapshot"].update({"display_description": desc}) body = json.dumps(body) resp, body = self.put("snapshots/%s" % snapshot_id, body) self.expected_success(200, resp.status) return self._parse_resp(body) def reset_state(self, s_id, status="error", s_type="shares"): """Resets the state of a share or a snapshot. status: available, error, creating, deleting, error_deleting s_type: shares, snapshots """ body = {"os-reset_status": {"status": status}} body = json.dumps(body) resp, body = self.post("%s/%s/action" % (s_type, s_id), body) self.expected_success(202, resp.status) return body def force_delete(self, s_id, s_type="shares"): """Force delete share or snapshot. 
s_type: shares, snapshots """ body = {"os-force_delete": None} body = json.dumps(body) resp, body = self.post("%s/%s/action" % (s_type, s_id), body) self.expected_success(202, resp.status) return body ############### def list_services(self, params=None): """List services.""" uri = 'os-services' if params: uri += '?%s' % urlparse.urlencode(params) resp, body = self.get(uri) self.expected_success(200, resp.status) return self._parse_resp(body) ############### def _update_metadata(self, share_id, metadata=None, method="post"): uri = "shares/%s/metadata" % share_id if metadata is None: metadata = {} post_body = {"metadata": metadata} body = json.dumps(post_body) if method is "post": resp, metadata = self.post(uri, body) if method is "put": resp, metadata = self.put(uri, body) self.expected_success(200, resp.status) return self._parse_resp(metadata) def set_metadata(self, share_id, metadata=None): return self._update_metadata(share_id, metadata) def update_all_metadata(self, share_id, metadata=None): return self._update_metadata(share_id, metadata, method="put") def delete_metadata(self, share_id, key): resp, body = self.delete("shares/%s/metadata/%s" % (share_id, key)) self.expected_success(200, resp.status) return body def get_metadata(self, share_id): resp, body = self.get("shares/%s/metadata" % share_id) self.expected_success(200, resp.status) return self._parse_resp(body) ############### def create_security_service(self, ss_type="ldap", **kwargs): # ss_type: ldap, kerberos, active_directory # kwargs: name, description, dns_ip, server, domain, user, password post_body = {"type": ss_type} post_body.update(kwargs) body = json.dumps({"security_service": post_body}) resp, body = self.post("security-services", body) self.expected_success(200, resp.status) return self._parse_resp(body) def update_security_service(self, ss_id, **kwargs): # ss_id - id of security-service entity # kwargs: dns_ip, server, domain, user, password, name, description # for 'active' status can be changed # only 'name' and 'description' fields body = json.dumps({"security_service": kwargs}) resp, body = self.put("security-services/%s" % ss_id, body) self.expected_success(200, resp.status) return self._parse_resp(body) def get_security_service(self, ss_id): resp, body = self.get("security-services/%s" % ss_id) self.expected_success(200, resp.status) return self._parse_resp(body) def list_security_services(self, detailed=False, params=None): uri = "security-services" if detailed: uri += '/detail' if params: uri += "?%s" % urlparse.urlencode(params) resp, body = self.get(uri) self.expected_success(200, resp.status) return self._parse_resp(body) def delete_security_service(self, ss_id): resp, body = self.delete("security-services/%s" % ss_id) self.expected_success(202, resp.status) return body ############### def create_share_network(self, **kwargs): # kwargs: name, description # + for neutron: neutron_net_id, neutron_subnet_id body = json.dumps({"share_network": kwargs}) resp, body = self.post("share-networks", body) self.expected_success(200, resp.status) return self._parse_resp(body) def update_share_network(self, sn_id, **kwargs): # kwargs: name, description # + for neutron: neutron_net_id, neutron_subnet_id body = json.dumps({"share_network": kwargs}) resp, body = self.put("share-networks/%s" % sn_id, body) self.expected_success(200, resp.status) return self._parse_resp(body) def get_share_network(self, sn_id): resp, body = self.get("share-networks/%s" % sn_id) self.expected_success(200, resp.status) return 
self._parse_resp(body) def list_share_networks(self): resp, body = self.get("share-networks") self.expected_success(200, resp.status) return self._parse_resp(body) def list_share_networks_with_detail(self, params=None): """List the details of all shares.""" uri = "share-networks/detail" if params: uri += "?%s" % urlparse.urlencode(params) resp, body = self.get(uri) self.expected_success(200, resp.status) return self._parse_resp(body) def delete_share_network(self, sn_id): resp, body = self.delete("share-networks/%s" % sn_id) self.expected_success(202, resp.status) return body ############### def _map_security_service_and_share_network(self, sn_id, ss_id, action="add"): # sn_id: id of share_network_entity # ss_id: id of security service entity # action: add, remove data = { "%s_security_service" % action: { "security_service_id": ss_id, } } body = json.dumps(data) resp, body = self.post("share-networks/%s/action" % sn_id, body) self.expected_success(200, resp.status) return self._parse_resp(body) def add_sec_service_to_share_network(self, sn_id, ss_id): body = self._map_security_service_and_share_network(sn_id, ss_id) return body def remove_sec_service_from_share_network(self, sn_id, ss_id): body = self._map_security_service_and_share_network( sn_id, ss_id, "remove") return body def list_sec_services_for_share_network(self, sn_id): resp, body = self.get("security-services?share_network_id=%s" % sn_id) self.expected_success(200, resp.status) return self._parse_resp(body) ############### def list_share_types(self, params=None): uri = 'types' if params is not None: uri += '?%s' % urlparse.urlencode(params) resp, body = self.get(uri) self.expected_success(200, resp.status) return self._parse_resp(body) def create_share_type(self, name, is_public=True, **kwargs): post_body = { 'name': name, 'extra_specs': kwargs.get('extra_specs'), 'os-share-type-access:is_public': is_public, } post_body = json.dumps({'share_type': post_body}) resp, body = self.post('types', post_body) self.expected_success(200, resp.status) return self._parse_resp(body) def delete_share_type(self, share_type_id): resp, body = self.delete("types/%s" % share_type_id) self.expected_success(202, resp.status) return body def get_share_type(self, share_type_id): resp, body = self.get("types/%s" % share_type_id) self.expected_success(200, resp.status) return self._parse_resp(body) def add_access_to_share_type(self, share_type_id, project_id): uri = 'types/%s/action' % share_type_id post_body = {'project': project_id} post_body = json.dumps({'addProjectAccess': post_body}) resp, body = self.post(uri, post_body) self.expected_success(202, resp.status) return body def remove_access_from_share_type(self, share_type_id, project_id): uri = 'types/%s/action' % share_type_id post_body = {'project': project_id} post_body = json.dumps({'removeProjectAccess': post_body}) resp, body = self.post(uri, post_body) self.expected_success(202, resp.status) return body def list_access_to_share_type(self, share_type_id): uri = 'types/%s/os-share-type-access' % share_type_id resp, body = self.get(uri) # [{"share_type_id": "%st_id%", "project_id": "%project_id%"}, ] self.expected_success(200, resp.status) return self._parse_resp(body) ############### def create_share_type_extra_specs(self, share_type_id, extra_specs): url = "types/%s/extra_specs" % share_type_id post_body = json.dumps({'extra_specs': extra_specs}) resp, body = self.post(url, post_body) self.expected_success(200, resp.status) return self._parse_resp(body) def get_share_type_extra_spec(self, 
share_type_id, extra_spec_name): uri = "types/%s/extra_specs/%s" % (share_type_id, extra_spec_name) resp, body = self.get(uri) self.expected_success(200, resp.status) return self._parse_resp(body) def get_share_type_extra_specs(self, share_type_id, params=None): uri = "types/%s/extra_specs" % share_type_id if params is not None: uri += '?%s' % urlparse.urlencode(params) resp, body = self.get(uri) self.expected_success(200, resp.status) return self._parse_resp(body) def update_share_type_extra_spec(self, share_type_id, spec_name, spec_value): uri = "types/%s/extra_specs/%s" % (share_type_id, spec_name) extra_spec = {spec_name: spec_value} post_body = json.dumps(extra_spec) resp, body = self.put(uri, post_body) self.expected_success(200, resp.status) return self._parse_resp(body) def update_share_type_extra_specs(self, share_type_id, extra_specs): uri = "types/%s/extra_specs" % share_type_id extra_specs = {"extra_specs": extra_specs} post_body = json.dumps(extra_specs) resp, body = self.post(uri, post_body) self.expected_success(200, resp.status) return self._parse_resp(body) def delete_share_type_extra_spec(self, share_type_id, extra_spec_name): uri = "types/%s/extra_specs/%s" % (share_type_id, extra_spec_name) resp, body = self.delete(uri) self.expected_success(202, resp.status) return body ############### def list_share_servers(self, search_opts=None): """Get list of share servers.""" uri = "share-servers" if search_opts: uri += "?%s" % urlparse.urlencode(search_opts) resp, body = self.get(uri) self.expected_success(200, resp.status) return self._parse_resp(body) def delete_share_server(self, share_server_id): """Delete share server by its ID.""" uri = "share-servers/%s" % share_server_id resp, body = self.delete(uri) self.expected_success(202, resp.status) return body def show_share_server(self, share_server_id): """Get share server info.""" uri = "share-servers/%s" % share_server_id resp, body = self.get(uri) self.expected_success(200, resp.status) return self._parse_resp(body) def show_share_server_details(self, share_server_id): """Get share server details only.""" uri = "share-servers/%s/details" % share_server_id resp, body = self.get(uri) self.expected_success(200, resp.status) return self._parse_resp(body) ############### def list_pools(self, detail=False, search_opts=None): """Get list of scheduler pools.""" uri = 'scheduler-stats/pools' if detail: uri += '/detail' if search_opts: uri += "?%s" % urlparse.urlencode(search_opts) resp, body = self.get(uri) self.expected_success(200, resp.status) return json.loads(body) ############### def list_availability_zones(self): """Get list of availability zones.""" uri = 'os-availability-zone' resp, body = self.get(uri) self.expected_success(200, resp.status) return self._parse_resp(body) manila-2.0.0/manila_tempest_tests/services/share/v2/0000775000567000056710000000000012701407265023617 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila_tempest_tests/services/share/v2/__init__.py0000664000567000056710000000000012701407107025711 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila_tempest_tests/services/share/v2/json/0000775000567000056710000000000012701407265024570 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila_tempest_tests/services/share/v2/json/__init__.py0000664000567000056710000000000012701407107026662 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila_tempest_tests/services/share/v2/json/shares_client.py0000664000567000056710000014502412701407107027766 0ustar jenkinsjenkins00000000000000# Copyright 2015 Andrew Kerr 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time from six.moves.urllib import parse as urlparse from tempest import config from tempest.lib.common.utils import data_utils from tempest.lib import exceptions from manila_tempest_tests.common import constants from manila_tempest_tests.services.share.json import shares_client from manila_tempest_tests import share_exceptions from manila_tempest_tests import utils CONF = config.CONF LATEST_MICROVERSION = CONF.share.max_api_microversion EXPERIMENTAL = {'X-OpenStack-Manila-API-Experimental': 'True'} class SharesV2Client(shares_client.SharesClient): """Tempest REST client for Manila. It handles shares and access to it in OpenStack. """ api_version = 'v2' def __init__(self, auth_provider): super(SharesV2Client, self).__init__(auth_provider) self.API_MICROVERSIONS_HEADER = 'x-openstack-manila-api-version' def inject_microversion_header(self, headers, version, extra_headers=False): """Inject the required manila microversion header.""" new_headers = self.get_headers() new_headers[self.API_MICROVERSIONS_HEADER] = version if extra_headers and headers: new_headers.update(headers) elif headers: new_headers = headers return new_headers # Overwrite all http verb calls to inject the micro version header def post(self, url, body, headers=None, extra_headers=False, version=LATEST_MICROVERSION): headers = self.inject_microversion_header(headers, version, extra_headers=extra_headers) return super(SharesV2Client, self).post(url, body, headers=headers) def get(self, url, headers=None, extra_headers=False, version=LATEST_MICROVERSION): headers = self.inject_microversion_header(headers, version, extra_headers=extra_headers) return super(SharesV2Client, self).get(url, headers=headers) def delete(self, url, headers=None, body=None, extra_headers=False, version=LATEST_MICROVERSION): headers = self.inject_microversion_header(headers, version, extra_headers=extra_headers) return super(SharesV2Client, self).delete(url, headers=headers, body=body) def patch(self, url, body, headers=None, extra_headers=False, version=LATEST_MICROVERSION): headers = self.inject_microversion_header(headers, version, extra_headers=extra_headers) return super(SharesV2Client, self).patch(url, body, headers=headers) def put(self, url, body, headers=None, extra_headers=False, version=LATEST_MICROVERSION): headers = self.inject_microversion_header(headers, version, extra_headers=extra_headers) return super(SharesV2Client, self).put(url, body, headers=headers) def head(self, url, headers=None, extra_headers=False, version=LATEST_MICROVERSION): headers = self.inject_microversion_header(headers, version, extra_headers=extra_headers) return super(SharesV2Client, self).head(url, headers=headers) def copy(self, url, headers=None, extra_headers=False, version=LATEST_MICROVERSION): headers = self.inject_microversion_header(headers, version, extra_headers=extra_headers) return super(SharesV2Client, self).copy(url, headers=headers) def reset_state(self, s_id, status="error", 
s_type="shares", headers=None, version=LATEST_MICROVERSION, action_name=None): """Resets the state of a share, snapshot, cg, or a cgsnapshot. status: available, error, creating, deleting, error_deleting s_type: shares, share_instances, snapshots, consistency-groups, cgsnapshots. """ if action_name is None: if utils.is_microversion_gt(version, "2.6"): action_name = 'reset_status' else: action_name = 'os-reset_status' body = {action_name: {"status": status}} body = json.dumps(body) resp, body = self.post("%s/%s/action" % (s_type, s_id), body, headers=headers, extra_headers=True, version=version) self.expected_success(202, resp.status) return body def force_delete(self, s_id, s_type="shares", headers=None, version=LATEST_MICROVERSION, action_name=None): """Force delete share or snapshot. s_type: shares, snapshots """ if action_name is None: if utils.is_microversion_gt(version, "2.6"): action_name = 'force_delete' else: action_name = 'os-force_delete' body = {action_name: None} body = json.dumps(body) resp, body = self.post("%s/%s/action" % (s_type, s_id), body, headers=headers, extra_headers=True, version=version) self.expected_success(202, resp.status) return body def send_microversion_request(self, version=None, script_name=None): """Prepare and send the HTTP GET Request to the base URL. Extracts the base URL from the shares_client endpoint and makes a GET request with the microversions request header. :param version: The string to send for the value of the microversion header, or None to omit the header. :param script_name: The first part of the URL (v1 or v2), or None to omit it. """ headers = self.get_headers() url, headers, body = self.auth_provider.auth_request( 'GET', 'shares', headers, None, self.filters) url = '/'.join(url.split('/')[:3]) + '/' if script_name: url += script_name + '/' if version: headers[self.API_MICROVERSIONS_HEADER] = version resp, resp_body = self.raw_request(url, 'GET', headers=headers) self.response_checker('GET', resp, resp_body) resp_body = json.loads(resp_body) return resp, resp_body def is_resource_deleted(self, *args, **kwargs): """Verifies whether provided resource deleted or not. 
:param kwargs: dict with expected keys 'share_id', 'snapshot_id', :param kwargs: 'sn_id', 'ss_id', 'vt_id' and 'server_id' :raises share_exceptions.InvalidResource """ if "share_instance_id" in kwargs: return self._is_resource_deleted( self.get_share_instance, kwargs.get("share_instance_id")) elif "cg_id" in kwargs: return self._is_resource_deleted( self.get_consistency_group, kwargs.get("cg_id")) elif "cgsnapshot_id" in kwargs: return self._is_resource_deleted( self.get_cgsnapshot, kwargs.get("cgsnapshot_id")) elif "replica_id" in kwargs: return self._is_resource_deleted( self.get_share_replica, kwargs.get("replica_id")) else: return super(SharesV2Client, self).is_resource_deleted( *args, **kwargs) ############### def create_share(self, share_protocol=None, size=1, name=None, snapshot_id=None, description=None, metadata=None, share_network_id=None, share_type_id=None, is_public=False, consistency_group_id=None, availability_zone=None, version=LATEST_MICROVERSION): metadata = metadata or {} if name is None: name = data_utils.rand_name("tempest-created-share") if description is None: description = data_utils.rand_name("tempest-created-share-desc") if share_protocol is None: share_protocol = self.share_protocol if share_protocol is None: raise share_exceptions.ShareProtocolNotSpecified() post_body = { "share": { "share_proto": share_protocol, "description": description, "snapshot_id": snapshot_id, "name": name, "size": size, "metadata": metadata, "is_public": is_public, } } if availability_zone: post_body["share"]["availability_zone"] = availability_zone if share_network_id: post_body["share"]["share_network_id"] = share_network_id if share_type_id: post_body["share"]["share_type"] = share_type_id if consistency_group_id: post_body["share"]["consistency_group_id"] = consistency_group_id body = json.dumps(post_body) resp, body = self.post("shares", body, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def list_shares(self, detailed=False, params=None, version=LATEST_MICROVERSION): """Get list of shares w/o filters.""" uri = 'shares/detail' if detailed else 'shares' uri += '?%s' % urlparse.urlencode(params) if params else '' resp, body = self.get(uri, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def list_shares_with_detail(self, params=None, version=LATEST_MICROVERSION): """Get detailed list of shares w/o filters.""" return self.list_shares(detailed=True, params=params, version=version) def get_share(self, share_id, version=LATEST_MICROVERSION): resp, body = self.get("shares/%s" % share_id, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def get_share_export_location( self, share_id, export_location_uuid, version=LATEST_MICROVERSION): resp, body = self.get( "shares/%(share_id)s/export_locations/%(el_uuid)s" % { "share_id": share_id, "el_uuid": export_location_uuid}, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def list_share_export_locations( self, share_id, version=LATEST_MICROVERSION): resp, body = self.get( "shares/%(share_id)s/export_locations" % {"share_id": share_id}, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def delete_share(self, share_id, params=None, version=LATEST_MICROVERSION): uri = "shares/%s" % share_id uri += '?%s' % (urlparse.urlencode(params) if params else '') resp, body = self.delete(uri, version=version) self.expected_success(202, resp.status) return body 
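# Illustrative usage sketch (not part of the original client module). It is
# included only to show how the microversion-aware helpers above behave:
# every HTTP verb is overridden to inject the
# 'x-openstack-manila-api-version' header, and actions such as reset_state()
# and force_delete() switch between the legacy 'os-' prefixed action names
# (microversion 2.6 or lower) and the new names (greater than 2.6). The
# names `client` and `share_id` are assumptions: an authenticated
# SharesV2Client instance and the id of an existing share.
def _example_microversion_aware_actions(client, share_id):
    # Sent with header 'x-openstack-manila-api-version: 2.7'; the request
    # body is {"reset_status": {"status": "error"}}.
    client.reset_state(share_id, status="error", version="2.7")
    # Sent with header 'x-openstack-manila-api-version: 2.6'; the request
    # body falls back to the legacy form {"os-force_delete": null}.
    client.force_delete(share_id, version="2.6")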
############### def get_instances_of_share(self, share_id, version=LATEST_MICROVERSION): resp, body = self.get("shares/%s/instances" % share_id, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def list_share_instances(self, version=LATEST_MICROVERSION): resp, body = self.get("share_instances", version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def get_share_instance(self, instance_id, version=LATEST_MICROVERSION): resp, body = self.get("share_instances/%s" % instance_id, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def get_share_instance_export_location( self, instance_id, export_location_uuid, version=LATEST_MICROVERSION): resp, body = self.get( "share_instances/%(instance_id)s/export_locations/%(el_uuid)s" % { "instance_id": instance_id, "el_uuid": export_location_uuid}, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def list_share_instance_export_locations( self, instance_id, version=LATEST_MICROVERSION): resp, body = self.get( "share_instances/%s/export_locations" % instance_id, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def wait_for_share_instance_status(self, instance_id, status, version=LATEST_MICROVERSION): """Waits for a share to reach a given status.""" body = self.get_share_instance(instance_id, version=version) instance_status = body['status'] start = int(time.time()) while instance_status != status: time.sleep(self.build_interval) body = self.get_share(instance_id) instance_status = body['status'] if instance_status == status: return elif 'error' in instance_status.lower(): raise share_exceptions. \ ShareInstanceBuildErrorException(id=instance_id) if int(time.time()) - start >= self.build_timeout: message = ('Share instance %s failed to reach %s status within' ' the required time (%s s).' % (instance_id, status, self.build_timeout)) raise exceptions.TimeoutException(message) def wait_for_share_status(self, share_id, status, status_attr='status', version=LATEST_MICROVERSION): """Waits for a share to reach a given status.""" body = self.get_share(share_id, version=version) share_status = body[status_attr] start = int(time.time()) while share_status != status: time.sleep(self.build_interval) body = self.get_share(share_id, version=version) share_status = body[status_attr] if share_status == status: return elif 'error' in share_status.lower(): raise share_exceptions.ShareBuildErrorException( share_id=share_id) if int(time.time()) - start >= self.build_timeout: message = ("Share's %(status_attr)s failed to transition to " "%(status)s within the required time %(seconds)s." 
% {"status_attr": status_attr, "status": status, "seconds": self.build_timeout}) raise exceptions.TimeoutException(message) ############### def extend_share(self, share_id, new_size, version=LATEST_MICROVERSION, action_name=None): if action_name is None: if utils.is_microversion_gt(version, "2.6"): action_name = 'extend' else: action_name = 'os-extend' post_body = { action_name: { "new_size": new_size, } } body = json.dumps(post_body) resp, body = self.post( "shares/%s/action" % share_id, body, version=version) self.expected_success(202, resp.status) return body def shrink_share(self, share_id, new_size, version=LATEST_MICROVERSION, action_name=None): if action_name is None: if utils.is_microversion_gt(version, "2.6"): action_name = 'shrink' else: action_name = 'os-shrink' post_body = { action_name: { "new_size": new_size, } } body = json.dumps(post_body) resp, body = self.post( "shares/%s/action" % share_id, body, version=version) self.expected_success(202, resp.status) return body ############### def manage_share(self, service_host, protocol, export_path, share_type_id, name=None, description=None, is_public=False, version=LATEST_MICROVERSION, url=None): post_body = { "share": { "export_path": export_path, "service_host": service_host, "protocol": protocol, "share_type": share_type_id, "name": name, "description": description, "is_public": is_public, } } if url is None: if utils.is_microversion_gt(version, "2.6"): url = 'shares/manage' else: url = 'os-share-manage' body = json.dumps(post_body) resp, body = self.post(url, body, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def unmanage_share(self, share_id, version=LATEST_MICROVERSION, url=None, action_name=None, body=None): if url is None: if utils.is_microversion_gt(version, "2.6"): url = 'shares' else: url = 'os-share-unmanage' if action_name is None: if utils.is_microversion_gt(version, "2.6"): action_name = 'action' else: action_name = 'unmanage' if body is None and utils.is_microversion_gt(version, "2.6"): body = json.dumps({'unmanage': {}}) resp, body = self.post( "%(url)s/%(share_id)s/%(action_name)s" % { 'url': url, 'share_id': share_id, 'action_name': action_name}, body, version=version) self.expected_success(202, resp.status) return body ############### def create_snapshot(self, share_id, name=None, description=None, force=False, version=LATEST_MICROVERSION): if name is None: name = data_utils.rand_name("tempest-created-share-snap") if description is None: description = data_utils.rand_name( "tempest-created-share-snap-desc") post_body = { "snapshot": { "name": name, "force": force, "description": description, "share_id": share_id, } } body = json.dumps(post_body) resp, body = self.post("snapshots", body, version=version) self.expected_success(202, resp.status) return self._parse_resp(body) def get_snapshot(self, snapshot_id, version=LATEST_MICROVERSION): resp, body = self.get("snapshots/%s" % snapshot_id, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def list_snapshots(self, detailed=False, params=None, version=LATEST_MICROVERSION): """Get list of share snapshots w/o filters.""" uri = 'snapshots/detail' if detailed else 'snapshots' uri += '?%s' % urlparse.urlencode(params) if params else '' resp, body = self.get(uri, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def list_snapshots_with_detail(self, params=None, version=LATEST_MICROVERSION): """Get detailed list of share snapshots w/o filters.""" return 
self.list_snapshots(detailed=True, params=params, version=version) def delete_snapshot(self, snap_id, version=LATEST_MICROVERSION): resp, body = self.delete("snapshots/%s" % snap_id, version=version) self.expected_success(202, resp.status) return body def wait_for_snapshot_status(self, snapshot_id, status, version=LATEST_MICROVERSION): """Waits for a snapshot to reach a given status.""" body = self.get_snapshot(snapshot_id, version=version) snapshot_name = body['name'] snapshot_status = body['status'] start = int(time.time()) while snapshot_status != status: time.sleep(self.build_interval) body = self.get_snapshot(snapshot_id, version=version) snapshot_status = body['status'] if 'error' in snapshot_status: raise (share_exceptions. SnapshotBuildErrorException(snapshot_id=snapshot_id)) if int(time.time()) - start >= self.build_timeout: message = ('Share Snapshot %s failed to reach %s status ' 'within the required time (%s s).' % (snapshot_name, status, self.build_timeout)) raise exceptions.TimeoutException(message) def manage_snapshot(self, share_id, provider_location, name=None, description=None, version=LATEST_MICROVERSION, driver_options=None): if name is None: name = data_utils.rand_name("tempest-manage-snapshot") if description is None: description = data_utils.rand_name("tempest-manage-snapshot-desc") post_body = { "snapshot": { "share_id": share_id, "provider_location": provider_location, "name": name, "description": description, "driver_options": driver_options if driver_options else {}, } } url = 'snapshots/manage' body = json.dumps(post_body) resp, body = self.post(url, body, version=version) self.expected_success(202, resp.status) return self._parse_resp(body) def unmanage_snapshot(self, snapshot_id, version=LATEST_MICROVERSION, body=None): url = 'snapshots' action_name = 'action' if body is None: body = json.dumps({'unmanage': {}}) resp, body = self.post( "%(url)s/%(snapshot_id)s/%(action_name)s" % { 'url': url, 'snapshot_id': snapshot_id, 'action_name': action_name}, body, version=version) self.expected_success(202, resp.status) return body ############### def _get_access_action_name(self, version, action): if utils.is_microversion_gt(version, "2.6"): return action.split('os-')[-1] return action def create_access_rule(self, share_id, access_type="ip", access_to="0.0.0.0", access_level=None, version=LATEST_MICROVERSION, action_name=None): post_body = { self._get_access_action_name(version, 'os-allow_access'): { "access_type": access_type, "access_to": access_to, "access_level": access_level, } } body = json.dumps(post_body) resp, body = self.post( "shares/%s/action" % share_id, body, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def list_access_rules(self, share_id, version=LATEST_MICROVERSION, action_name=None): body = {self._get_access_action_name(version, 'os-access_list'): None} resp, body = self.post( "shares/%s/action" % share_id, json.dumps(body), version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def delete_access_rule(self, share_id, rule_id, version=LATEST_MICROVERSION, action_name=None): post_body = { self._get_access_action_name(version, 'os-deny_access'): { "access_id": rule_id, } } body = json.dumps(post_body) resp, body = self.post( "shares/%s/action" % share_id, body, version=version) self.expected_success(202, resp.status) return body ############### def list_availability_zones(self, url='availability-zones', version=LATEST_MICROVERSION): """Get list of availability zones.""" if url is 
None: if utils.is_microversion_gt(version, "2.6"): url = 'availability-zones' else: url = 'os-availability-zone' resp, body = self.get(url, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) ############### def list_services(self, params=None, url=None, version=LATEST_MICROVERSION): """List services.""" if url is None: if utils.is_microversion_gt(version, "2.6"): url = 'services' else: url = 'os-services' if params: url += '?%s' % urlparse.urlencode(params) resp, body = self.get(url, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) ############### def list_share_types(self, params=None, version=LATEST_MICROVERSION): uri = 'types' if params is not None: uri += '?%s' % urlparse.urlencode(params) resp, body = self.get(uri, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def create_share_type(self, name, is_public=True, version=LATEST_MICROVERSION, **kwargs): if utils.is_microversion_gt(version, "2.6"): is_public_keyname = 'share_type_access:is_public' else: is_public_keyname = 'os-share-type-access:is_public' post_body = { 'name': name, 'extra_specs': kwargs.get('extra_specs'), is_public_keyname: is_public, } post_body = json.dumps({'share_type': post_body}) resp, body = self.post('types', post_body, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def delete_share_type(self, share_type_id, version=LATEST_MICROVERSION): resp, body = self.delete("types/%s" % share_type_id, version=version) self.expected_success(202, resp.status) return body def get_share_type(self, share_type_id, version=LATEST_MICROVERSION): resp, body = self.get("types/%s" % share_type_id, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def list_access_to_share_type(self, share_type_id, version=LATEST_MICROVERSION, action_name=None): if action_name is None: if utils.is_microversion_gt(version, "2.6"): action_name = 'share_type_access' else: action_name = 'os-share-type-access' url = 'types/%(st_id)s/%(action_name)s' % { 'st_id': share_type_id, 'action_name': action_name} resp, body = self.get(url, version=version) # [{"share_type_id": "%st_id%", "project_id": "%project_id%"}, ] self.expected_success(200, resp.status) return self._parse_resp(body) ############### def _get_quotas_url(self, version): if utils.is_microversion_gt(version, "2.6"): return 'quota-sets' return 'os-quota-sets' def default_quotas(self, tenant_id, url=None, version=LATEST_MICROVERSION): if url is None: url = self._get_quotas_url(version) url += '/%s' % tenant_id resp, body = self.get("%s/defaults" % url, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def show_quotas(self, tenant_id, user_id=None, url=None, version=LATEST_MICROVERSION): if url is None: url = self._get_quotas_url(version) url += '/%s' % tenant_id if user_id is not None: url += "?user_id=%s" % user_id resp, body = self.get(url, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def reset_quotas(self, tenant_id, user_id=None, url=None, version=LATEST_MICROVERSION): if url is None: url = self._get_quotas_url(version) url += '/%s' % tenant_id if user_id is not None: url += "?user_id=%s" % user_id resp, body = self.delete(url, version=version) self.expected_success(202, resp.status) return body def update_quotas(self, tenant_id, user_id=None, shares=None, snapshots=None, gigabytes=None, snapshot_gigabytes=None, share_networks=None, 
force=True, url=None, version=LATEST_MICROVERSION): if url is None: url = self._get_quotas_url(version) url += '/%s' % tenant_id if user_id is not None: url += "?user_id=%s" % user_id put_body = {"tenant_id": tenant_id} if force: put_body["force"] = "true" if shares is not None: put_body["shares"] = shares if snapshots is not None: put_body["snapshots"] = snapshots if gigabytes is not None: put_body["gigabytes"] = gigabytes if snapshot_gigabytes is not None: put_body["snapshot_gigabytes"] = snapshot_gigabytes if share_networks is not None: put_body["share_networks"] = share_networks put_body = json.dumps({"quota_set": put_body}) resp, body = self.put(url, put_body, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) ############### def create_consistency_group(self, name=None, description=None, share_type_ids=(), share_network_id=None, source_cgsnapshot_id=None, version=LATEST_MICROVERSION): """Create a new consistency group.""" uri = 'consistency-groups' post_body = {} if name: post_body['name'] = name if description: post_body['description'] = description if share_type_ids: post_body['share_types'] = share_type_ids if source_cgsnapshot_id: post_body['source_cgsnapshot_id'] = source_cgsnapshot_id if share_network_id: post_body['share_network_id'] = share_network_id body = json.dumps({'consistency_group': post_body}) resp, body = self.post(uri, body, headers=EXPERIMENTAL, extra_headers=True, version=version) self.expected_success(202, resp.status) return self._parse_resp(body) def delete_consistency_group(self, consistency_group_id, version=LATEST_MICROVERSION): """Delete a consistency group.""" uri = 'consistency-groups/%s' % consistency_group_id resp, body = self.delete(uri, headers=EXPERIMENTAL, extra_headers=True, version=version) self.expected_success(202, resp.status) return body def list_consistency_groups(self, detailed=False, params=None, version=LATEST_MICROVERSION): """Get list of consistency groups w/o filters.""" uri = 'consistency-groups%s' % ('/detail' if detailed else '') uri += '?%s' % (urlparse.urlencode(params) if params else '') resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def get_consistency_group(self, consistency_group_id, version=LATEST_MICROVERSION): """Get consistency group info.""" uri = 'consistency-groups/%s' % consistency_group_id resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def update_consistency_group(self, consistency_group_id, name=None, description=None, version=LATEST_MICROVERSION, **kwargs): """Update an existing consistency group.""" uri = 'consistency-groups/%s' % consistency_group_id post_body = {} if name: post_body['name'] = name if description: post_body['description'] = description if kwargs: post_body.update(kwargs) body = json.dumps({'consistency_group': post_body}) resp, body = self.put(uri, body, headers=EXPERIMENTAL, extra_headers=True, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def consistency_group_reset_state(self, id, status, version=LATEST_MICROVERSION): self.reset_state(id, status=status, s_type='consistency-groups', headers=EXPERIMENTAL, version=version) def consistency_group_force_delete(self, id, version=LATEST_MICROVERSION): self.force_delete(id, s_type='consistency-groups', headers=EXPERIMENTAL, version=version) def 
wait_for_consistency_group_status(self, consistency_group_id, status): """Waits for a consistency group to reach a given status.""" body = self.get_consistency_group(consistency_group_id) consistency_group_name = body['name'] consistency_group_status = body['status'] start = int(time.time()) while consistency_group_status != status: time.sleep(self.build_interval) body = self.get_consistency_group(consistency_group_id) consistency_group_status = body['status'] if 'error' in consistency_group_status and status != 'error': raise share_exceptions.ConsistencyGroupBuildErrorException( consistency_group_id=consistency_group_id) if int(time.time()) - start >= self.build_timeout: consistency_group_name = ( consistency_group_name if consistency_group_name else consistency_group_id ) message = ('Consistency Group %s failed to reach %s status ' 'within the required time (%s s). ' 'Current status: %s' % (consistency_group_name, status, self.build_timeout, consistency_group_status)) raise exceptions.TimeoutException(message) ############### def create_cgsnapshot(self, consistency_group_id, name=None, description=None, version=LATEST_MICROVERSION): """Create a new cgsnapshot of an existing consistency group.""" uri = 'cgsnapshots' post_body = {'consistency_group_id': consistency_group_id} if name: post_body['name'] = name if description: post_body['description'] = description body = json.dumps({'cgsnapshot': post_body}) resp, body = self.post(uri, body, headers=EXPERIMENTAL, extra_headers=True, version=version) self.expected_success(202, resp.status) return self._parse_resp(body) def delete_cgsnapshot(self, cgsnapshot_id, version=LATEST_MICROVERSION): """Delete an existing cgsnapshot.""" uri = 'cgsnapshots/%s' % cgsnapshot_id resp, body = self.delete(uri, headers=EXPERIMENTAL, extra_headers=True, version=version) self.expected_success(202, resp.status) return body def list_cgsnapshots(self, detailed=False, params=None, version=LATEST_MICROVERSION): """Get list of cgsnapshots w/o filters.""" uri = 'cgsnapshots/detail' if detailed else 'cgsnapshots' uri += '?%s' % (urlparse.urlencode(params) if params else '') resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def list_cgsnapshot_members(self, cgsnapshot_id, version=LATEST_MICROVERSION): """Get list of members of a cgsnapshots.""" uri = 'cgsnapshots/%s/members' % cgsnapshot_id resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def get_cgsnapshot(self, cgsnapshot_id, version=LATEST_MICROVERSION): """Get cgsnapshot info.""" uri = 'cgsnapshots/%s' % cgsnapshot_id resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def update_cgsnapshot(self, cgsnapshot_id, name=None, description=None, version=LATEST_MICROVERSION): """Update an existing cgsnapshot.""" uri = 'cgsnapshots/%s' % cgsnapshot_id post_body = {} if name: post_body['name'] = name if description: post_body['description'] = description body = json.dumps({'cgsnapshot': post_body}) resp, body = self.put(uri, body, headers=EXPERIMENTAL, extra_headers=True, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def cgsnapshot_reset_state(self, id, status, version=LATEST_MICROVERSION): self.reset_state(id, status=status, s_type='cgsnapshots', headers=EXPERIMENTAL, 
version=version) def cgsnapshot_force_delete(self, id, version=LATEST_MICROVERSION): self.force_delete(id, s_type='cgsnapshots', headers=EXPERIMENTAL, version=version) def wait_for_cgsnapshot_status(self, cgsnapshot_id, status): """Waits for a cgsnapshot to reach a given status.""" body = self.get_cgsnapshot(cgsnapshot_id) cgsnapshot_name = body['name'] cgsnapshot_status = body['status'] start = int(time.time()) while cgsnapshot_status != status: time.sleep(self.build_interval) body = self.get_cgsnapshot(cgsnapshot_id) cgsnapshot_status = body['status'] if 'error' in cgsnapshot_status and status != 'error': raise share_exceptions.CGSnapshotBuildErrorException( cgsnapshot_id=cgsnapshot_id) if int(time.time()) - start >= self.build_timeout: message = ('CGSnapshot %s failed to reach %s status ' 'within the required time (%s s).' % (cgsnapshot_name, status, self.build_timeout)) raise exceptions.TimeoutException(message) ############### def migrate_share(self, share_id, host, notify, version=LATEST_MICROVERSION, action_name=None): if action_name is None: if utils.is_microversion_lt(version, "2.7"): action_name = 'os-migrate_share' elif utils.is_microversion_lt(version, "2.15"): action_name = 'migrate_share' else: action_name = 'migration_start' post_body = { action_name: { 'host': host, 'notify': notify, } } body = json.dumps(post_body) return self.post('shares/%s/action' % share_id, body, headers=EXPERIMENTAL, extra_headers=True, version=version) def migration_complete(self, share_id, version=LATEST_MICROVERSION, action_name='migration_complete'): post_body = { action_name: None, } body = json.dumps(post_body) return self.post('shares/%s/action' % share_id, body, headers=EXPERIMENTAL, extra_headers=True, version=version) def migration_cancel(self, share_id, version=LATEST_MICROVERSION, action_name='migration_cancel'): post_body = { action_name: None, } body = json.dumps(post_body) return self.post('shares/%s/action' % share_id, body, headers=EXPERIMENTAL, extra_headers=True, version=version) def migration_get_progress(self, share_id, version=LATEST_MICROVERSION, action_name='migration_get_progress'): post_body = { action_name: None, } body = json.dumps(post_body) return self.post('shares/%s/action' % share_id, body, headers=EXPERIMENTAL, extra_headers=True, version=version) def reset_task_state( self, share_id, task_state, version=LATEST_MICROVERSION, action_name='reset_task_state'): post_body = { action_name: { 'task_state': task_state, } } body = json.dumps(post_body) return self.post('shares/%s/action' % share_id, body, headers=EXPERIMENTAL, extra_headers=True, version=version) def wait_for_migration_status(self, share_id, dest_host, status, version=LATEST_MICROVERSION): """Waits for a share to migrate to a certain host.""" share = self.get_share(share_id, version=version) migration_timeout = CONF.share.migration_timeout start = int(time.time()) while share['task_state'] != status: time.sleep(self.build_interval) share = self.get_share(share_id, version=version) if share['task_state'] == status: return share elif share['task_state'] == 'migration_error': raise share_exceptions.ShareMigrationException( share_id=share['id'], src=share['host'], dest=dest_host) elif int(time.time()) - start >= migration_timeout: message = ('Share %(share_id)s failed to reach status ' '%(status)s when migrating from host %(src)s to ' 'host %(dest)s within the required time ' '%(timeout)s.' 
% { 'src': share['host'], 'dest': dest_host, 'share_id': share['id'], 'timeout': self.build_timeout, 'status': status, }) raise exceptions.TimeoutException(message) ################ def create_share_replica(self, share_id, availability_zone=None, version=LATEST_MICROVERSION): """Add a share replica of an existing share.""" uri = "share-replicas" post_body = { 'share_id': share_id, 'availability_zone': availability_zone, } body = json.dumps({'share_replica': post_body}) resp, body = self.post(uri, body, headers=EXPERIMENTAL, extra_headers=True, version=version) self.expected_success(202, resp.status) return self._parse_resp(body) def get_share_replica(self, replica_id, version=LATEST_MICROVERSION): """Get the details of share_replica.""" resp, body = self.get("share-replicas/%s" % replica_id, headers=EXPERIMENTAL, extra_headers=True, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def list_share_replicas(self, share_id=None, version=LATEST_MICROVERSION): """Get list of replicas.""" uri = "share-replicas/detail" uri += ("?share_id=%s" % share_id) if share_id is not None else '' resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def list_share_replicas_summary(self, share_id=None, version=LATEST_MICROVERSION): """Get summary list of replicas.""" uri = "share-replicas" uri += ("?share_id=%s" % share_id) if share_id is not None else '' resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True, version=version) self.expected_success(200, resp.status) return self._parse_resp(body) def delete_share_replica(self, replica_id, version=LATEST_MICROVERSION): """Delete share_replica.""" uri = "share-replicas/%s" % replica_id resp, body = self.delete(uri, headers=EXPERIMENTAL, extra_headers=True, version=version) self.expected_success(202, resp.status) return body def promote_share_replica(self, replica_id, expected_status=202, version=LATEST_MICROVERSION): """Promote a share replica to active state.""" uri = "share-replicas/%s/action" % replica_id post_body = { 'promote': None, } body = json.dumps(post_body) resp, body = self.post(uri, body, headers=EXPERIMENTAL, extra_headers=True, version=version) self.expected_success(expected_status, resp.status) return self._parse_resp(body) def wait_for_share_replica_status(self, replica_id, expected_status, status_attr='status'): """Waits for a replica's status_attr to reach a given status.""" body = self.get_share_replica(replica_id) replica_status = body[status_attr] start = int(time.time()) while replica_status != expected_status: time.sleep(self.build_interval) body = self.get_share_replica(replica_id) replica_status = body[status_attr] if replica_status == expected_status: return if ('error' in replica_status and expected_status != constants.STATUS_ERROR): raise share_exceptions.ShareInstanceBuildErrorException( id=replica_id) if int(time.time()) - start >= self.build_timeout: message = ('The %(status_attr)s of Replica %(id)s failed to ' 'reach %(expected_status)s status within the ' 'required time (%(time)ss). Current ' '%(status_attr)s: %(current_status)s.' 
% { 'status_attr': status_attr, 'expected_status': expected_status, 'time': self.build_timeout, 'id': replica_id, 'current_status': replica_status, }) raise exceptions.TimeoutException(message) def reset_share_replica_status(self, replica_id, status=constants.STATUS_AVAILABLE, version=LATEST_MICROVERSION): """Reset the status.""" uri = 'share-replicas/%s/action' % replica_id post_body = { 'reset_status': { 'status': status } } body = json.dumps(post_body) resp, body = self.post(uri, body, headers=EXPERIMENTAL, extra_headers=True, version=version) self.expected_success(202, resp.status) return self._parse_resp(body) def reset_share_replica_state(self, replica_id, state=constants.REPLICATION_STATE_ACTIVE, version=LATEST_MICROVERSION): """Reset the replication state of a replica.""" uri = 'share-replicas/%s/action' % replica_id post_body = { 'reset_replica_state': { 'replica_state': state } } body = json.dumps(post_body) resp, body = self.post(uri, body, headers=EXPERIMENTAL, extra_headers=True, version=version) self.expected_success(202, resp.status) return self._parse_resp(body) def resync_share_replica(self, replica_id, expected_result=202, version=LATEST_MICROVERSION): """Force an immediate resync of the replica.""" uri = 'share-replicas/%s/action' % replica_id post_body = { 'resync': None } body = json.dumps(post_body) resp, body = self.post(uri, body, headers=EXPERIMENTAL, extra_headers=True, version=version) self.expected_success(expected_result, resp.status) return self._parse_resp(body) def force_delete_share_replica(self, replica_id, version=LATEST_MICROVERSION): """Force delete a replica.""" uri = 'share-replicas/%s/action' % replica_id post_body = { 'force_delete': None } body = json.dumps(post_body) resp, body = self.post(uri, body, headers=EXPERIMENTAL, extra_headers=True, version=version) self.expected_success(202, resp.status) return self._parse_resp(body) manila-2.0.0/manila_tempest_tests/services/__init__.py0000664000567000056710000000000012701407107024260 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila_tempest_tests/config.py0000664000567000056710000002241512701407107022161 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from __future__ import print_function from oslo_config import cfg from tempest import config # noqa service_available_group = cfg.OptGroup(name="service_available", title="Available OpenStack Services") ServiceAvailableGroup = [ cfg.BoolOpt("manila", default=True, help="Whether or not manila is expected to be available"), ] share_group = cfg.OptGroup(name="share", title="Share Service Options") ShareGroup = [ cfg.StrOpt("min_api_microversion", default="2.0", help="The minimum api microversion is configured to be the " "value of the minimum microversion supported by Manila."), cfg.StrOpt("max_api_microversion", default="2.15", help="The maximum api microversion is configured to be the " "value of the latest microversion supported by Manila."), cfg.StrOpt("region", default="", help="The share region name to use. If empty, the value " "of identity.region is used instead. If no such region " "is found in the service catalog, the first found one is " "used."), cfg.StrOpt("catalog_type", default="share", help="Catalog type of the Share service."), cfg.StrOpt('endpoint_type', default='publicURL', choices=['public', 'admin', 'internal', 'publicURL', 'adminURL', 'internalURL'], help="The endpoint type to use for the share service."), cfg.BoolOpt("multitenancy_enabled", default=True, help="This option used to determine backend driver type, " "multitenant driver uses share-networks, but " "single-tenant doesn't."), cfg.ListOpt("enable_protocols", default=["nfs", "cifs"], help="First value of list is protocol by default, " "items of list show enabled protocols at all."), cfg.ListOpt("enable_ip_rules_for_protocols", default=["nfs", "cifs", ], help="Selection of protocols, that should " "be covered with ip rule tests"), cfg.ListOpt("enable_user_rules_for_protocols", default=[], help="Selection of protocols, that should " "be covered with user rule tests"), cfg.ListOpt("enable_cert_rules_for_protocols", default=["glusterfs", ], help="Protocols that should be covered with cert rule tests."), cfg.ListOpt("enable_cephx_rules_for_protocols", default=["cephfs", ], help="Protocols to be covered with cephx rule tests."), cfg.StrOpt("username_for_user_rules", default="Administrator", help="Username, that will be used in user tests."), cfg.ListOpt("enable_ro_access_level_for_protocols", default=["nfs", ], help="List of protocols to run tests with ro access level."), # Capabilities cfg.StrOpt("capability_storage_protocol", deprecated_name="storage_protocol", default="NFS_CIFS", help="Backend protocol to target when creating volume types."), cfg.BoolOpt("capability_snapshot_support", help="Defines extra spec that satisfies specific back end " "capability called 'snapshot_support' and will be used " "for setting up custom share type. Defaults to value of " "other config option 'run_snapshot_tests'."), cfg.StrOpt("share_network_id", default="", help="Some backend drivers requires share network " "for share creation. Share network id, that will be " "used for shares. If not set, it won't be used."), cfg.StrOpt("alt_share_network_id", default="", help="Share network id, that will be used for shares" " in alt tenant. If not set, it won't be used"), cfg.StrOpt("admin_share_network_id", default="", help="Share network id, that will be used for shares" " in admin tenant. If not set, it won't be used"), cfg.BoolOpt("multi_backend", default=False, help="Runs Manila multi-backend tests."), cfg.ListOpt("backend_names", default=[], help="Names of share backends, that will be used with " "multibackend tests. 
Tempest will use first two values."), cfg.IntOpt("share_creation_retry_number", default=0, help="Defines number of retries for share creation. " "It is useful to avoid failures caused by unstable " "environment."), cfg.IntOpt("build_interval", default=3, help="Time in seconds between share availability checks."), cfg.IntOpt("build_timeout", default=500, help="Timeout in seconds to wait for a share to become" "available."), cfg.BoolOpt("suppress_errors_in_cleanup", default=False, help="Whether to suppress errors with clean up operation " "or not. There are cases when we may want to skip " "such errors and catch only test errors."), # Switching ON/OFF test suites filtered by features cfg.BoolOpt("run_quota_tests", default=True, help="Defines whether to run quota tests or not."), cfg.BoolOpt("run_extend_tests", default=True, help="Defines whether to run share extend tests or not. " "Disable this feature if used driver doesn't " "support it."), cfg.BoolOpt("run_shrink_tests", default=True, help="Defines whether to run share shrink tests or not. " "Disable this feature if used driver doesn't " "support it."), cfg.BoolOpt("run_snapshot_tests", default=True, help="Defines whether to run tests that use share snapshots " "or not. Disable this feature if used driver doesn't " "support it."), cfg.BoolOpt("run_consistency_group_tests", default=True, help="Defines whether to run consistency group tests or not. " "Disable this feature if used driver doesn't support " "it."), cfg.BoolOpt("run_replication_tests", default=False, help="Defines whether to run replication tests or not. " "Enable this feature if the driver is configured " "for replication."), cfg.BoolOpt("run_migration_tests", default=False, help="Enable or disable migration tests."), cfg.BoolOpt("run_manage_unmanage_tests", default=False, help="Defines whether to run manage/unmanage tests or not. " "These test may leave orphaned resources, so be careful " "enabling this opt."), cfg.BoolOpt("run_manage_unmanage_snapshot_tests", default=False, help="Defines whether to run manage/unmanage snapshot tests " "or not. These tests may leave orphaned resources, so be " "careful enabling this opt."), cfg.StrOpt("image_with_share_tools", default="manila-service-image", help="Image name for vm booting with nfs/smb clients tool."), cfg.StrOpt("image_username", default="manila", help="Image username."), cfg.StrOpt("image_password", help="Image password. Should be used for " "'image_with_share_tools' without Nova Metadata support."), cfg.StrOpt("client_vm_flavor_ref", default="100", help="Flavor used for client vm in scenario tests."), cfg.IntOpt("migration_timeout", default=1500, help="Time to wait for share migration before " "timing out (seconds)."), cfg.StrOpt("default_share_type_name", help="Default share type name to use in tempest tests."), cfg.StrOpt("backend_replication_type", default='none', choices=['none', 'writable', 'readable', 'dr'], help="Specify the replication type supported by the backend."), ] manila-2.0.0/manila_tempest_tests/share_exceptions.py0000664000567000056710000000502312701407107024253 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib import exceptions class ShareBuildErrorException(exceptions.TempestException): message = "Share %(share_id)s failed to build and is in ERROR status" class ShareInstanceBuildErrorException(exceptions.TempestException): message = "Share instance %(id)s failed to build and is in ERROR status" class ConsistencyGroupBuildErrorException(exceptions.TempestException): message = ("Consistency group %(consistency_group_id)s failed to build " "and is in ERROR status") class AccessRuleBuildErrorException(exceptions.TempestException): message = "Share's rule with id %(rule_id)s is in ERROR status" class SnapshotBuildErrorException(exceptions.TempestException): message = "Snapshot %(snapshot_id)s failed to build and is in ERROR status" class CGSnapshotBuildErrorException(exceptions.TempestException): message = ("CGSnapshot %(cgsnapshot_id)s failed to build and is in ERROR " "status") class ShareProtocolNotSpecified(exceptions.TempestException): message = "Share can not be created, share protocol is not specified" class ShareNetworkNotSpecified(exceptions.TempestException): message = "Share can not be created, share network not specified" class NoAvailableNetwork(exceptions.TempestException): message = "No available network for service VM" class InvalidResource(exceptions.TempestException): message = "Provided invalid resource: %(message)s" class ShareMigrationException(exceptions.TempestException): message = ("Share %(share_id)s failed to migrate from " "host %(src)s to host %(dest)s.") class ResourceReleaseFailed(exceptions.TempestException): message = "Failed to release resource '%(res_type)s' with id '%(res_id)s'." class ShareReplicationTypeException(exceptions.TempestException): message = ("Option backend_replication_type is set to incorrect value: " "%(replication_type)s") manila-2.0.0/manila_tempest_tests/common/0000775000567000056710000000000012701407265021633 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila_tempest_tests/common/constants.py0000664000567000056710000000234112701407107024214 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
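# Illustrative sketch (not part of the original module, added for clarity):
# the status and replication-state constants defined below are the values
# the tempest clients poll for. The helper assumes `client` is a
# SharesV2Client instance and `replica_id` is an existing share replica; it
# is safe to define it ahead of the assignments below because Python only
# resolves the constant name when the function is called.
def _example_wait_until_replica_is_active(client, replica_id):
    # Poll the replica until its 'replica_state' attribute reaches 'active',
    # raising a TimeoutException if it does not get there in time.
    client.wait_for_share_replica_status(
        replica_id, REPLICATION_STATE_ACTIVE, status_attr='replica_state')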
STATUS_ERROR = 'error' STATUS_AVAILABLE = 'available' STATUS_ERROR_DELETING = 'error_deleting' TEMPEST_MANILA_PREFIX = 'tempest-manila' REPLICATION_STYLE_READABLE = 'readable' REPLICATION_STYLE_WRITABLE = 'writable' REPLICATION_STYLE_DR = 'dr' REPLICATION_TYPE_CHOICES = ( REPLICATION_STYLE_READABLE, REPLICATION_STYLE_WRITABLE, REPLICATION_STYLE_DR, ) REPLICATION_PROMOTION_CHOICES = ( REPLICATION_STYLE_READABLE, REPLICATION_STYLE_DR, ) REPLICATION_STATE_ACTIVE = 'active' REPLICATION_STATE_IN_SYNC = 'in_sync' REPLICATION_STATE_OUT_OF_SYNC = 'out_of_sync' RULE_STATE_ACTIVE = 'active' RULE_STATE_OUT_OF_SYNC = 'out_of_sync' RULE_STATE_ERROR = 'error' manila-2.0.0/manila_tempest_tests/common/__init__.py0000664000567000056710000000000012701407107023725 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila_tempest_tests/README.rst0000664000567000056710000000017712701407107022032 0ustar jenkinsjenkins00000000000000==================== Tempest Integration ==================== This directory contains Tempest tests to cover Manila project. manila-2.0.0/manila.egg-info/0000775000567000056710000000000012701407265017052 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila.egg-info/requires.txt0000664000567000056710000000142412701407264021452 0ustar jenkinsjenkins00000000000000pbr>=1.6 alembic>=0.8.0 Babel>=1.3 eventlet!=0.18.3,>=0.18.2 greenlet>=0.3.2 lxml>=2.3 netaddr!=0.7.16,>=0.7.12 oslo.config>=3.7.0 oslo.context>=0.2.0 oslo.db>=4.1.0 oslo.i18n>=2.1.0 oslo.log>=1.14.0 oslo.messaging>=4.0.0 oslo.middleware>=3.0.0 oslo.policy>=0.5.0 oslo.rootwrap>=2.0.0 oslo.serialization>=1.10.0 oslo.service>=1.0.0 oslo.utils>=3.5.0 oslo.concurrency>=3.5.0 paramiko>=1.16.0 Paste PasteDeploy>=1.5.0 python-neutronclient!=4.1.0,>=2.6.0 keystoneauth1>=2.1.0 keystonemiddleware!=4.1.0,>=4.0.0 requests!=2.9.0,>=2.8.1 retrying!=1.3.0,>=1.2.3 six>=1.9.0 SQLAlchemy<1.1.0,>=1.0.10 stevedore>=1.5.0 python-cinderclient>=1.3.1 python-novaclient!=2.33.0,>=2.29.0 WebOb>=1.2.3 [:(python_version!='2.7')] Routes!=2.0,>=1.12.3 [:(python_version=='2.7')] Routes!=2.0,!=2.1,>=1.12.3 manila-2.0.0/manila.egg-info/dependency_links.txt0000664000567000056710000000000112701407264023117 0ustar jenkinsjenkins00000000000000 manila-2.0.0/manila.egg-info/not-zip-safe0000664000567000056710000000000112701407260021273 0ustar jenkinsjenkins00000000000000 manila-2.0.0/manila.egg-info/SOURCES.txt0000664000567000056710000007473012701407265020751 0ustar jenkinsjenkins00000000000000.coveragerc .testr.conf AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst babel.cfg openstack-common.conf pylintrc requirements.txt run_tests.sh setup.cfg setup.py test-requirements.txt tox.ini contrib/ci/common.sh contrib/ci/post_test_hook.sh contrib/ci/pre_test_hook.sh contrib/share_driver_hooks/README.rst contrib/share_driver_hooks/zaqar_notification.py contrib/share_driver_hooks/zaqar_notification_example_consumer.py contrib/share_driver_hooks/zaqarclientwrapper.py devstack/README.rst devstack/plugin.sh devstack/settings devstack/files/debs/manila devstack/files/rpms/manila devstack/files/rpms-suse/manila doc/.gitignore doc/Makefile doc/README.rst doc/find_autodoc_modules.sh doc/generate_autodoc_index.sh doc/ext/__init__.py doc/ext/manila_autodoc.py doc/ext/manila_todo.py doc/source/conf.py doc/source/glossary.rst doc/source/index.rst doc/source/_ga/layout.html doc/source/adminref/index.rst doc/source/adminref/intro.rst doc/source/adminref/multi_backends.rst doc/source/adminref/network_plugins.rst doc/source/adminref/quick_start.rst 
doc/source/devref/addmethod.openstackapi.rst doc/source/devref/api.rst doc/source/devref/api_microversion_dev.rst doc/source/devref/api_microversion_history.rst doc/source/devref/architecture.rst doc/source/devref/auth.rst doc/source/devref/capabilities_and_extra_specs.rst doc/source/devref/cephfs_native_driver.rst doc/source/devref/database.rst doc/source/devref/development.environment.rst doc/source/devref/driver_requirements.rst doc/source/devref/emc_isilon_driver.rst doc/source/devref/emc_vnx_driver.rst doc/source/devref/experimental_apis.rst doc/source/devref/export_location_metadata.rst doc/source/devref/fakes.rst doc/source/devref/ganesha.rst doc/source/devref/generic_driver.rst doc/source/devref/gerrit.rst doc/source/devref/glusterfs_driver.rst doc/source/devref/glusterfs_native_driver.rst doc/source/devref/gpfs_driver.rst doc/source/devref/hdfs_native_driver.rst doc/source/devref/hds_hnas_driver.rst doc/source/devref/hpe_3par_driver.rst doc/source/devref/huawei_nas_driver.rst doc/source/devref/i18n.rst doc/source/devref/index.rst doc/source/devref/intro.rst doc/source/devref/jenkins.rst doc/source/devref/launchpad.rst doc/source/devref/manila.rst doc/source/devref/netapp_cluster_mode_driver.rst doc/source/devref/pool-aware-manila-scheduler.rst doc/source/devref/rpc.rst doc/source/devref/scheduler.rst doc/source/devref/services.rst doc/source/devref/share.rst doc/source/devref/share_back_ends_feature_support_mapping.rst doc/source/devref/share_hooks.rst doc/source/devref/tegile_driver.rst doc/source/devref/threading.rst doc/source/devref/unit_tests.rst doc/source/devref/zfs_on_linux_driver.rst doc/source/images/rpc/arch.png doc/source/images/rpc/arch.svg doc/source/images/rpc/flow1.png doc/source/images/rpc/flow1.svg doc/source/images/rpc/flow2.png doc/source/images/rpc/flow2.svg doc/source/images/rpc/hds_network.jpg doc/source/images/rpc/rabt.png doc/source/images/rpc/rabt.svg doc/source/images/rpc/state.png doc/source/man/index.rst doc/source/man/manila-manage.rst etc/manila/README.manila.conf etc/manila/api-paste.ini etc/manila/logging_sample.conf etc/manila/policy.json etc/manila/rootwrap.conf etc/manila/rootwrap.d/share.filters etc/oslo-config-generator/manila.conf manila/__init__.py manila/context.py manila/exception.py manila/i18n.py manila/manager.py manila/opts.py manila/policy.py manila/quota.py manila/rpc.py manila/service.py manila/test.py manila/utils.py manila/version.py manila/wsgi.py manila.egg-info/PKG-INFO manila.egg-info/SOURCES.txt manila.egg-info/dependency_links.txt manila.egg-info/entry_points.txt manila.egg-info/not-zip-safe manila.egg-info/pbr.json manila.egg-info/requires.txt manila.egg-info/top_level.txt manila/api/__init__.py manila/api/auth.py manila/api/common.py manila/api/extensions.py manila/api/sizelimit.py manila/api/urlmap.py manila/api/versions.py manila/api/contrib/__init__.py manila/api/middleware/__init__.py manila/api/middleware/auth.py manila/api/middleware/fault.py manila/api/middleware/sizelimit.py manila/api/openstack/__init__.py manila/api/openstack/api_version_request.py manila/api/openstack/rest_api_version_history.rst manila/api/openstack/urlmap.py manila/api/openstack/versioned_method.py manila/api/openstack/wsgi.py manila/api/v1/__init__.py manila/api/v1/limits.py manila/api/v1/router.py manila/api/v1/scheduler_stats.py manila/api/v1/security_service.py manila/api/v1/share_manage.py manila/api/v1/share_metadata.py manila/api/v1/share_networks.py manila/api/v1/share_servers.py manila/api/v1/share_snapshots.py 
manila/api/v1/share_types_extra_specs.py manila/api/v1/share_unmanage.py manila/api/v1/shares.py manila/api/v2/__init__.py manila/api/v2/availability_zones.py manila/api/v2/cgsnapshots.py manila/api/v2/consistency_groups.py manila/api/v2/quota_class_sets.py manila/api/v2/quota_sets.py manila/api/v2/router.py manila/api/v2/services.py manila/api/v2/share_export_locations.py manila/api/v2/share_instance_export_locations.py manila/api/v2/share_instances.py manila/api/v2/share_replicas.py manila/api/v2/share_snapshots.py manila/api/v2/share_types.py manila/api/v2/shares.py manila/api/views/__init__.py manila/api/views/availability_zones.py manila/api/views/cgsnapshots.py manila/api/views/consistency_groups.py manila/api/views/export_locations.py manila/api/views/limits.py manila/api/views/quota_class_sets.py manila/api/views/quota_sets.py manila/api/views/scheduler_stats.py manila/api/views/security_service.py manila/api/views/services.py manila/api/views/share_instance.py manila/api/views/share_networks.py manila/api/views/share_replicas.py manila/api/views/share_servers.py manila/api/views/share_snapshots.py manila/api/views/shares.py manila/api/views/types.py manila/api/views/versions.py manila/cmd/__init__.py manila/cmd/all.py manila/cmd/api.py manila/cmd/data.py manila/cmd/manage.py manila/cmd/scheduler.py manila/cmd/share.py manila/common/__init__.py manila/common/client_auth.py manila/common/config.py manila/common/constants.py manila/compute/__init__.py manila/compute/nova.py manila/consistency_group/__init__.py manila/consistency_group/api.py manila/data/__init__.py manila/data/helper.py manila/data/manager.py manila/data/rpcapi.py manila/data/utils.py manila/db/__init__.py manila/db/api.py manila/db/base.py manila/db/migration.py manila/db/migrations/__init__.py manila/db/migrations/alembic.ini manila/db/migrations/utils.py manila/db/migrations/alembic/__init__.py manila/db/migrations/alembic/env.py manila/db/migrations/alembic/migration.py manila/db/migrations/alembic/script.py.mako manila/db/migrations/alembic/versions/162a3e673105_manila_init.py manila/db/migrations/alembic/versions/17115072e1c3_add_nova_net_id_column_to_share_networks.py manila/db/migrations/alembic/versions/1f0bd302c1a6_add_availability_zones_table.py manila/db/migrations/alembic/versions/211836bf835c_add_access_level.py manila/db/migrations/alembic/versions/293fac1130ca_add_replication_attrs.py manila/db/migrations/alembic/versions/30cb96d995fa_add_is_public_column_for_share.py manila/db/migrations/alembic/versions/323840a08dc4_add_shares_task_state.py manila/db/migrations/alembic/versions/344c1ac4747f_add_share_instance_access_rules_status.py manila/db/migrations/alembic/versions/3651e16d7c43_add_consistency_groups.py manila/db/migrations/alembic/versions/38e632621e5a_change_volume_type_to_share_type.py manila/db/migrations/alembic/versions/3a482171410f_add_drivers_private_data_table.py manila/db/migrations/alembic/versions/3db9992c30f3_transform_statuses_to_lowercase.py manila/db/migrations/alembic/versions/4ee2cf4be19a_remove_share_snapshots_export_location.py manila/db/migrations/alembic/versions/5077ffcc5f1c_add_share_instances.py manila/db/migrations/alembic/versions/5155c7077f99_add_more_network_info_attributes_to_network_allocations_table.py manila/db/migrations/alembic/versions/533646c7af38_remove_unused_attr_status.py manila/db/migrations/alembic/versions/55761e5f59c5_add_snapshot_support_extra_spec_to_share_types.py 
manila/db/migrations/alembic/versions/56cdbe267881_add_share_export_locations_table.py manila/db/migrations/alembic/versions/579c267fbb4d_add_share_instances_access_map.py manila/db/migrations/alembic/versions/59eb64046740_add_required_extra_spec.py manila/db/migrations/alembic/versions/dda6de06349_add_export_locations_metadata.py manila/db/migrations/alembic/versions/eb6d5544cbbd_add_provider_location_to_share_snapshot_instances.py manila/db/migrations/alembic/versions/ef0c02b4366_add_share_type_projects.py manila/db/sqlalchemy/__init__.py manila/db/sqlalchemy/api.py manila/db/sqlalchemy/models.py manila/db/sqlalchemy/query.py manila/hacking/__init__.py manila/hacking/checks.py manila/network/__init__.py manila/network/nova_network_plugin.py manila/network/standalone_network_plugin.py manila/network/linux/__init__.py manila/network/linux/interface.py manila/network/linux/ip_lib.py manila/network/linux/ovs_lib.py manila/network/neutron/__init__.py manila/network/neutron/api.py manila/network/neutron/constants.py manila/network/neutron/neutron_network_plugin.py manila/scheduler/__init__.py manila/scheduler/base_handler.py manila/scheduler/host_manager.py manila/scheduler/manager.py manila/scheduler/rpcapi.py manila/scheduler/scheduler_options.py manila/scheduler/drivers/__init__.py manila/scheduler/drivers/base.py manila/scheduler/drivers/chance.py manila/scheduler/drivers/filter.py manila/scheduler/drivers/simple.py manila/scheduler/filters/__init__.py manila/scheduler/filters/availability_zone.py manila/scheduler/filters/base.py manila/scheduler/filters/base_host.py manila/scheduler/filters/capabilities.py manila/scheduler/filters/capacity.py manila/scheduler/filters/consistency_group.py manila/scheduler/filters/extra_specs_ops.py manila/scheduler/filters/ignore_attempted_hosts.py manila/scheduler/filters/json.py manila/scheduler/filters/retry.py manila/scheduler/filters/share_replication.py manila/scheduler/weighers/__init__.py manila/scheduler/weighers/base.py manila/scheduler/weighers/base_host.py manila/scheduler/weighers/capacity.py manila/scheduler/weighers/pool.py manila/share/__init__.py manila/share/access.py manila/share/api.py manila/share/configuration.py manila/share/driver.py manila/share/drivers_private_data.py manila/share/hook.py manila/share/manager.py manila/share/migration.py manila/share/rpcapi.py manila/share/share_types.py manila/share/utils.py manila/share/drivers/__init__.py manila/share/drivers/generic.py manila/share/drivers/helpers.py manila/share/drivers/lvm.py manila/share/drivers/service_instance.py manila/share/drivers/cephfs/__init__.py manila/share/drivers/cephfs/cephfs_native.py manila/share/drivers/emc/__init__.py manila/share/drivers/emc/driver.py manila/share/drivers/emc/plugin_manager.py manila/share/drivers/emc/plugins/__init__.py manila/share/drivers/emc/plugins/base.py manila/share/drivers/emc/plugins/isilon/__init__.py manila/share/drivers/emc/plugins/isilon/isilon.py manila/share/drivers/emc/plugins/isilon/isilon_api.py manila/share/drivers/emc/plugins/vnx/__init__.py manila/share/drivers/emc/plugins/vnx/connection.py manila/share/drivers/emc/plugins/vnx/connector.py manila/share/drivers/emc/plugins/vnx/constants.py manila/share/drivers/emc/plugins/vnx/object_manager.py manila/share/drivers/emc/plugins/vnx/utils.py manila/share/drivers/emc/plugins/vnx/xml_api_parser.py manila/share/drivers/ganesha/__init__.py manila/share/drivers/ganesha/manager.py manila/share/drivers/ganesha/utils.py 
manila/share/drivers/ganesha/conf/00-base-export-template.conf manila/share/drivers/glusterfs/__init__.py manila/share/drivers/glusterfs/common.py manila/share/drivers/glusterfs/glusterfs_native.py manila/share/drivers/glusterfs/layout.py manila/share/drivers/glusterfs/layout_directory.py manila/share/drivers/glusterfs/layout_volume.py manila/share/drivers/glusterfs/conf/10-glusterfs-export-template.conf manila/share/drivers/hdfs/__init__.py manila/share/drivers/hdfs/hdfs_native.py manila/share/drivers/hitachi/__init__.py manila/share/drivers/hitachi/hds_hnas.py manila/share/drivers/hitachi/ssh.py manila/share/drivers/hpe/__init__.py manila/share/drivers/hpe/hpe_3par_driver.py manila/share/drivers/hpe/hpe_3par_mediator.py manila/share/drivers/huawei/__init__.py manila/share/drivers/huawei/base.py manila/share/drivers/huawei/constants.py manila/share/drivers/huawei/huawei_nas.py manila/share/drivers/huawei/huawei_utils.py manila/share/drivers/huawei/v3/__init__.py manila/share/drivers/huawei/v3/connection.py manila/share/drivers/huawei/v3/helper.py manila/share/drivers/huawei/v3/smartx.py manila/share/drivers/ibm/__init__.py manila/share/drivers/ibm/ganesha_utils.py manila/share/drivers/ibm/gpfs.py manila/share/drivers/netapp/__init__.py manila/share/drivers/netapp/common.py manila/share/drivers/netapp/options.py manila/share/drivers/netapp/utils.py manila/share/drivers/netapp/dataontap/__init__.py manila/share/drivers/netapp/dataontap/client/__init__.py manila/share/drivers/netapp/dataontap/client/api.py manila/share/drivers/netapp/dataontap/client/client_base.py manila/share/drivers/netapp/dataontap/client/client_cmode.py manila/share/drivers/netapp/dataontap/cluster_mode/__init__.py manila/share/drivers/netapp/dataontap/cluster_mode/data_motion.py manila/share/drivers/netapp/dataontap/cluster_mode/drv_multi_svm.py manila/share/drivers/netapp/dataontap/cluster_mode/drv_single_svm.py manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py manila/share/drivers/netapp/dataontap/cluster_mode/lib_multi_svm.py manila/share/drivers/netapp/dataontap/cluster_mode/lib_single_svm.py manila/share/drivers/netapp/dataontap/protocols/__init__.py manila/share/drivers/netapp/dataontap/protocols/base.py manila/share/drivers/netapp/dataontap/protocols/cifs_cmode.py manila/share/drivers/netapp/dataontap/protocols/nfs_cmode.py manila/share/drivers/quobyte/__init__.py manila/share/drivers/quobyte/jsonrpc.py manila/share/drivers/quobyte/quobyte.py manila/share/drivers/tegile/__init__.py manila/share/drivers/tegile/tegile.py manila/share/drivers/windows/__init__.py manila/share/drivers/windows/service_instance.py manila/share/drivers/windows/windows_smb_driver.py manila/share/drivers/windows/windows_smb_helper.py manila/share/drivers/windows/windows_utils.py manila/share/drivers/windows/winrm_helper.py manila/share/drivers/zfsonlinux/__init__.py manila/share/drivers/zfsonlinux/driver.py manila/share/drivers/zfsonlinux/utils.py manila/share/drivers/zfssa/__init__.py manila/share/drivers/zfssa/restclient.py manila/share/drivers/zfssa/zfssarest.py manila/share/drivers/zfssa/zfssashare.py manila/share/hooks/__init__.py manila/testing/README.rst manila/tests/__init__.py manila/tests/conf_fixture.py manila/tests/db_utils.py manila/tests/declare_conf.py manila/tests/fake_client_exception_class.py manila/tests/fake_compute.py manila/tests/fake_driver.py manila/tests/fake_network.py manila/tests/fake_notifier.py manila/tests/fake_service_instance.py manila/tests/fake_share.py manila/tests/fake_utils.py 
manila/tests/fake_volume.py manila/tests/fake_zfssa.py manila/tests/policy.json manila/tests/runtime_conf.py manila/tests/test_api.py manila/tests/test_conf.py manila/tests/test_context.py manila/tests/test_exception.py manila/tests/test_hacking.py manila/tests/test_manager.py manila/tests/test_misc.py manila/tests/test_network.py manila/tests/test_policy.py manila/tests/test_quota.py manila/tests/test_service.py manila/tests/test_test.py manila/tests/test_test_utils.py manila/tests/test_utils.py manila/tests/test_wsgi.py manila/tests/utils.py manila/tests/api/__init__.py manila/tests/api/common.py manila/tests/api/fakes.py manila/tests/api/test_common.py manila/tests/api/test_extensions.py manila/tests/api/test_middleware.py manila/tests/api/test_versions.py manila/tests/api/test_wsgi.py manila/tests/api/contrib/__init__.py manila/tests/api/contrib/stubs.py manila/tests/api/extensions/__init__.py manila/tests/api/extensions/foxinsocks.py manila/tests/api/middleware/__init__.py manila/tests/api/middleware/test_auth.py manila/tests/api/middleware/test_faults.py manila/tests/api/openstack/__init__.py manila/tests/api/openstack/test_api_version_request.py manila/tests/api/openstack/test_versioned_method.py manila/tests/api/openstack/test_wsgi.py manila/tests/api/v1/__init__.py manila/tests/api/v1/stubs.py manila/tests/api/v1/test_limits.py manila/tests/api/v1/test_scheduler_stats.py manila/tests/api/v1/test_security_service.py manila/tests/api/v1/test_share_manage.py manila/tests/api/v1/test_share_metadata.py manila/tests/api/v1/test_share_networks.py manila/tests/api/v1/test_share_servers.py manila/tests/api/v1/test_share_snapshots.py manila/tests/api/v1/test_share_types_extra_specs.py manila/tests/api/v1/test_share_unmanage.py manila/tests/api/v1/test_shares.py manila/tests/api/v2/__init__.py manila/tests/api/v2/test_availability_zones.py manila/tests/api/v2/test_cgsnapshots.py manila/tests/api/v2/test_consistency_groups.py manila/tests/api/v2/test_quota_class_sets.py manila/tests/api/v2/test_quota_sets.py manila/tests/api/v2/test_services.py manila/tests/api/v2/test_share_export_locations.py manila/tests/api/v2/test_share_instance_export_locations.py manila/tests/api/v2/test_share_instances.py manila/tests/api/v2/test_share_replicas.py manila/tests/api/v2/test_share_snapshots.py manila/tests/api/v2/test_share_types.py manila/tests/api/v2/test_shares.py manila/tests/api/views/__init__.py manila/tests/api/views/test_scheduler_stats.py manila/tests/api/views/test_share_networks.py manila/tests/api/views/test_shares.py manila/tests/api/views/test_versions.py manila/tests/cmd/__init__.py manila/tests/cmd/test_all.py manila/tests/cmd/test_api.py manila/tests/cmd/test_data.py manila/tests/cmd/test_manage.py manila/tests/cmd/test_scheduler.py manila/tests/cmd/test_share.py manila/tests/common/__init__.py manila/tests/common/test_client_auth.py manila/tests/common/test_config.py manila/tests/compute/__init__.py manila/tests/compute/test_nova.py manila/tests/consistency_group/__init__.py manila/tests/consistency_group/test_api.py manila/tests/data/__init__.py manila/tests/data/test_helper.py manila/tests/data/test_manager.py manila/tests/data/test_rpcapi.py manila/tests/data/test_utils.py manila/tests/db/__init__.py manila/tests/db/fakes.py manila/tests/db/test_api.py manila/tests/db/test_migration.py manila/tests/db/migrations/__init__.py manila/tests/db/migrations/test_utils.py manila/tests/db/migrations/alembic/__init__.py manila/tests/db/migrations/alembic/migrations_data_checks.py 
manila/tests/db/migrations/alembic/test_migration.py manila/tests/db/sqlalchemy/__init__.py manila/tests/db/sqlalchemy/test_api.py manila/tests/db/sqlalchemy/test_models.py manila/tests/integrated/__init__.py manila/tests/integrated/integrated_helpers.py manila/tests/integrated/test_extensions.py manila/tests/integrated/test_login.py manila/tests/integrated/api/__init__.py manila/tests/integrated/api/client.py manila/tests/monkey_patch_example/__init__.py manila/tests/monkey_patch_example/example_a.py manila/tests/monkey_patch_example/example_b.py manila/tests/network/__init__.py manila/tests/network/test_nova_network_plugin.py manila/tests/network/test_standalone_network_plugin.py manila/tests/network/linux/__init__.py manila/tests/network/linux/test_interface.py manila/tests/network/linux/test_ip_lib.py manila/tests/network/linux/test_ovs_lib.py manila/tests/network/neutron/__init__.py manila/tests/network/neutron/test_neutron_api.py manila/tests/network/neutron/test_neutron_plugin.py manila/tests/scheduler/__init__.py manila/tests/scheduler/fakes.py manila/tests/scheduler/test_host_manager.py manila/tests/scheduler/test_manager.py manila/tests/scheduler/test_rpcapi.py manila/tests/scheduler/test_scheduler_options.py manila/tests/scheduler/drivers/__init__.py manila/tests/scheduler/drivers/test_base.py manila/tests/scheduler/drivers/test_filter.py manila/tests/scheduler/drivers/test_simple.py manila/tests/scheduler/filters/__init__.py manila/tests/scheduler/filters/test_availability_zone.py manila/tests/scheduler/filters/test_base.py manila/tests/scheduler/filters/test_base_host.py manila/tests/scheduler/filters/test_capabilities.py manila/tests/scheduler/filters/test_capacity.py manila/tests/scheduler/filters/test_extra_specs_ops.py manila/tests/scheduler/filters/test_ignore_attempted_hosts.py manila/tests/scheduler/filters/test_json.py manila/tests/scheduler/filters/test_retry.py manila/tests/scheduler/filters/test_share_replication.py manila/tests/scheduler/weighers/__init__.py manila/tests/scheduler/weighers/test_base.py manila/tests/scheduler/weighers/test_capacity.py manila/tests/scheduler/weighers/test_pool.py manila/tests/share/__init__.py manila/tests/share/test_access.py manila/tests/share/test_api.py manila/tests/share/test_driver.py manila/tests/share/test_drivers_private_data.py manila/tests/share/test_hook.py manila/tests/share/test_manager.py manila/tests/share/test_migration.py manila/tests/share/test_rpcapi.py manila/tests/share/test_share_types.py manila/tests/share/test_share_utils.py manila/tests/share/drivers/__init__.py manila/tests/share/drivers/test_ganesha.py manila/tests/share/drivers/test_generic.py manila/tests/share/drivers/test_glusterfs.py manila/tests/share/drivers/test_helpers.py manila/tests/share/drivers/test_lvm.py manila/tests/share/drivers/test_service_instance.py manila/tests/share/drivers/cephfs/__init__.py manila/tests/share/drivers/cephfs/test_cephfs_native.py manila/tests/share/drivers/emc/__init__.py manila/tests/share/drivers/emc/test_driver.py manila/tests/share/drivers/emc/plugins/__init__.py manila/tests/share/drivers/emc/plugins/isilon/__init__.py manila/tests/share/drivers/emc/plugins/isilon/test_isilon.py manila/tests/share/drivers/emc/plugins/isilon/test_isilon_api.py manila/tests/share/drivers/emc/plugins/vnx/__init__.py manila/tests/share/drivers/emc/plugins/vnx/fakes.py manila/tests/share/drivers/emc/plugins/vnx/test_connection.py manila/tests/share/drivers/emc/plugins/vnx/test_connector.py 
manila/tests/share/drivers/emc/plugins/vnx/test_object_manager.py manila/tests/share/drivers/emc/plugins/vnx/utils.py manila/tests/share/drivers/ganesha/__init__.py manila/tests/share/drivers/ganesha/test_manager.py manila/tests/share/drivers/ganesha/test_utils.py manila/tests/share/drivers/glusterfs/__init__.py manila/tests/share/drivers/glusterfs/test_common.py manila/tests/share/drivers/glusterfs/test_glusterfs_native.py manila/tests/share/drivers/glusterfs/test_layout.py manila/tests/share/drivers/glusterfs/test_layout_directory.py manila/tests/share/drivers/glusterfs/test_layout_volume.py manila/tests/share/drivers/hdfs/__init__.py manila/tests/share/drivers/hdfs/test_hdfs_native.py manila/tests/share/drivers/hitachi/__init__.py manila/tests/share/drivers/hitachi/test_hds_hnas.py manila/tests/share/drivers/hitachi/test_ssh.py manila/tests/share/drivers/hpe/__init__.py manila/tests/share/drivers/hpe/test_hpe_3par_constants.py manila/tests/share/drivers/hpe/test_hpe_3par_driver.py manila/tests/share/drivers/hpe/test_hpe_3par_mediator.py manila/tests/share/drivers/huawei/__init__.py manila/tests/share/drivers/huawei/test_huawei_nas.py manila/tests/share/drivers/ibm/__init__.py manila/tests/share/drivers/ibm/test_ganesha_utils.py manila/tests/share/drivers/ibm/test_gpfs.py manila/tests/share/drivers/netapp/__init__.py manila/tests/share/drivers/netapp/fakes.py manila/tests/share/drivers/netapp/test_common.py manila/tests/share/drivers/netapp/test_utils.py manila/tests/share/drivers/netapp/dataontap/__init__.py manila/tests/share/drivers/netapp/dataontap/fakes.py manila/tests/share/drivers/netapp/dataontap/client/__init__.py manila/tests/share/drivers/netapp/dataontap/client/fakes.py manila/tests/share/drivers/netapp/dataontap/client/test_api.py manila/tests/share/drivers/netapp/dataontap/client/test_client_base.py manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py manila/tests/share/drivers/netapp/dataontap/cluster_mode/__init__.py manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_data_motion.py manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_driver_interfaces.py manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_base.py manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_multi_svm.py manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_single_svm.py manila/tests/share/drivers/netapp/dataontap/protocols/__init__.py manila/tests/share/drivers/netapp/dataontap/protocols/fakes.py manila/tests/share/drivers/netapp/dataontap/protocols/test_base.py manila/tests/share/drivers/netapp/dataontap/protocols/test_cifs_cmode.py manila/tests/share/drivers/netapp/dataontap/protocols/test_nfs_cmode.py manila/tests/share/drivers/quobyte/__init__.py manila/tests/share/drivers/quobyte/test_jsonrpc.py manila/tests/share/drivers/quobyte/test_quobyte.py manila/tests/share/drivers/tegile/__init__.py manila/tests/share/drivers/tegile/test_tegile.py manila/tests/share/drivers/windows/__init__.py manila/tests/share/drivers/windows/test_service_instance.py manila/tests/share/drivers/windows/test_windows_smb_driver.py manila/tests/share/drivers/windows/test_windows_smb_helper.py manila/tests/share/drivers/windows/test_windows_utils.py manila/tests/share/drivers/windows/test_winrm_helper.py manila/tests/share/drivers/zfsonlinux/__init__.py manila/tests/share/drivers/zfsonlinux/test_driver.py manila/tests/share/drivers/zfsonlinux/test_utils.py manila/tests/share/drivers/zfssa/__init__.py 
manila/tests/share/drivers/zfssa/test_zfssarest.py manila/tests/share/drivers/zfssa/test_zfssashare.py manila/tests/var/ca.crt manila/tests/var/certificate.crt manila/tests/var/privatekey.key manila/tests/volume/__init__.py manila/tests/volume/test_cinder.py manila/tests/xenapi/__init__.py manila/volume/__init__.py manila/volume/cinder.py manila_tempest_tests/README.rst manila_tempest_tests/__init__.py manila_tempest_tests/clients_share.py manila_tempest_tests/config.py manila_tempest_tests/plugin.py manila_tempest_tests/share_exceptions.py manila_tempest_tests/utils.py manila_tempest_tests/common/__init__.py manila_tempest_tests/common/constants.py manila_tempest_tests/services/__init__.py manila_tempest_tests/services/share/__init__.py manila_tempest_tests/services/share/json/__init__.py manila_tempest_tests/services/share/json/shares_client.py manila_tempest_tests/services/share/v2/__init__.py manila_tempest_tests/services/share/v2/json/__init__.py manila_tempest_tests/services/share/v2/json/shares_client.py manila_tempest_tests/tests/__init__.py manila_tempest_tests/tests/api/__init__.py manila_tempest_tests/tests/api/base.py manila_tempest_tests/tests/api/test_availability_zones.py manila_tempest_tests/tests/api/test_availability_zones_negative.py manila_tempest_tests/tests/api/test_consistency_group_actions.py manila_tempest_tests/tests/api/test_consistency_groups.py manila_tempest_tests/tests/api/test_consistency_groups_negative.py manila_tempest_tests/tests/api/test_extensions.py manila_tempest_tests/tests/api/test_limits.py manila_tempest_tests/tests/api/test_metadata.py manila_tempest_tests/tests/api/test_metadata_negative.py manila_tempest_tests/tests/api/test_microversions.py manila_tempest_tests/tests/api/test_quotas.py manila_tempest_tests/tests/api/test_quotas_negative.py manila_tempest_tests/tests/api/test_replication.py manila_tempest_tests/tests/api/test_replication_negative.py manila_tempest_tests/tests/api/test_replication_snapshots.py manila_tempest_tests/tests/api/test_rules.py manila_tempest_tests/tests/api/test_rules_negative.py manila_tempest_tests/tests/api/test_scheduler_stats_negative.py manila_tempest_tests/tests/api/test_security_services.py manila_tempest_tests/tests/api/test_security_services_mapping.py manila_tempest_tests/tests/api/test_security_services_mapping_negative.py manila_tempest_tests/tests/api/test_security_services_negative.py manila_tempest_tests/tests/api/test_share_networks.py manila_tempest_tests/tests/api/test_share_networks_negative.py manila_tempest_tests/tests/api/test_share_types_negative.py manila_tempest_tests/tests/api/test_shares.py manila_tempest_tests/tests/api/test_shares_actions.py manila_tempest_tests/tests/api/test_shares_actions_negative.py manila_tempest_tests/tests/api/test_shares_negative.py manila_tempest_tests/tests/api/admin/__init__.py manila_tempest_tests/tests/api/admin/test_admin_actions.py manila_tempest_tests/tests/api/admin/test_admin_actions_negative.py manila_tempest_tests/tests/api/admin/test_consistency_group_actions.py manila_tempest_tests/tests/api/admin/test_consistency_groups.py manila_tempest_tests/tests/api/admin/test_consistency_groups_negative.py manila_tempest_tests/tests/api/admin/test_export_locations.py manila_tempest_tests/tests/api/admin/test_export_locations_negative.py manila_tempest_tests/tests/api/admin/test_migration.py manila_tempest_tests/tests/api/admin/test_migration_negative.py manila_tempest_tests/tests/api/admin/test_multi_backend.py 
manila_tempest_tests/tests/api/admin/test_quotas.py manila_tempest_tests/tests/api/admin/test_quotas_negative.py manila_tempest_tests/tests/api/admin/test_replication.py manila_tempest_tests/tests/api/admin/test_scheduler_stats.py manila_tempest_tests/tests/api/admin/test_security_services.py manila_tempest_tests/tests/api/admin/test_services.py manila_tempest_tests/tests/api/admin/test_services_negative.py manila_tempest_tests/tests/api/admin/test_share_instances.py manila_tempest_tests/tests/api/admin/test_share_manage.py manila_tempest_tests/tests/api/admin/test_share_networks.py manila_tempest_tests/tests/api/admin/test_share_servers.py manila_tempest_tests/tests/api/admin/test_share_servers_negative.py manila_tempest_tests/tests/api/admin/test_share_types.py manila_tempest_tests/tests/api/admin/test_share_types_extra_specs.py manila_tempest_tests/tests/api/admin/test_share_types_extra_specs_negative.py manila_tempest_tests/tests/api/admin/test_share_types_negative.py manila_tempest_tests/tests/api/admin/test_shares_actions.py manila_tempest_tests/tests/api/admin/test_snapshot_manage.py manila_tempest_tests/tests/api/admin/test_snapshot_manage_negative.py manila_tempest_tests/tests/scenario/__init__.py manila_tempest_tests/tests/scenario/manager_share.py manila_tempest_tests/tests/scenario/test_share_basic_ops.py releasenotes/notes/.placeholder releasenotes/notes/add-export-locations-api-6fc6086c6a081faa.yaml releasenotes/notes/add-tegile-driver-1859114513edb13e.yaml releasenotes/notes/manage-unmanage-snapshot-bd92164472638f44.yaml releasenotes/notes/share-replication-81ecf4a32a5c83b6.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder tools/colorizer.py tools/cover.sh tools/enable-pre-commit-hook.sh tools/fast8.sh tools/install_venv.py tools/install_venv_common.py tools/lintstack.py tools/lintstack.sh tools/with_venv.shmanila-2.0.0/manila.egg-info/PKG-INFO0000664000567000056710000000302112701407264020142 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: manila Version: 2.0.0 Summary: Shared Storage for OpenStack Home-page: http://www.openstack.org/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: ====== MANILA ====== You have come across an OpenStack shared file system service. It has identified itself as "Manila." It was abstracted from the Cinder project. 
* Wiki: https://wiki.openstack.org/Manila * Developer docs: http://docs.openstack.org/developer/manila Getting Started --------------- If you'd like to run from the master branch, you can clone the git repo: git clone https://github.com/openstack/manila.git For developer information please see `HACKING.rst `_ You can raise bugs here http://bugs.launchpad.net/manila Python client ------------- https://github.com/openstack/python-manilaclient.git Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 manila-2.0.0/manila.egg-info/top_level.txt0000664000567000056710000000003412701407264021600 0ustar jenkinsjenkins00000000000000manila manila_tempest_tests manila-2.0.0/manila.egg-info/pbr.json0000664000567000056710000000005612701407264020530 0ustar jenkinsjenkins00000000000000{"git_version": "296c995", "is_release": true}manila-2.0.0/manila.egg-info/entry_points.txt0000664000567000056710000000416012701407264022350 0ustar jenkinsjenkins00000000000000[console_scripts] manila-all = manila.cmd.all:main manila-api = manila.cmd.api:main manila-data = manila.cmd.data:main manila-manage = manila.cmd.manage:main manila-rootwrap = oslo_rootwrap.cmd:main manila-scheduler = manila.cmd.scheduler:main manila-share = manila.cmd.share:main [manila.scheduler.filters] AvailabilityZoneFilter = manila.scheduler.filters.availability_zone:AvailabilityZoneFilter CapabilitiesFilter = manila.scheduler.filters.capabilities:CapabilitiesFilter CapacityFilter = manila.scheduler.filters.capacity:CapacityFilter ConsistencyGroupFilter = manila.scheduler.filters.consistency_group:ConsistencyGroupFilter IgnoreAttemptedHostsFilter = manila.scheduler.filters.ignore_attempted_hosts:IgnoreAttemptedHostsFilter JsonFilter = manila.scheduler.filters.json:JsonFilter RetryFilter = manila.scheduler.filters.retry:RetryFilter ShareReplicationFilter = manila.scheduler.filters.share_replication:ShareReplicationFilter [manila.scheduler.weighers] CapacityWeigher = manila.scheduler.weighers.capacity:CapacityWeigher PoolWeigher = manila.scheduler.weighers.pool:PoolWeigher [manila.share.drivers.emc.plugins] isilon = manila.share.drivers.emc.plugins.isilon.isilon:IsilonStorageConnection vnx = manila.share.drivers.emc.plugins.vnx.connection:VNXStorageConnection [manila.tests.scheduler.fakes] FakeWeigher1 = manila.tests.scheduler.fakes:FakeWeigher1 FakeWeigher2 = manila.tests.scheduler.fakes:FakeWeigher2 [oslo.config.opts] manila = manila.opts:list_opts [oslo.config.opts.defaults] manila = manila.common.config:set_middleware_defaults [oslo_messaging.notify.drivers] manila.openstack.common.notifier.log_notifier = oslo_messaging.notify._impl_log:LogDriver manila.openstack.common.notifier.no_op_notifier = oslo_messaging.notify._impl_noop:NoOpDriver manila.openstack.common.notifier.rpc_notifier = oslo_messaging.notify.messaging:MessagingDriver manila.openstack.common.notifier.rpc_notifier2 = oslo_messaging.notify.messaging:MessagingV2Driver manila.openstack.common.notifier.test_notifier = oslo_messaging.notify._impl_test:TestDriver [tempest.test_plugins] manila_tests = manila_tempest_tests.plugin:ManilaTempestPlugin manila-2.0.0/tools/0000775000567000056710000000000012701407265015257 5ustar 
jenkinsjenkins00000000000000manila-2.0.0/tools/lintstack.py0000775000567000056710000001645312701407107017634 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (c) 2013, AT&T Labs, Yun Mao # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """pylint error checking.""" from __future__ import print_function import cStringIO as StringIO import json import re import sys from pylint import lint from pylint.reporters import text # Note(maoy): E1103 is error code related to partial type inference ignore_codes = ["E1103"] # Note(maoy): the error message is the pattern of E0202. It should be ignored # for manila.tests modules # Note(chen): the second error message is the pattern of [E0611] # It should be ignored because use six module to keep py3.X compatibility. ignore_messages = ["An attribute affected in manila.tests", "No name 'urllib' in module '_MovedItems'"] # Note(maoy): we ignore all errors in openstack.common because it should be # checked elsewhere. We also ignore manila.tests for now due to high false # positive rate. ignore_modules = ["manila/openstack/common/", "manila/tests/"] KNOWN_PYLINT_EXCEPTIONS_FILE = "tools/pylint_exceptions" class LintOutput(object): _cached_filename = None _cached_content = None def __init__(self, filename, lineno, line_content, code, message, lintoutput): self.filename = filename self.lineno = lineno self.line_content = line_content self.code = code self.message = message self.lintoutput = lintoutput @classmethod def from_line(cls, line): m = re.search(r"(\S+):(\d+): \[(\S+)(, \S+)?] (.*)", line) matched = m.groups() filename, lineno, code, message = (matched[0], int(matched[1]), matched[2], matched[-1]) if cls._cached_filename != filename: with open(filename) as f: cls._cached_content = list(f.readlines()) cls._cached_filename = filename line_content = cls._cached_content[lineno - 1].rstrip() return cls(filename, lineno, line_content, code, message, line.rstrip()) @classmethod def from_msg_to_dict(cls, msg): """From the output of pylint msg, to a dict. Each key is a unique error identifier, value is a list of LintOutput. """ result = {} for line in msg.splitlines(): obj = cls.from_line(line) if obj.is_ignored(): continue key = obj.key() if key not in result: result[key] = [] result[key].append(obj) return result def is_ignored(self): if self.code in ignore_codes: return True if any(self.filename.startswith(name) for name in ignore_modules): return True if any(msg in self.message for msg in ignore_messages): return True return False def key(self): if self.code in ["E1101", "E1103"]: # These two types of errors are like Foo class has no member bar. # We discard the source code so that the error will be ignored # next time another Foo.bar is encountered. 
return self.message, "" return self.message, self.line_content.strip() def json(self): return json.dumps(self.__dict__) def review_str(self): return ("File %(filename)s\nLine %(lineno)d:" "%(line_content)s\n%(code)s: %(message)s" % {'filename': self.filename, 'lineno': self.lineno, 'line_content': self.line_content, 'code': self.code, 'message': self.message}) class ErrorKeys(object): @classmethod def print_json(cls, errors, output=sys.stdout): print("# automatically generated by tools/lintstack.py", file=output) for i in sorted(errors.keys()): print(json.dumps(i), file=output) @classmethod def from_file(cls, filename): keys = set() for line in open(filename): if line and line[0] != "#": d = json.loads(line) keys.add(tuple(d)) return keys def run_pylint(): buff = StringIO.StringIO() reporter = text.ParseableTextReporter(output=buff) args = ["--include-ids=y", "-E", "manila"] lint.Run(args, reporter=reporter, exit=False) val = buff.getvalue() buff.close() return val def generate_error_keys(msg=None): print("Generating", KNOWN_PYLINT_EXCEPTIONS_FILE) if msg is None: msg = run_pylint() errors = LintOutput.from_msg_to_dict(msg) with open(KNOWN_PYLINT_EXCEPTIONS_FILE, "w") as f: ErrorKeys.print_json(errors, output=f) def check(): print("Running pylint. Be patient...") newmsg = run_pylint() errors = LintOutput.from_msg_to_dict(newmsg) passed = True for err_key, err_list in errors.items(): for err in err_list: print(err.review_str() + "\n") passed = False if passed: print("Congrats! pylint check passed.") else: print("\nPlease fix the errors above. If you believe they are false " "positives, run 'tools/lintstack.py generate' to overwrite.") sys.exit(1) def validate(newmsg=None): print("Loading", KNOWN_PYLINT_EXCEPTIONS_FILE) known = ErrorKeys.from_file(KNOWN_PYLINT_EXCEPTIONS_FILE) if newmsg is None: print("Running pylint. Be patient...") newmsg = run_pylint() errors = LintOutput.from_msg_to_dict(newmsg) print("Unique errors reported by pylint: was %d, now %d." % (len(known), len(errors))) passed = True for err_key, err_list in errors.items(): for err in err_list: if err_key not in known: print(err.lintoutput) print() passed = False if passed: print("Congrats! pylint check passed.") redundant = known - set(errors.keys()) if redundant: print("Extra credit: some known pylint exceptions disappeared.") for i in sorted(redundant): print(json.dumps(i)) print("Consider regenerating the exception file if you will.") else: print("Please fix the errors above. If you believe they are false " "positives, run 'tools/lintstack.py generate' to overwrite.") sys.exit(1) def usage(): print("""Usage: tools/lintstack.py [generate|validate] To generate pylint_exceptions file: tools/lintstack.py generate To validate the current commit: tools/lintstack.py """) def main(): option = "validate" if len(sys.argv) > 1: option = sys.argv[1] if option == "generate": generate_error_keys() elif option == "validate": validate() elif option == "check": check() else: usage() if __name__ == "__main__": main() manila-2.0.0/tools/fast8.sh0000775000567000056710000000045312701407107016640 0ustar jenkinsjenkins00000000000000#!/bin/bash cd $(dirname "$0")/.. 
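# Check only the files touched by the most recent commit: collect them, skip
# any that have since been deleted, and run flake8 over the changed hunks only.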
CHANGED=$(git diff --name-only HEAD~1 | tr '\n' ' ') # Skip files that don't exist # (have been git rm'd) CHECK="" for FILE in $CHANGED; do if [ -f "$FILE" ]; then CHECK="$CHECK $FILE" fi done diff -u --from-file /dev/null $CHECK | flake8 --diff manila-2.0.0/tools/install_venv_common.py0000664000567000056710000001350712701407107021706 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Provides methods needed by installation script for OpenStack development virtual environments. Since this script is used to bootstrap a virtualenv from the system's Python environment, it should be kept strictly compatible with Python 2.6. Synced in from openstack-common """ from __future__ import print_function import optparse import os import subprocess import sys class InstallVenv(object): def __init__(self, root, venv, requirements, test_requirements, py_version, project): self.root = root self.venv = venv self.requirements = requirements self.test_requirements = test_requirements self.py_version = py_version self.project = project def die(self, message, *args): print(message % args, file=sys.stderr) sys.exit(1) def check_python_version(self): if sys.version_info < (2, 6): self.die("Need Python Version >= 2.6") def run_command_with_code(self, cmd, redirect_output=True, check_exit_code=True): """Runs a command in an out-of-process shell. Returns the output of that command. Working directory is self.root. """ if redirect_output: stdout = subprocess.PIPE else: stdout = None proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout) output = proc.communicate()[0] if check_exit_code and proc.returncode != 0: self.die('Command "%s" failed.\n%s', ' '.join(cmd), output) return (output, proc.returncode) def run_command(self, cmd, redirect_output=True, check_exit_code=True): return self.run_command_with_code(cmd, redirect_output, check_exit_code)[0] def get_distro(self): if (os.path.exists('/etc/fedora-release') or os.path.exists('/etc/redhat-release')): return Fedora( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) else: return Distro( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) def check_dependencies(self): self.get_distro().install_virtualenv() def create_virtualenv(self, no_site_packages=True): """Creates the virtual environment and installs PIP. Creates the virtual environment and installs PIP only into the virtual environment. 
""" if not os.path.isdir(self.venv): print('Creating venv...', end=' ') if no_site_packages: self.run_command(['virtualenv', '-q', '--no-site-packages', self.venv]) else: self.run_command(['virtualenv', '-q', self.venv]) print('done.') else: print("venv already exists...") pass def pip_install(self, *args): self.run_command(['tools/with_venv.sh', 'pip', 'install', '--upgrade'] + list(args), redirect_output=False) def install_dependencies(self): print('Installing dependencies with pip (this can take a while)...') # First things first, make sure our venv has the latest pip and # setuptools and pbr self.pip_install('pip>=1.4') self.pip_install('setuptools') self.pip_install('pbr') self.pip_install('-r', self.requirements, '-r', self.test_requirements) def parse_args(self, argv): """Parses command-line arguments.""" parser = optparse.OptionParser() parser.add_option('-n', '--no-site-packages', action='store_true', help="Do not inherit packages from global Python " "install.") return parser.parse_args(argv[1:])[0] class Distro(InstallVenv): def check_cmd(self, cmd): return bool(self.run_command(['which', cmd], check_exit_code=False).strip()) def install_virtualenv(self): if self.check_cmd('virtualenv'): return if self.check_cmd('easy_install'): print('Installing virtualenv via easy_install...', end=' ') if self.run_command(['easy_install', 'virtualenv']): print('Succeeded') return else: print('Failed') self.die('ERROR: virtualenv not found.\n\n%s development' ' requires virtualenv, please install it using your' ' favorite package management tool' % self.project) class Fedora(Distro): """This covers all Fedora-based distributions. Includes: Fedora, RHEL, CentOS, Scientific Linux """ def check_pkg(self, pkg): return self.run_command_with_code(['rpm', '-q', pkg], check_exit_code=False)[1] == 0 def install_virtualenv(self): if self.check_cmd('virtualenv'): return if not self.check_pkg('python-virtualenv'): self.die("Please install 'python-virtualenv'.") super(Fedora, self).install_virtualenv() manila-2.0.0/tools/install_venv.py0000664000567000056710000000454112701407107020334 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2010 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import sys import install_venv_common as install_venv # noqa def print_help(venv, root): help = """ OpenStack development environment setup is complete. OpenStack development uses virtualenv to track and manage Python dependencies while in development and testing. To activate the OpenStack virtualenv for the extent of your current shell session you can run: $ source %s/bin/activate Or, if you prefer, you can run commands in the virtualenv on a case by case basis by running: $ %s/tools/with_venv.sh Also, make test will automatically use the virtualenv. 
""" print(help % (venv, root)) def main(argv): root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) if os.environ.get('TOOLS_PATH'): root = os.environ['TOOLS_PATH'] venv = os.path.join(root, '.venv') if os.environ.get('VENV'): venv = os.environ['VENV'] pip_requires = os.path.join(root, 'requirements.txt') test_requires = os.path.join(root, 'test-requirements.txt') py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) project = 'OpenStack' install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, py_version, project) options = install.parse_args(argv) install.check_python_version() install.check_dependencies() install.create_virtualenv(no_site_packages=options.no_site_packages) install.install_dependencies() print_help(venv, root) if __name__ == '__main__': main(sys.argv) manila-2.0.0/tools/with_venv.sh0000775000567000056710000000030612701407107017621 0ustar jenkinsjenkins00000000000000#!/bin/bash TOOLS_PATH=${TOOLS_PATH:-$(dirname $0)/../} VENV_PATH=${VENV_PATH:-${TOOLS_PATH}} VENV_DIR=${VENV_DIR:-/.venv} VENV=${VENV:-${VENV_PATH}/${VENV_DIR}} source ${VENV}/bin/activate && "$@" manila-2.0.0/tools/lintstack.sh0000775000567000056710000000420612701407107017607 0ustar jenkinsjenkins00000000000000#!/usr/bin/env bash # Copyright (c) 2012-2013, AT&T Labs, Yun Mao # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Use lintstack.py to compare pylint errors. # We run pylint twice, once on HEAD, once on the code before the latest # commit for review. set -e TOOLS_DIR=$(cd $(dirname "$0") && pwd) # Get the current branch name. GITHEAD=`git rev-parse --abbrev-ref HEAD` if [[ "$GITHEAD" == "HEAD" ]]; then # In detached head mode, get revision number instead GITHEAD=`git rev-parse HEAD` echo "Currently we are at commit $GITHEAD" else echo "Currently we are at branch $GITHEAD" fi cp -f $TOOLS_DIR/lintstack.py $TOOLS_DIR/lintstack.head.py if git rev-parse HEAD^2 2>/dev/null; then # The HEAD is a Merge commit. Here, the patch to review is # HEAD^2, the master branch is at HEAD^1, and the patch was # written based on HEAD^2~1. PREV_COMMIT=`git rev-parse HEAD^2~1` git checkout HEAD~1 # The git merge is necessary for reviews with a series of patches. # If not, this is a no-op so won't hurt either. git merge $PREV_COMMIT else # The HEAD is not a merge commit. This won't happen on gerrit. # Most likely you are running against your own patch locally. # We assume the patch to examine is HEAD, and we compare it against # HEAD~1 git checkout HEAD~1 fi # First generate tools/pylint_exceptions from HEAD~1 $TOOLS_DIR/lintstack.head.py generate # Then use that as a reference to compare against HEAD git checkout $GITHEAD $TOOLS_DIR/lintstack.head.py echo "Check passed. FYI: the pylint exceptions are:" cat $TOOLS_DIR/pylint_exceptions manila-2.0.0/tools/cover.sh0000775000567000056710000000460512701407107016734 0ustar jenkinsjenkins00000000000000#!/bin/bash # # Copyright 2015: Mirantis Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ALLOWED_EXTRA_MISSING=4 TESTR_ARGS="$*" show_diff () { head -1 $1 diff -U 0 $1 $2 | sed 1,2d } # Stash uncommited changes, checkout master and save coverage report uncommited=$(git status --porcelain | grep -v "^??") [[ -n $uncommited ]] && git stash > /dev/null git checkout HEAD^ baseline_report=$(mktemp -t manila_coverageXXXXXXX) find . -type f -name "*.pyc" -delete && python setup.py testr --coverage --testr-args="$TESTR_ARGS" coverage report --ignore-errors > $baseline_report baseline_missing=$(awk 'END { print $3 }' $baseline_report) # Checkout back and unstash uncommited changes (if any) git checkout - [[ -n $uncommited ]] && git stash pop > /dev/null # Generate and save coverage report current_report=$(mktemp -t manila_coverageXXXXXXX) find . -type f -name "*.pyc" -delete && python setup.py testr --coverage --testr-args="$TESTR_ARGS" coverage report --ignore-errors > $current_report current_missing=$(awk 'END { print $3 }' $current_report) # Show coverage details allowed_missing=$((baseline_missing+ALLOWED_EXTRA_MISSING)) echo "Allowed to introduce missing lines : ${ALLOWED_EXTRA_MISSING}" echo "Missing lines in master : ${baseline_missing}" echo "Missing lines in proposed change : ${current_missing}" if [ $allowed_missing -gt $current_missing ]; then if [ $baseline_missing -lt $current_missing ]; then show_diff $baseline_report $current_report echo "I believe you can cover all your code with 100% coverage!" else echo "Thank you! You are awesome! Keep writing unit tests! :)" fi exit_code=0 else show_diff $baseline_report $current_report echo "Please write more unit tests, we should keep our test coverage :( " exit_code=1 fi rm $baseline_report $current_report exit $exit_code manila-2.0.0/tools/colorizer.py0000775000567000056710000002735312701407107017651 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (c) 2013, Nebula, Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Colorizer Code is borrowed from Twisted: # Copyright (c) 2001-2010 Twisted Matrix Laboratories. 
# # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """Display a subunit stream through a colorized unittest test runner.""" import heapq import sys import unittest import six import subunit import testtools class _AnsiColorizer(object): """Colorizer allows callers to write text in a particular color. A colorizer is an object that loosely wraps around a stream, allowing callers to write text to the stream in a particular color. Colorizer classes must implement C{supported()} and C{write(text, color)}. """ _colors = dict(black=30, red=31, green=32, yellow=33, blue=34, magenta=35, cyan=36, white=37) def __init__(self, stream): self.stream = stream def supported(cls, stream=sys.stdout): """Check is the current platform supports coloring terminal output. A class method that returns True if the current platform supports coloring terminal output using this method. Returns False otherwise. """ if not stream.isatty(): return False # auto color only on TTYs try: import curses except ImportError: return False else: try: try: return curses.tigetnum("colors") > 2 except curses.error: curses.setupterm() return curses.tigetnum("colors") > 2 except Exception: # guess false in case of error return False supported = classmethod(supported) def write(self, text, color): """Write the given text to the stream in the given color. @param text: Text to be written to the stream. @param color: A string label for a color. e.g. 'red', 'white'. 
""" color = self._colors[color] self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text)) class _Win32Colorizer(object): """See _AnsiColorizer docstring.""" def __init__(self, stream): import win32console red, green, blue, bold = (win32console.FOREGROUND_RED, win32console.FOREGROUND_GREEN, win32console.FOREGROUND_BLUE, win32console.FOREGROUND_INTENSITY) self.stream = stream self.screenBuffer = win32console.GetStdHandle( win32console.STD_OUT_HANDLE) self._colors = { 'normal': red | green | blue, 'red': red | bold, 'green': green | bold, 'blue': blue | bold, 'yellow': red | green | bold, 'magenta': red | blue | bold, 'cyan': green | blue | bold, 'white': red | green | blue | bold, } def supported(cls, stream=sys.stdout): try: import win32console screenBuffer = win32console.GetStdHandle( win32console.STD_OUT_HANDLE) except ImportError: return False import pywintypes try: screenBuffer.SetConsoleTextAttribute( win32console.FOREGROUND_RED | win32console.FOREGROUND_GREEN | win32console.FOREGROUND_BLUE) except pywintypes.error: return False else: return True supported = classmethod(supported) def write(self, text, color): color = self._colors[color] self.screenBuffer.SetConsoleTextAttribute(color) self.stream.write(text) self.screenBuffer.SetConsoleTextAttribute(self._colors['normal']) class _NullColorizer(object): """See _AnsiColorizer docstring.""" def __init__(self, stream): self.stream = stream def supported(cls, stream=sys.stdout): return True supported = classmethod(supported) def write(self, text, color): self.stream.write(text) def get_elapsed_time_color(elapsed_time): if elapsed_time > 1.0: return 'red' elif elapsed_time > 0.25: return 'yellow' else: return 'green' class OpenStackTestResult(testtools.TestResult): def __init__(self, stream, descriptions, verbosity): super(OpenStackTestResult, self).__init__() self.stream = stream self.showAll = verbosity > 1 self.num_slow_tests = 10 self.slow_tests = [] # this is a fixed-sized heap self.colorizer = None # NOTE(vish): reset stdout for the terminal check stdout = sys.stdout sys.stdout = sys.__stdout__ for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]: if colorizer.supported(): self.colorizer = colorizer(self.stream) break sys.stdout = stdout self.start_time = None self.last_time = {} self.results = {} self.last_written = None def _writeElapsedTime(self, elapsed): color = get_elapsed_time_color(elapsed) self.colorizer.write(" %.2f" % elapsed, color) def _addResult(self, test, *args): try: name = test.id() except AttributeError: name = 'Unknown.unknown' test_class, test_name = name.rsplit('.', 1) elapsed = (self._now() - self.start_time).total_seconds() item = (elapsed, test_class, test_name) if len(self.slow_tests) >= self.num_slow_tests: heapq.heappushpop(self.slow_tests, item) else: heapq.heappush(self.slow_tests, item) self.results.setdefault(test_class, []) self.results[test_class].append((test_name, elapsed) + args) self.last_time[test_class] = self._now() self.writeTests() def _writeResult(self, test_name, elapsed, long_result, color, short_result, success): if self.showAll: self.stream.write(' %s' % str(test_name).ljust(66)) self.colorizer.write(long_result, color) if success: self._writeElapsedTime(elapsed) self.stream.writeln() else: self.colorizer.write(short_result, color) def addSuccess(self, test): super(OpenStackTestResult, self).addSuccess(test) self._addResult(test, 'OK', 'green', '.', True) def addFailure(self, test, err): if test.id() == 'process-returncode': return super(OpenStackTestResult, 
self).addFailure(test, err) self._addResult(test, 'FAIL', 'red', 'F', False) def addError(self, test, err): super(OpenStackTestResult, self).addFailure(test, err) self._addResult(test, 'ERROR', 'red', 'E', False) def addSkip(self, test, reason=None, details=None): super(OpenStackTestResult, self).addSkip(test, reason, details) self._addResult(test, 'SKIP', 'blue', 'S', True) def startTest(self, test): self.start_time = self._now() super(OpenStackTestResult, self).startTest(test) def writeTestCase(self, cls): if not self.results.get(cls): return if cls != self.last_written: self.colorizer.write(cls, 'white') self.stream.writeln() for result in self.results[cls]: self._writeResult(*result) del self.results[cls] self.stream.flush() self.last_written = cls def writeTests(self): time = self.last_time.get(self.last_written, self._now()) if not self.last_written or (self._now() - time).total_seconds() > 2.0: diff = 3.0 while diff > 2.0: classes = self.results.keys() oldest = min(classes, key=lambda x: self.last_time[x]) diff = (self._now() - self.last_time[oldest]).total_seconds() self.writeTestCase(oldest) else: self.writeTestCase(self.last_written) def done(self): self.stopTestRun() def stopTestRun(self): for cls in list(six.iterkeys(self.results)): self.writeTestCase(cls) self.stream.writeln() self.writeSlowTests() def writeSlowTests(self): # Pare out 'fast' tests slow_tests = [item for item in self.slow_tests if get_elapsed_time_color(item[0]) != 'green'] if slow_tests: slow_total_time = sum(item[0] for item in slow_tests) slow = ("Slowest %i tests took %.2f secs:" % (len(slow_tests), slow_total_time)) self.colorizer.write(slow, 'yellow') self.stream.writeln() last_cls = None # sort by name for elapsed, cls, name in sorted(slow_tests, key=lambda x: x[1] + x[2]): if cls != last_cls: self.colorizer.write(cls, 'white') self.stream.writeln() last_cls = cls self.stream.write(' %s' % str(name).ljust(68)) self._writeElapsedTime(elapsed) self.stream.writeln() def printErrors(self): if self.showAll: self.stream.writeln() self.printErrorList('ERROR', self.errors) self.printErrorList('FAIL', self.failures) def printErrorList(self, flavor, errors): for test, err in errors: self.colorizer.write("=" * 70, 'red') self.stream.writeln() self.colorizer.write(flavor, 'red') self.stream.writeln(": %s" % test.id()) self.colorizer.write("-" * 70, 'red') self.stream.writeln() self.stream.writeln("%s" % err) test = subunit.ProtocolTestCase(sys.stdin, passthrough=None) if sys.version_info[0:2] <= (2, 6): runner = unittest.TextTestRunner(verbosity=2) else: runner = unittest.TextTestRunner(verbosity=2, resultclass=OpenStackTestResult) if runner.run(test).wasSuccessful(): exit_code = 0 else: exit_code = 1 sys.exit(exit_code) manila-2.0.0/tools/enable-pre-commit-hook.sh0000775000567000056710000000232012701407107022044 0ustar jenkinsjenkins00000000000000#!/bin/sh # Copyright 2011 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
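# Illustrative usage (assumed invocation; adjust the path to your checkout):
# run this script from the repository root, or from tools/, since the check
# below climbs one directory looking for .git:
#
#     $ ./tools/enable-pre-commit-hook.sh
#
# The generated .git/hooks/pre-commit hook simply execs "./run_tests.sh -N -p"
# before every commit (in the usual OpenStack run_tests.sh convention, -N
# skips the virtualenv and -p runs only the style checks), so a commit is
# blocked when that check fails.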
PRE_COMMIT_SCRIPT=.git/hooks/pre-commit make_hook() { echo "exec ./run_tests.sh -N -p" >> $PRE_COMMIT_SCRIPT chmod +x $PRE_COMMIT_SCRIPT if [ -w $PRE_COMMIT_SCRIPT -a -x $PRE_COMMIT_SCRIPT ]; then echo "pre-commit hook was created successfully" else echo "unable to create pre-commit hook" fi } # NOTE(jk0): Make sure we are in manila's root directory before adding the hook. if [ ! -d ".git" ]; then echo "unable to find .git; moving up a directory" cd .. if [ -d ".git" ]; then make_hook else echo "still unable to find .git; hook not created" fi else make_hook fi manila-2.0.0/manila/0000775000567000056710000000000012701407265015360 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/test.py0000664000567000056710000003045012701407107016706 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base classes for our unit tests. Allows overriding of flags for use of fakes, and some black magic for inline callbacks. """ import os import shutil import uuid import fixtures import mock from oslo_concurrency import lockutils from oslo_config import cfg from oslo_config import fixture as config_fixture import oslo_i18n from oslo_messaging import conffixture as messaging_conffixture import oslotest.base as base_test from manila.db import migration from manila.db.sqlalchemy import api as db_api from manila.db.sqlalchemy import models as db_models from manila import rpc from manila import service from manila.tests import conf_fixture from manila.tests import fake_notifier test_opts = [ cfg.StrOpt('sqlite_clean_db', default='clean.sqlite', help='File name of clean sqlite database.'), ] CONF = cfg.CONF CONF.register_opts(test_opts) _DB_CACHE = None class Database(fixtures.Fixture): def __init__(self, db_session, db_migrate, sql_connection, sqlite_db, sqlite_clean_db): self.sql_connection = sql_connection self.sqlite_db = sqlite_db self.sqlite_clean_db = sqlite_clean_db self.engine = db_session.get_engine() self.engine.dispose() conn = self.engine.connect() if sql_connection == "sqlite://": self.setup_sqlite(db_migrate) else: testdb = os.path.join(CONF.state_path, sqlite_db) db_migrate.upgrade('head') if os.path.exists(testdb): return if sql_connection == "sqlite://": conn = self.engine.connect() self._DB = "".join(line for line in conn.connection.iterdump()) self.engine.dispose() else: cleandb = os.path.join(CONF.state_path, sqlite_clean_db) shutil.copyfile(testdb, cleandb) def setUp(self): super(Database, self).setUp() if self.sql_connection == "sqlite://": conn = self.engine.connect() conn.connection.executescript(self._DB) self.addCleanup(self.engine.dispose) # pylint: disable=E1101 else: shutil.copyfile( os.path.join(CONF.state_path, self.sqlite_clean_db), os.path.join(CONF.state_path, self.sqlite_db), ) def setup_sqlite(self, db_migrate): if db_migrate.version(): return db_models.BASE.metadata.create_all(self.engine) 
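        # NOTE: the schema was just created directly from the SQLAlchemy
        # models above; db_migrate.stamp('head') below records the latest
        # migration revision without running the individual migration
        # scripts, so later version checks treat this sqlite database as
        # fully migrated.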
db_migrate.stamp('head') class TestCase(base_test.BaseTestCase): """Test case base class for all unit tests.""" def setUp(self): """Run before each test method to initialize test environment.""" super(TestCase, self).setUp() oslo_i18n.enable_lazy(enable=False) conf_fixture.set_defaults(CONF) CONF([], default_config_files=[]) global _DB_CACHE if not _DB_CACHE: _DB_CACHE = Database( db_api, migration, sql_connection=CONF.database.connection, sqlite_db=CONF.sqlite_db, sqlite_clean_db=CONF.sqlite_clean_db, ) self.useFixture(_DB_CACHE) self.injected = [] self._services = [] self.flags(fatal_exception_format_errors=True) # This will be cleaned up by the NestedTempfile fixture lock_path = self.useFixture(fixtures.TempDir()).path self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF)) self.fixture.config(lock_path=lock_path, group='oslo_concurrency') self.fixture.config( disable_process_locking=True, group='oslo_concurrency') rpc.add_extra_exmods('manila.tests') self.addCleanup(rpc.clear_extra_exmods) self.addCleanup(rpc.cleanup) self.messaging_conf = messaging_conffixture.ConfFixture(CONF) self.messaging_conf.transport_driver = 'fake' self.messaging_conf.response_timeout = 15 self.useFixture(self.messaging_conf) rpc.init(CONF) mock.patch('keystoneauth1.loading.load_auth_from_conf_options').start() fake_notifier.stub_notifier(self) def tearDown(self): """Runs after each test method to tear down test environment.""" super(TestCase, self).tearDown() # Reset any overridden flags CONF.reset() # Stop any timers for x in self.injected: try: x.stop() except AssertionError: pass # Kill any services for x in self._services: try: x.kill() except Exception: pass # Delete attributes that don't start with _ so they don't pin # memory around unnecessarily for the duration of the test # suite for key in [k for k in self.__dict__.keys() if k[0] != '_']: del self.__dict__[key] def flags(self, **kw): """Override flag variables for a test.""" for k, v in kw.items(): CONF.set_override(k, v) def start_service(self, name, host=None, **kwargs): host = host and host or uuid.uuid4().hex kwargs.setdefault('host', host) kwargs.setdefault('binary', 'manila-%s' % name) svc = service.Service.create(**kwargs) svc.start() self._services.append(svc) return svc def mock_object(self, obj, attr_name, new_attr=None, **kwargs): """Use python mock to mock an object attribute Mocks the specified objects attribute with the given value. Automatically performs 'addCleanup' for the mock. """ if not new_attr: new_attr = mock.Mock() patcher = mock.patch.object(obj, attr_name, new_attr, **kwargs) patcher.start() self.addCleanup(patcher.stop) return new_attr def mock_class(self, class_name, new_val=None, **kwargs): """Use python mock to mock a class Mocks the specified objects attribute with the given value. Automatically performs 'addCleanup' for the mock. """ if not new_val: new_val = mock.Mock() patcher = mock.patch(class_name, new_val, **kwargs) patcher.start() self.addCleanup(patcher.stop) return new_val # Useful assertions def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001): """Assert two dicts are equivalent. This is a 'deep' match in the sense that it handles nested dictionaries appropriately. NOTE: If you don't care (or don't know) a given value, you can specify the string DONTCARE as the value. This will cause that dict-item to be skipped. """ def raise_assertion(msg): d1str = str(d1) d2str = str(d2) base_msg = ('Dictionaries do not match. 
%(msg)s d1: %(d1str)s ' 'd2: %(d2str)s' % {"msg": msg, "d1str": d1str, "d2str": d2str}) raise AssertionError(base_msg) d1keys = set(d1.keys()) d2keys = set(d2.keys()) if d1keys != d2keys: d1only = d1keys - d2keys d2only = d2keys - d1keys raise_assertion('Keys in d1 and not d2: %(d1only)s. ' 'Keys in d2 and not d1: %(d2only)s' % {"d1only": d1only, "d2only": d2only}) for key in d1keys: d1value = d1[key] d2value = d2[key] try: error = abs(float(d1value) - float(d2value)) within_tolerance = error <= tolerance except (ValueError, TypeError): # If both values aren't convertible to float, just ignore # ValueError if arg is a str, TypeError if it's something else # (like None) within_tolerance = False if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'): self.assertDictMatch(d1value, d2value) elif 'DONTCARE' in (d1value, d2value): continue elif approx_equal and within_tolerance: continue elif d1value != d2value: raise_assertion("d1['%(key)s']=%(d1value)s != " "d2['%(key)s']=%(d2value)s" % { "key": key, "d1value": d1value, "d2value": d2value }) def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001): """Assert a list of dicts are equivalent.""" def raise_assertion(msg): L1str = str(L1) L2str = str(L2) base_msg = ('List of dictionaries do not match: %(msg)s ' 'L1: %(L1str)s L2: %(L2str)s' % {"msg": msg, "L1str": L1str, "L2str": L2str}) raise AssertionError(base_msg) L1count = len(L1) L2count = len(L2) if L1count != L2count: raise_assertion('Length mismatch: len(L1)=%(L1count)d != ' 'len(L2)=%(L2count)d' % {"L1count": L1count, "L2count": L2count}) for d1, d2 in zip(L1, L2): self.assertDictMatch(d1, d2, approx_equal=approx_equal, tolerance=tolerance) def assertSubDictMatch(self, sub_dict, super_dict): """Assert a sub_dict is subset of super_dict.""" self.assertTrue(set(sub_dict.keys()).issubset(set(super_dict.keys()))) for k, sub_value in sub_dict.items(): super_value = super_dict[k] if isinstance(sub_value, dict): self.assertSubDictMatch(sub_value, super_value) elif 'DONTCARE' in (sub_value, super_value): continue else: self.assertEqual(sub_value, super_value) def assertIn(self, a, b, *args, **kwargs): """Python < v2.7 compatibility. Assert 'a' in 'b'.""" try: f = super(TestCase, self).assertIn except AttributeError: self.assertTrue(a in b, *args, **kwargs) else: f(a, b, *args, **kwargs) def assertNotIn(self, a, b, *args, **kwargs): """Python < v2.7 compatibility. 
Assert 'a' NOT in 'b'.""" try: f = super(TestCase, self).assertNotIn except AttributeError: self.assertFalse(a in b, *args, **kwargs) else: f(a, b, *args, **kwargs) def assertIsInstance(self, a, b, *args, **kwargs): """Python < v2.7 compatibility.""" try: f = super(TestCase, self).assertIsInstance except AttributeError: self.assertIsInstance(a, b) else: f(a, b, *args, **kwargs) def assertIsNone(self, a, *args, **kwargs): """Python < v2.7 compatibility.""" try: f = super(TestCase, self).assertIsNone except AttributeError: self.assertTrue(a is None) else: f(a, *args, **kwargs) def _dict_from_object(self, obj, ignored_keys): if ignored_keys is None: ignored_keys = [] return {k: v for k, v in obj.iteritems() if k not in ignored_keys} def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None): obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys) sort_key = lambda d: [d[k] for k in sorted(d)] conv_and_sort = lambda obj: sorted(map(obj_to_dict, obj), key=sort_key) self.assertEqual(conv_and_sort(objs1), conv_and_sort(objs2)) manila-2.0.0/manila/quota.py0000664000567000056710000013434512701407107017070 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Quotas for shares.""" import datetime from oslo_config import cfg from oslo_log import log from oslo_utils import importutils from oslo_utils import timeutils import six from manila import db from manila import exception from manila.i18n import _LE LOG = log.getLogger(__name__) quota_opts = [ cfg.IntOpt('quota_shares', default=50, help='Number of shares allowed per project.'), cfg.IntOpt('quota_snapshots', default=50, help='Number of share snapshots allowed per project.'), cfg.IntOpt('quota_gigabytes', default=1000, help='Number of share gigabytes allowed per project.'), cfg.IntOpt('quota_snapshot_gigabytes', default=1000, help='Number of snapshot gigabytes allowed per project.'), cfg.IntOpt('quota_share_networks', default=10, help='Number of share-networks allowed per project.'), cfg.IntOpt('reservation_expire', default=86400, help='Number of seconds until a reservation expires.'), cfg.IntOpt('until_refresh', default=0, help='Count of reservations until usage is refreshed.'), cfg.IntOpt('max_age', default=0, help='Number of seconds between subsequent usage refreshes.'), cfg.StrOpt('quota_driver', default='manila.quota.DbQuotaDriver', help='Default driver to use for quota checks.'), ] CONF = cfg.CONF CONF.register_opts(quota_opts) class DbQuotaDriver(object): """Database Quota driver. Driver to perform necessary checks to enforce quotas and obtain quota information. The default driver utilizes the local database. 
""" def get_by_project_and_user(self, context, project_id, user_id, resource): """Get a specific quota by project and user.""" return db.quota_get(context, project_id, user_id, resource) def get_by_project(self, context, project_id, resource): """Get a specific quota by project.""" return db.quota_get(context, project_id, resource) def get_by_class(self, context, quota_class, resource): """Get a specific quota by quota class.""" return db.quota_class_get(context, quota_class, resource) def get_defaults(self, context, resources): """Given a list of resources, retrieve the default quotas. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. """ quotas = {} default_quotas = db.quota_class_get_default(context) for resource in resources.values(): quotas[resource.name] = default_quotas.get(resource.name, resource.default) return quotas def get_class_quotas(self, context, resources, quota_class, defaults=True): """Retrieve quotas for a quota class. Given a list of resources, retrieve the quotas for the given quota class. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param quota_class: The name of the quota class to return quotas for. :param defaults: If True, the default value will be reported if there is no specific value for the resource. """ quotas = {} class_quotas = db.quota_class_get_all_by_name(context, quota_class) for resource in resources.values(): if defaults or resource.name in class_quotas: quotas[resource.name] = class_quotas.get(resource.name, resource.default) return quotas def _process_quotas(self, context, resources, project_id, quotas, quota_class=None, defaults=True, usages=None, remains=False): modified_quotas = {} # Get the quotas for the appropriate class. If the project ID # matches the one in the context, we use the quota_class from # the context, otherwise, we use the provided quota_class (if # any) if project_id == context.project_id: quota_class = context.quota_class if quota_class: class_quotas = db.quota_class_get_all_by_name(context, quota_class) else: class_quotas = {} default_quotas = self.get_defaults(context, resources) for resource in resources.values(): # Omit default/quota class values if not defaults and resource.name not in quotas: continue limit = quotas.get( resource.name, class_quotas.get(resource.name, default_quotas[resource.name])) modified_quotas[resource.name] = dict(limit=limit) # Include usages if desired. This is optional because one # internal consumer of this interface wants to access the # usages directly from inside a transaction. if usages: usage = usages.get(resource.name, {}) modified_quotas[resource.name].update( in_use=usage.get('in_use', 0), reserved=usage.get('reserved', 0), ) # Initialize remains quotas. if remains: modified_quotas[resource.name].update(remains=limit) if remains: all_quotas = db.quota_get_all(context, project_id) for quota in all_quotas: if quota.resource in modified_quotas: modified_quotas[quota.resource]['remains'] -= ( quota.hard_limit) return modified_quotas def get_project_quotas(self, context, resources, project_id, quota_class=None, defaults=True, usages=True, remains=False): """Retrieve quotas for project. Given a list of resources, retrieve the quotas for the given project. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The ID of the project to return quotas for. 
:param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. It will be ignored if project_id == context.project_id. :param defaults: If True, the quota class value (or the default value, if there is no value from the quota class) will be reported if there is no specific value for the resource. :param usages: If True, the current in_use and reserved counts will also be returned. :param remains: If True, the current remains of the project will will be returned. """ project_quotas = db.quota_get_all_by_project(context, project_id) project_usages = None if usages: project_usages = db.quota_usage_get_all_by_project(context, project_id) return self._process_quotas(context, resources, project_id, project_quotas, quota_class, defaults=defaults, usages=project_usages, remains=remains) def get_user_quotas(self, context, resources, project_id, user_id, quota_class=None, defaults=True, usages=True): """Retrieve quotas for user and project. Given a list of resources, retrieve the quotas for the given user and project. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The ID of the project to return quotas for. :param user_id: The ID of the user to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. It will be ignored if project_id == context.project_id. :param defaults: If True, the quota class value (or the default value, if there is no value from the quota class) will be reported if there is no specific value for the resource. :param usages: If True, the current in_use and reserved counts will also be returned. """ user_quotas = db.quota_get_all_by_project_and_user(context, project_id, user_id) # Use the project quota for default user quota. proj_quotas = db.quota_get_all_by_project(context, project_id) for key, value in proj_quotas.items(): if key not in user_quotas.keys(): user_quotas[key] = value user_usages = None if usages: user_usages = db.quota_usage_get_all_by_project_and_user( context, project_id, user_id) return self._process_quotas(context, resources, project_id, user_quotas, quota_class, defaults=defaults, usages=user_usages) def get_settable_quotas(self, context, resources, project_id, user_id=None): """Retrieve range of settable quotas. Given a list of resources, retrieve the range of settable quotas for the given user or project. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The ID of the project to return quotas for. :param user_id: The ID of the user to return quotas for. 
""" settable_quotas = {} project_quotas = self.get_project_quotas(context, resources, project_id, remains=True) if user_id: user_quotas = self.get_user_quotas(context, resources, project_id, user_id) setted_quotas = db.quota_get_all_by_project_and_user( context, project_id, user_id) for key, value in user_quotas.items(): maximum = (project_quotas[key]['remains'] + setted_quotas.get(key, 0)) settable_quotas[key] = dict( minimum=value['in_use'] + value['reserved'], maximum=maximum) else: for key, value in project_quotas.items(): minimum = max(int(value['limit'] - value['remains']), int(value['in_use'] + value['reserved'])) settable_quotas[key] = dict(minimum=minimum, maximum=-1) return settable_quotas def _get_quotas(self, context, resources, keys, has_sync, project_id=None, user_id=None): """Retrieve quotas for a resource. A helper method which retrieves the quotas for the specific resources identified by keys, and which apply to the current context. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param keys: A list of the desired quotas to retrieve. :param has_sync: If True, indicates that the resource must have a sync attribute; if False, indicates that the resource must NOT have a sync attribute. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. (Special case: user operates on resource, owned/created by different user) """ # Filter resources if has_sync: sync_filt = lambda x: hasattr(x, 'sync') else: sync_filt = lambda x: not hasattr(x, 'sync') desired = set(keys) sub_resources = {k: v for k, v in resources.items() if k in desired and sync_filt(v)} # Make sure we accounted for all of them... if len(keys) != len(sub_resources): unknown = desired - set(sub_resources.keys()) raise exception.QuotaResourceUnknown(unknown=sorted(unknown)) if user_id: # Grab and return the quotas (without usages) quotas = self.get_user_quotas(context, sub_resources, project_id, user_id, context.quota_class, usages=False) else: # Grab and return the quotas (without usages) quotas = self.get_project_quotas(context, sub_resources, project_id, context.quota_class, usages=False) return {k: v['limit'] for k, v in quotas.items()} def limit_check(self, context, resources, values, project_id=None, user_id=None): """Check simple quota limits. For limits--those quotas for which there is no usage synchronization function--this method checks that a set of proposed values are permitted by the limit restriction. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it is not a simple limit resource. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns nothing. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param values: A dictionary of the values to check against the quota. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. 
(Special case: user operates on resource, owned/created by different user) """ # Ensure no value is less than zero unders = [key for key, val in values.items() if val < 0] if unders: raise exception.InvalidQuotaValue(unders=sorted(unders)) # If project_id is None, then we use the project_id in context if project_id is None: project_id = context.project_id # If user id is None, then we use the user_id in context if user_id is None: user_id = context.user_id # Get the applicable quotas quotas = self._get_quotas(context, resources, values.keys(), has_sync=False, project_id=project_id) user_quotas = self._get_quotas(context, resources, values.keys(), has_sync=False, project_id=project_id, user_id=user_id) # Check the quotas and construct a list of the resources that # would be put over limit by the desired values overs = [key for key, val in values.items() if (quotas[key] >= 0 and quotas[key] < val) or (user_quotas[key] >= 0 and user_quotas[key] < val)] if overs: raise exception.OverQuota(overs=sorted(overs), quotas=quotas, usages={}) def reserve(self, context, resources, deltas, expire=None, project_id=None, user_id=None): """Check quotas and reserve resources. For counting quotas--those quotas for which there is a usage synchronization function--this method checks quotas against current usage and the desired deltas. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it does not have a usage synchronization function. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns a list of reservation UUIDs which were created. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param deltas: A dictionary of the proposed delta changes. :param expire: An optional parameter specifying an expiration time for the reservations. If it is a simple number, it is interpreted as a number of seconds and added to the current time; if it is a datetime.timedelta object, it will also be added to the current time. A datetime.datetime object will be interpreted as the absolute expiration time. If None is specified, the default expiration time set by --default-reservation-expire will be used (this value will be treated as a number of seconds). :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. (Special case: user operates on resource, owned/created by different user) """ # Set up the reservation expiration if expire is None: expire = CONF.reservation_expire if isinstance(expire, six.integer_types): expire = datetime.timedelta(seconds=expire) if isinstance(expire, datetime.timedelta): expire = timeutils.utcnow() + expire if not isinstance(expire, datetime.datetime): raise exception.InvalidReservationExpiration(expire=expire) # If project_id is None, then we use the project_id in context if project_id is None: project_id = context.project_id # If user_id is None, then we use the user_id in context if user_id is None: user_id = context.user_id # Get the applicable quotas. # NOTE(Vek): We're not worried about races at this point. # Yes, the admin may be in the process of reducing # quotas, but that's a pretty rare thing. 
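        # Both the project-level and the user-level limits are fetched
        # below; db.quota_reserve() then rechecks usage against those
        # limits and creates the reservation rows inside a single DB
        # transaction (see the NOTE further down).
        #
        # Illustrative calling pattern (hypothetical snippet, not part of
        # this module) -- callers normally go through the module-level
        # QUOTAS QuotaEngine rather than this driver directly:
        #
        #     reservations = QUOTAS.reserve(context, shares=1, gigabytes=10)
        #     try:
        #         ...  # create the resource that consumes the quota
        #         QUOTAS.commit(context, reservations)
        #     except Exception:
        #         QUOTAS.rollback(context, reservations)
        #         raise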
quotas = self._get_quotas(context, resources, deltas.keys(), has_sync=True, project_id=project_id) user_quotas = self._get_quotas(context, resources, deltas.keys(), has_sync=True, project_id=project_id, user_id=user_id) # NOTE(Vek): Most of the work here has to be done in the DB # API, because we have to do it in a transaction, # which means access to the session. Since the # session isn't available outside the DBAPI, we # have to do the work there. return db.quota_reserve(context, resources, quotas, user_quotas, deltas, expire, CONF.until_refresh, CONF.max_age, project_id=project_id, user_id=user_id) def commit(self, context, reservations, project_id=None, user_id=None): """Commit reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. (Special case: user operates on resource, owned/created by different user) """ # If project_id is None, then we use the project_id in context if project_id is None: project_id = context.project_id # If user_id is None, then we use the user_id in context if user_id is None: user_id = context.user_id db.reservation_commit(context, reservations, project_id=project_id, user_id=user_id) def rollback(self, context, reservations, project_id=None, user_id=None): """Roll back reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. (Special case: user operates on resource, owned/created by different user) """ # If project_id is None, then we use the project_id in context if project_id is None: project_id = context.project_id # If user_id is None, then we use the user_id in context if user_id is None: user_id = context.user_id db.reservation_rollback(context, reservations, project_id=project_id, user_id=user_id) def usage_reset(self, context, resources): """Reset usage records. Reset the usage records for a particular user on a list of resources. This will force that user's usage records to be refreshed the next time a reservation is made. Note: this does not affect the currently outstanding reservations the user has; those reservations must be committed or rolled back (or expired). :param context: The request context, for access checks. :param resources: A list of the resource names for which the usage must be reset. """ # We need an elevated context for the calls to # quota_usage_update() elevated = context.elevated() for resource in resources: try: # Reset the usage to -1, which will force it to be # refreshed db.quota_usage_update(elevated, context.project_id, context.user_id, resource, in_use=-1) except exception.QuotaUsageNotFound: # That means it'll be refreshed anyway pass def destroy_all_by_project(self, context, project_id): """Destroy metadata associated with a project. Destroy all quotas, usages, and reservations associated with a project. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted. 
""" db.quota_destroy_all_by_project(context, project_id) def destroy_all_by_project_and_user(self, context, project_id, user_id): """Destroy metadata associated with a project and user. Destroy all quotas, usages, and reservations associated with a project and user. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted. :param user_id: The ID of the user being deleted. """ db.quota_destroy_all_by_project_and_user(context, project_id, user_id) def expire(self, context): """Expire reservations. Explores all currently existing reservations and rolls back any that have expired. :param context: The request context, for access checks. """ db.reservation_expire(context) class BaseResource(object): """Describe a single resource for quota checking.""" def __init__(self, name, flag=None): """Initializes a Resource. :param name: The name of the resource, i.e., "shares". :param flag: The name of the flag or configuration option which specifies the default value of the quota for this resource. """ self.name = name self.flag = flag def quota(self, driver, context, **kwargs): """Obtain quota for a resource. Given a driver and context, obtain the quota for this resource. :param driver: A quota driver. :param context: The request context. :param project_id: The project to obtain the quota value for. If not provided, it is taken from the context. If it is given as None, no project-specific quota will be searched for. :param quota_class: The quota class corresponding to the project, or for which the quota is to be looked up. If not provided, it is taken from the context. If it is given as None, no quota class-specific quota will be searched for. Note that the quota class defaults to the value in the context, which may not correspond to the project if project_id is not the same as the one in the context. """ # Get the project ID project_id = kwargs.get('project_id', context.project_id) # Ditto for the quota class quota_class = kwargs.get('quota_class', context.quota_class) # Look up the quota for the project if project_id: try: return driver.get_by_project(context, project_id, self.name) except exception.ProjectQuotaNotFound: pass # Try for the quota class if quota_class: try: return driver.get_by_class(context, quota_class, self.name) except exception.QuotaClassNotFound: pass # OK, return the default return self.default @property def default(self): """Return the default value of the quota.""" return CONF[self.flag] if self.flag else -1 class ReservableResource(BaseResource): """Describe a reservable resource.""" def __init__(self, name, sync, flag=None): """Initializes a ReservableResource. Reservable resources are those resources which directly correspond to objects in the database, i.e., shares, gigabytes, etc. A ReservableResource must be constructed with a usage synchronization function, which will be called to determine the current counts of one or more resources. The usage synchronization function will be passed three arguments: an admin context, the project ID, and an opaque session object, which should in turn be passed to the underlying database function. Synchronization functions should return a dictionary mapping resource names to the current in_use count for those resources; more than one resource and resource count may be returned. Note that synchronization functions may be associated with more than one ReservableResource. :param name: The name of the resource, i.e., "shares". 
:param sync: A callable which returns a dictionary to resynchronize the in_use count for one or more resources, as described above. :param flag: The name of the flag or configuration option which specifies the default value of the quota for this resource. """ super(ReservableResource, self).__init__(name, flag=flag) self.sync = sync class AbsoluteResource(BaseResource): """Describe a non-reservable resource.""" pass class CountableResource(AbsoluteResource): """Describe a countable resource. Describe a resource where the counts aren't based solely on the project ID. """ def __init__(self, name, count, flag=None): """Initializes a CountableResource. Countable resources are those resources which directly correspond to objects in the database, i.e., shares, gigabytes, etc., but for which a count by project ID is inappropriate. A CountableResource must be constructed with a counting function, which will be called to determine the current counts of the resource. The counting function will be passed the context, along with the extra positional and keyword arguments that are passed to Quota.count(). It should return an integer specifying the count. Note that this counting is not performed in a transaction-safe manner. This resource class is a temporary measure to provide required functionality, until a better approach to solving this problem can be evolved. :param name: The name of the resource, i.e., "shares". :param count: A callable which returns the count of the resource. The arguments passed are as described above. :param flag: The name of the flag or configuration option which specifies the default value of the quota for this resource. """ super(CountableResource, self).__init__(name, flag=flag) self.count = count class QuotaEngine(object): """Represent the set of recognized quotas.""" def __init__(self, quota_driver_class=None): """Initialize a Quota object.""" self._resources = {} self._driver_cls = quota_driver_class self.__driver = None @property def _driver(self): if self.__driver: return self.__driver if not self._driver_cls: self._driver_cls = CONF.quota_driver if isinstance(self._driver_cls, six.string_types): self._driver_cls = importutils.import_object(self._driver_cls) self.__driver = self._driver_cls return self.__driver def __contains__(self, resource): return resource in self._resources def register_resource(self, resource): """Register a resource.""" self._resources[resource.name] = resource def register_resources(self, resources): """Register a list of resources.""" for resource in resources: self.register_resource(resource) def get_by_project_and_user(self, context, project_id, user_id, resource): """Get a specific quota by project and user.""" return self._driver.get_by_project_and_user(context, project_id, user_id, resource) def get_by_project(self, context, project_id, resource): """Get a specific quota by project.""" return self._driver.get_by_project(context, project_id, resource) def get_by_class(self, context, quota_class, resource): """Get a specific quota by quota class.""" return self._driver.get_by_class(context, quota_class, resource) def get_defaults(self, context): """Retrieve the default quotas. :param context: The request context, for access checks. """ return self._driver.get_defaults(context, self._resources) def get_class_quotas(self, context, quota_class, defaults=True): """Retrieve the quotas for the given quota class. :param context: The request context, for access checks. :param quota_class: The name of the quota class to return quotas for. 
:param defaults: If True, the default value will be reported if there is no specific value for the resource. """ return self._driver.get_class_quotas(context, self._resources, quota_class, defaults=defaults) def get_user_quotas(self, context, project_id, user_id, quota_class=None, defaults=True, usages=True): """Retrieve the quotas for the given user and project. :param context: The request context, for access checks. :param project_id: The ID of the project to return quotas for. :param user_id: The ID of the user to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. :param defaults: If True, the quota class value (or the default value, if there is no value from the quota class) will be reported if there is no specific value for the resource. :param usages: If True, the current in_use and reserved counts will also be returned. """ return self._driver.get_user_quotas(context, self._resources, project_id, user_id, quota_class=quota_class, defaults=defaults, usages=usages) def get_project_quotas(self, context, project_id, quota_class=None, defaults=True, usages=True, remains=False): """Retrieve the quotas for the given project. :param context: The request context, for access checks. :param project_id: The ID of the project to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. :param defaults: If True, the quota class value (or the default value, if there is no value from the quota class) will be reported if there is no specific value for the resource. :param usages: If True, the current in_use and reserved counts will also be returned. :param remains: If True, the current remains of the project will will be returned. """ return self._driver.get_project_quotas(context, self._resources, project_id, quota_class=quota_class, defaults=defaults, usages=usages, remains=remains) def get_settable_quotas(self, context, project_id, user_id=None): """Get settable quotas. Given a list of resources, retrieve the range of settable quotas for the given user or project. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The ID of the project to return quotas for. :param user_id: The ID of the user to return quotas for. """ return self._driver.get_settable_quotas(context, self._resources, project_id, user_id=user_id) def count(self, context, resource, *args, **kwargs): """Count a resource. For countable resources, invokes the count() function and returns its result. Arguments following the context and resource are passed directly to the count function declared by the resource. :param context: The request context, for access checks. :param resource: The name of the resource, as a string. """ # Get the resource res = self._resources.get(resource) if not res or not hasattr(res, 'count'): raise exception.QuotaResourceUnknown(unknown=[resource]) return res.count(context, *args, **kwargs) def limit_check(self, context, project_id=None, user_id=None, **values): """Check simple quota limits. For limits--those quotas for which there is no usage synchronization function--this method checks that a set of proposed values are permitted by the limit restriction. The values to check are given as keyword arguments, where the key identifies the specific quota limit to check, and the value is the proposed value. 
This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it is not a simple limit resource. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns nothing. :param context: The request context, for access checks. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. (Special case: user operates on resource, owned/created by different user) """ return self._driver.limit_check(context, self._resources, values, project_id=project_id, user_id=user_id) def reserve(self, context, expire=None, project_id=None, user_id=None, **deltas): """Check quotas and reserve resources. For counting quotas--those quotas for which there is a usage synchronization function--this method checks quotas against current usage and the desired deltas. The deltas are given as keyword arguments, and current usage and other reservations are factored into the quota check. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it does not have a usage synchronization function. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns a list of reservation UUIDs which were created. :param context: The request context, for access checks. :param expire: An optional parameter specifying an expiration time for the reservations. If it is a simple number, it is interpreted as a number of seconds and added to the current time; if it is a datetime.timedelta object, it will also be added to the current time. A datetime.datetime object will be interpreted as the absolute expiration time. If None is specified, the default expiration time set by --default-reservation-expire will be used (this value will be treated as a number of seconds). :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ reservations = self._driver.reserve(context, self._resources, deltas, expire=expire, project_id=project_id, user_id=user_id) LOG.debug("Created reservations %s", reservations) return reservations def commit(self, context, reservations, project_id=None, user_id=None): """Commit reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ try: self._driver.commit(context, reservations, project_id=project_id, user_id=user_id) except Exception: # NOTE(Vek): Ignoring exceptions here is safe, because the # usage resynchronization and the reservation expiration # mechanisms will resolve the issue. The exception is # logged, however, because this is less than optimal. LOG.exception(_LE("Failed to commit reservations %s"), reservations) return LOG.debug("Committed reservations %s", reservations) def rollback(self, context, reservations, project_id=None, user_id=None): """Roll back reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. 
:param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ try: self._driver.rollback(context, reservations, project_id=project_id, user_id=user_id) except Exception: # NOTE(Vek): Ignoring exceptions here is safe, because the # usage resynchronization and the reservation expiration # mechanisms will resolve the issue. The exception is # logged, however, because this is less than optimal. LOG.exception(_LE("Failed to roll back reservations %s"), reservations) return LOG.debug("Rolled back reservations %s", reservations) def usage_reset(self, context, resources): """Reset usage records. Reset the usage records for a particular user on a list of resources. This will force that user's usage records to be refreshed the next time a reservation is made. Note: this does not affect the currently outstanding reservations the user has; those reservations must be committed or rolled back (or expired). :param context: The request context, for access checks. :param resources: A list of the resource names for which the usage must be reset. """ self._driver.usage_reset(context, resources) def destroy_all_by_project_and_user(self, context, project_id, user_id): """Destroy metadata associated with a project and user. Destroy all quotas, usages, and reservations associated with a project and user. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted. :param user_id: The ID of the user being deleted. """ self._driver.destroy_all_by_project_and_user(context, project_id, user_id) def destroy_all_by_project(self, context, project_id): """Destroy metadata associated with a project. Destroy all quotas, usages, and reservations associated with a project. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted. """ self._driver.destroy_all_by_project(context, project_id) def expire(self, context): """Expire reservations. Explores all currently existing reservations and rolls back any that have expired. :param context: The request context, for access checks. """ self._driver.expire(context) @property def resources(self): return sorted(self._resources.keys()) QUOTAS = QuotaEngine() resources = [ ReservableResource('shares', '_sync_shares', 'quota_shares'), ReservableResource('snapshots', '_sync_snapshots', 'quota_snapshots'), ReservableResource('gigabytes', '_sync_gigabytes', 'quota_gigabytes'), ReservableResource('snapshot_gigabytes', '_sync_snapshot_gigabytes', 'quota_snapshot_gigabytes'), ReservableResource('share_networks', '_sync_share_networks', 'quota_share_networks'), ] QUOTAS.register_resources(resources) manila-2.0.0/manila/tests/0000775000567000056710000000000012701407265016522 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/fake_zfssa.py0000664000567000056710000000554412701407107021213 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Fake ZFS Storage Appliance, for unit testing. """ class FakeResponse(object): def __init__(self, statuscode): self.status = statuscode self.data = 'data' class FakeZFSSA(object): """Fake ZFS SA.""" def __init__(self): self.user = None self.host = 'fakehost' self.url = 'fakeurl' self.rclient = None def login(self, user): self.user = user def set_host(self, host, timeout=None): self.host = host def enable_service(self, service): return True def create_project(self, pool, project, arg): pass def get_share(self, pool, project, share): pass def create_share(self, pool, project, share): pass def delete_share(self, pool, project, share): pass def create_snapshot(self, pool, project, share): pass def delete_snapshot(self, pool, project, share, snapshot): pass def clone_snapshot(self, pool, project, share, snapshot, clone, size): pass def has_clones(self, pool, project, vol, snapshot): return False def modify_share(self, pool, project, share, arg): pass def allow_access_nfs(self, pool, project, share, access): pass def deny_access_nfs(self, pool, project, share, access): pass def get_project_stats(self, pool, project): pass class FakeRestClient(object): """Fake ZFSSA Rest Client.""" def __init__(self): self.url = None self.headers = None self.log_function = None self.local = None self.base_path = None self.timeout = 60 self.do_logout = False self.auth_str = None def _path(self, path, base_path=None): pass def _authoriza(self): pass def login(self, auth_str): pass def logout(self): pass def islogin(self): pass def request(self, path, request, body=None, **kwargs): pass def get(self, path, **kwargs): pass def post(self, path, body="", **kwargs): pass def put(self, path, body="", **kwargs): pass def delete(self, path, **kwargs): pass def head(self, path, **kwargs): pass manila-2.0.0/manila/tests/fake_client_exception_class.py0000664000567000056710000000140612701407107024577 0ustar jenkinsjenkins00000000000000# Copyright 2016 SAP SE # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class Unauthorized(Exception): status_code = 401 message = "Unauthorized: bad credentials." def __init__(self, message=None): pass manila-2.0.0/manila/tests/test_misc.py0000664000567000056710000000423112701407107021061 0ustar jenkinsjenkins00000000000000# Copyright 2010 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
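# This module holds small project-wide sanity checks: ExceptionTestCase
# verifies that every exception class exported by manila.exception can be
# raised (with format errors made non-fatal, since no kwargs are passed),
# and ProjectTestCase scans the sqlalchemy migrate_repo versions to make
# sure every migration that defines upgrade() also defines downgrade().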
import glob import os from manila import exception from manila import test class ExceptionTestCase(test.TestCase): @staticmethod def _raise_exc(exc): raise exc() def test_exceptions_raise(self): # NOTE(dprince): disable format errors since we are not passing kwargs self.flags(fatal_exception_format_errors=False) for name in dir(exception): exc = getattr(exception, name) if isinstance(exc, type): self.assertRaises(exc, self._raise_exc, exc) class ProjectTestCase(test.TestCase): def test_all_migrations_have_downgrade(self): topdir = os.path.normpath(os.path.dirname(__file__) + '/../../') py_glob = os.path.join(topdir, "manila", "db", "sqlalchemy", "migrate_repo", "versions", "*.py") missing_downgrade = [] for path in glob.iglob(py_glob): has_upgrade = False has_downgrade = False with open(path, "r") as f: for line in f: if 'def upgrade(' in line: has_upgrade = True if 'def downgrade(' in line: has_downgrade = True if has_upgrade and not has_downgrade: fname = os.path.basename(path) missing_downgrade.append(fname) helpful_msg = ("The following migrations are missing a downgrade:" "\n\t%s") % '\n\t'.join(sorted(missing_downgrade)) self.assertTrue(not missing_downgrade, helpful_msg) manila-2.0.0/manila/tests/fake_driver.py0000664000567000056710000000573712701407107021364 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # Copyright 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
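# FakeShareDriver below provides a stub implementation of
# manila.share.driver.ShareDriver for unit tests. One way to point a test
# at it (illustrative only; the exact fixture wiring may differ) is to
# override the 'share_driver' option, e.g.:
#
#     CONF.set_override('share_driver',
#                       'manila.tests.fake_driver.FakeShareDriver')
#
# so service and manager code can be exercised without a real backend.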
from oslo_log import log import six from manila.share import driver LOG = log.getLogger(__name__) class FakeShareDriver(driver.ShareDriver): """Fake share driver.""" def __init__(self, *args, **kwargs): super(FakeShareDriver, self).__init__([True, False], *args, **kwargs) def manage_existing(self, share, driver_options): LOG.debug("Fake share driver: manage") LOG.debug("Fake share driver: driver options: %s", six.text_type(driver_options)) return {'size': 1} def unmanage(self, share): LOG.debug("Fake share driver: unmanage") @property def driver_handles_share_servers(self): if not isinstance(self.configuration.safe_get( 'driver_handles_share_servers'), bool): return True return self.configuration.driver_handles_share_servers def create_snapshot(self, context, snapshot, share_server=None): pass def delete_snapshot(self, context, snapshot, share_server=None): pass def create_share(self, context, share, share_server=None): return ['/fake/path', '/fake/path2'] def create_share_from_snapshot(self, context, share, snapshot, share_server=None): return ['/fake/path', '/fake/path2'] def delete_share(self, context, share, share_server=None): pass def ensure_share(self, context, share, share_server=None): pass def allow_access(self, context, share, access, share_server=None): pass def deny_access(self, context, share, access, share_server=None): pass def get_share_stats(self, refresh=False): return None def do_setup(self, context): pass def setup_server(self, *args, **kwargs): pass def teardown_server(self, *args, **kwargs): pass def get_network_allocations_number(self): # NOTE(vponomaryov): Simulate drivers that use share servers and # do not use 'service_instance' module. return 2 def _verify_share_server_handling(self, driver_handles_share_servers): return super(FakeShareDriver, self)._verify_share_server_handling( driver_handles_share_servers) def create_consistency_group(self, context, cg_id): pass def delete_consistency_group(self, context, cg_id): pass manila-2.0.0/manila/tests/fake_share.py0000664000567000056710000002025312701407112021155 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2015 Intel, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
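# Factory helpers in this module build fake DB-style objects for unit
# tests. Each factory starts from reasonable defaults and lets the caller
# override any field through keyword arguments, for example (illustrative
# usage only):
#
#     share = fake_share(id='share-xyz', size=5, share_proto='NFS')
#     snap = fake_snapshot(create_instance=True, status='available')
#
# The helpers return manila.tests.db.fakes.FakeModel wrappers (or plain
# dicts where noted below) around the resulting values.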
import datetime import uuid from manila.common import constants from manila.db.sqlalchemy import models from manila.tests.db import fakes as db_fakes def fake_share(**kwargs): share = { 'id': 'fakeid', 'name': 'fakename', 'size': 1, 'share_proto': 'fake_proto', 'share_network_id': 'fake share network id', 'share_server_id': 'fake share server id', 'share_type_id': 'fake share type id', 'export_location': 'fake_location:/fake_share', 'project_id': 'fake_project_uuid', 'availability_zone': 'fake_az', 'snapshot_support': 'True', 'replication_type': None, 'is_busy': False, 'consistency_group_id': 'fakecgid', 'instance': {'host': 'fakehost'}, } share.update(kwargs) return db_fakes.FakeModel(share) def fake_share_instance(base_share=None, **kwargs): if base_share is None: share = fake_share() else: share = base_share share_instance = { 'share_id': share['id'], 'id': "fakeinstanceid", 'status': "active", } for attr in models.ShareInstance._proxified_properties: share_instance[attr] = getattr(share, attr, None) return db_fakes.FakeModel(share_instance) def fake_share_type(**kwargs): share_type = { 'id': "fakesharetype", 'name': "fakesharetypename", 'is_public': False, 'extra_specs': { 'driver_handles_share_servers': 'False', 'snapshot_support': 'True', } } extra_specs = kwargs.pop('extra_specs', {}) for key, value in extra_specs.items(): share_type['extra_specs'][key] = value share_type.update(kwargs) return db_fakes.FakeModel(share_type) def fake_snapshot(create_instance=False, **kwargs): instance_keys = ('instance_id', 'snapshot_id', 'share_instance_id', 'status', 'progress', 'provider_location') snapshot_keys = ('id', 'share_name', 'share_id', 'name', 'share_size', 'share_proto', 'instance', 'aggregate_status') instance_kwargs = {k: kwargs.get(k) for k in instance_keys if k in kwargs} snapshot_kwargs = {k: kwargs.get(k) for k in snapshot_keys if k in kwargs} aggregate_status = snapshot_kwargs.get( 'aggregate_status', instance_kwargs.get( 'status', constants.STATUS_CREATING)) snapshot = { 'id': 'fakesnapshotid', 'share_name': 'fakename', 'share_id': 'fakeid', 'name': 'fakesnapshotname', 'share_size': 1, 'share_proto': 'fake_proto', 'instance': None, 'share': 'fake_share', 'aggregate_status': aggregate_status, } snapshot.update(snapshot_kwargs) if create_instance: if 'instance_id' in instance_kwargs: instance_kwargs['id'] = instance_kwargs.pop('instance_id') snapshot['instance'] = fake_snapshot_instance( base_snapshot=snapshot, **instance_kwargs) snapshot['status'] = snapshot['instance']['status'] snapshot['provider_location'] = ( snapshot['instance']['provider_location'] ) snapshot['progress'] = snapshot['instance']['progress'] else: snapshot['status'] = constants.STATUS_AVAILABLE snapshot['progress'] = '0%' snapshot['provider_location'] = 'fake' snapshot.update(instance_kwargs) return db_fakes.FakeModel(snapshot) def fake_snapshot_instance(base_snapshot=None, **kwargs): if base_snapshot is None: base_snapshot = fake_snapshot() snapshot_instance = { 'id': 'fakesnapshotinstanceid', 'snapshot_id': base_snapshot['id'], 'status': constants.STATUS_CREATING, 'progress': '0%', 'provider_location': 'i_live_here_actually', 'share_name': 'fakename', 'share_id': 'fakeshareinstanceid', 'share_instance_id': 'fakeshareinstanceid', } snapshot_instance.update(kwargs) return db_fakes.FakeModel(snapshot_instance) def expected_snapshot(id='fake_snapshot_id', **kwargs): self_link = 'http://localhost/v1/fake/snapshots/%s' % id bookmark_link = 'http://localhost/fake/snapshots/%s' % id snapshot = { 'id': id, 
'share_id': 'fakeshareid', 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'status': 'fakesnapstatus', 'name': 'displaysnapname', 'description': 'displaysnapdesc', 'share_size': 1, 'size': 1, 'share_proto': 'fakesnapproto', 'links': [ { 'href': self_link, 'rel': 'self', }, { 'href': bookmark_link, 'rel': 'bookmark', }, ], } snapshot.update(kwargs) return {'snapshot': snapshot} def search_opts(**kwargs): search_opts = { 'name': 'fake_name', 'status': 'fake_status', 'share_id': 'fake_share_id', 'sort_key': 'fake_sort_key', 'sort_dir': 'fake_sort_dir', 'offset': '1', 'limit': '1', } search_opts.update(kwargs) return search_opts def fake_access(**kwargs): access = { 'id': 'fakeaccid', 'access_type': 'ip', 'access_to': '10.0.0.1', 'access_level': 'rw', 'state': 'active', } access.update(kwargs) return db_fakes.FakeModel(access) def fake_replica(id=None, as_primitive=True, for_manager=False, **kwargs): replica = { 'id': id or str(uuid.uuid4()), 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'deleted': False, 'host': 'openstack@BackendZ#PoolA', 'status': 'available', 'scheduled_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'launched_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'terminated_at': None, 'replica_state': None, 'availability_zone_id': 'f6e146d0-65f0-11e5-9d70-feff819cdc9f', 'export_locations': [{'path': 'path1'}, {'path': 'path2'}], 'share_network_id': '4ccd5318-65f1-11e5-9d70-feff819cdc9f', 'share_server_id': '53099868-65f1-11e5-9d70-feff819cdc9f', 'access_rules_status': 'out_of_sync', } if for_manager: replica.update({ 'user_id': None, 'project_id': None, 'share_type_id': None, 'size': None, 'display_name': None, 'display_description': None, 'snapshot_id': None, 'share_proto': None, 'is_public': None, 'consistency_group_id': None, 'source_cgsnapshot_member_id': None, 'availability_zone': 'fake_az', }) replica.update(kwargs) if as_primitive: return replica else: return db_fakes.FakeModel(replica) def fake_replica_request_spec(as_primitive=True, **kwargs): replica = fake_replica(id='9c0db763-a109-4862-b010-10f2bd395295') all_replica_hosts = ','.join(['fake_active_replica_host', replica['host']]) request_spec = { 'share_properties': fake_share( id='f0e4bb5e-65f0-11e5-9d70-feff819cdc9f'), 'share_instance_properties': replica, 'share_proto': 'nfs', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'snapshot_id': None, 'share_type': 'fake_share_type', 'consistency_group': None, 'active_replica_host': 'fake_active_replica_host', 'all_replica_hosts': all_replica_hosts, } request_spec.update(kwargs) if as_primitive: return request_spec else: return db_fakes.FakeModel(request_spec) manila-2.0.0/manila/tests/test_policy.py0000664000567000056710000002053312701407107021430 0ustar jenkinsjenkins00000000000000# Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
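# Illustrative usage sketch (assumed, not part of the original module): every
# fake_* factory above returns sensible defaults that individual tests
# override through keyword arguments; the values used here are arbitrary.
from manila.tests import fake_share as fake_share_fakes

share = fake_share_fakes.fake_share(id='share-123', size=5, share_proto='NFS')
snap = fake_share_fakes.fake_snapshot(create_instance=True, status='available')
access = fake_share_fakes.fake_access(access_to='10.0.0.2', access_level='ro')

assert share['size'] == 5
assert snap['instance']['snapshot_id'] == snap['id']
assert access['access_level'] == 'ro'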
"""Test of Policy Engine For Manila.""" import os.path from oslo_config import cfg from oslo_policy import policy as common_policy from manila import context from manila import exception from manila import policy from manila import test from manila import utils CONF = cfg.CONF class PolicyFileTestCase(test.TestCase): def setUp(self): super(PolicyFileTestCase, self).setUp() # since is_admin is defined by policy, create context before reset self.context = context.RequestContext('fake', 'fake') policy.reset() self.target = {} def test_modified_policy_reloads(self): with utils.tempdir() as tmpdir: tmpfilename = os.path.join(tmpdir, 'policy') CONF.set_override('policy_file', tmpfilename, group='oslo_policy') action = "example:test" with open(tmpfilename, "w") as policyfile: policyfile.write("""{"example:test": []}""") policy.init(tmpfilename) policy.enforce(self.context, action, self.target) with open(tmpfilename, "w") as policyfile: policyfile.write("""{"example:test": ["false:false"]}""") # NOTE(vish): reset stored policy cache so we don't have to # sleep(1) policy._ENFORCER.load_rules(True) self.assertRaises( exception.PolicyNotAuthorized, policy.enforce, self.context, action, self.target, ) class PolicyTestCase(test.TestCase): def setUp(self): super(PolicyTestCase, self).setUp() policy.reset() policy.init() self.rules = { "true": [], "example:allowed": [], "example:denied": [["false:false"]], "example:get_http": [["http:http://www.example.com"]], "example:my_file": [["role:compute_admin"], ["project_id:%(project_id)s"]], "example:early_and_fail": [["false:false", "rule:true"]], "example:early_or_success": [["rule:true"], ["false:false"]], "example:lowercase_admin": [["role:admin"], ["role:sysadmin"]], "example:uppercase_admin": [["role:ADMIN"], ["role:sysadmin"]], } self._set_rules() self.context = context.RequestContext('fake', 'fake', roles=['member']) self.target = {} def tearDown(self): policy.reset() super(PolicyTestCase, self).tearDown() def _set_rules(self): these_rules = common_policy.Rules.from_dict(self.rules) policy._ENFORCER.set_rules(these_rules) def test_enforce_nonexistent_action_throws(self): action = "example:noexist" self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_enforce_bad_action_throws(self): action = "example:denied" self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_enforce_good_action(self): action = "example:allowed" policy.enforce(self.context, action, self.target) def test_templatized_enforcement(self): target_mine = {'project_id': 'fake'} target_not_mine = {'project_id': 'another'} action = "example:my_file" policy.enforce(self.context, action, target_mine) self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, action, target_not_mine) def test_early_AND_enforcement(self): action = "example:early_and_fail" self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_early_OR_enforcement(self): action = "example:early_or_success" policy.enforce(self.context, action, self.target) def test_ignore_case_role_check(self): lowercase_action = "example:lowercase_admin" uppercase_action = "example:uppercase_admin" # NOTE(dprince) we mix case in the Admin role here to ensure # case is ignored admin_context = context.RequestContext('admin', 'fake', roles=['AdMiN']) policy.enforce(admin_context, lowercase_action, self.target) policy.enforce(admin_context, uppercase_action, self.target) 
class DefaultPolicyTestCase(test.TestCase): def setUp(self): super(DefaultPolicyTestCase, self).setUp() policy.reset() policy.init() self.rules = { "default": [], "example:exist": "false:false" } self._set_rules('default') self.context = context.RequestContext('fake', 'fake') def tearDown(self): super(DefaultPolicyTestCase, self).tearDown() policy.reset() def _set_rules(self, default_rule): these_rules = common_policy.Rules.from_dict(self.rules, default_rule=default_rule) policy._ENFORCER.set_rules(these_rules) def test_policy_called(self): self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, "example:exist", {}) def test_not_found_policy_calls_default(self): policy.enforce(self.context, "example:noexist", {}) def test_default_not_found(self): new_default_rule = "default_noexist" # FIXME(gyee): need to overwrite the Enforcer's default_rule first # as it is recreating the rules with its own default_rule instead # of the default_rule passed in from set_rules(). I think this is a # bug in Oslo policy. policy._ENFORCER.default_rule = new_default_rule self._set_rules(new_default_rule) self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, self.context, "example:noexist", {}) class ContextIsAdminPolicyTestCase(test.TestCase): def setUp(self): super(ContextIsAdminPolicyTestCase, self).setUp() policy.reset() policy.init() def _set_rules(self, rules, default_rule): these_rules = common_policy.Rules.from_dict(rules, default_rule=default_rule) policy._ENFORCER.set_rules(these_rules) def test_default_admin_role_is_admin(self): ctx = context.RequestContext('fake', 'fake', roles=['johnny-admin']) self.assertFalse(ctx.is_admin) ctx = context.RequestContext('fake', 'fake', roles=['admin']) self.assertTrue(ctx.is_admin) def test_custom_admin_role_is_admin(self): # define explicit rules for context_is_admin rules = { 'context_is_admin': [["role:administrator"], ["role:johnny-admin"]] } self._set_rules(rules, CONF.oslo_policy.policy_default_rule) ctx = context.RequestContext('fake', 'fake', roles=['johnny-admin']) self.assertTrue(ctx.is_admin) ctx = context.RequestContext('fake', 'fake', roles=['administrator']) self.assertTrue(ctx.is_admin) # default rule no longer applies ctx = context.RequestContext('fake', 'fake', roles=['admin']) self.assertFalse(ctx.is_admin) def test_context_is_admin_undefined(self): rules = { "admin_or_owner": "role:admin or project_id:%(project_id)s", "default": "rule:admin_or_owner", } self._set_rules(rules, CONF.oslo_policy.policy_default_rule) ctx = context.RequestContext('fake', 'fake') self.assertFalse(ctx.is_admin) ctx = context.RequestContext('fake', 'fake', roles=['admin']) self.assertTrue(ctx.is_admin) manila-2.0.0/manila/tests/data/0000775000567000056710000000000012701407265017433 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/data/__init__.py0000664000567000056710000000000012701407107021525 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/data/test_helper.py0000664000567000056710000002334012701407107022320 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hitachi Data Systems inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import ddt import mock from manila.common import constants from manila import context from manila.data import helper as data_copy_helper from manila import db from manila import exception from manila.share import rpcapi as share_rpc from manila import test from manila.tests import db_utils from manila import utils @ddt.ddt class DataServiceHelperTestCase(test.TestCase): """Tests DataServiceHelper.""" def setUp(self): super(DataServiceHelperTestCase, self).setUp() self.share = db_utils.create_share() self.share_instance = db_utils.create_share_instance( share_id=self.share['id'], status=constants.STATUS_AVAILABLE) self.context = context.get_admin_context() self.access = db_utils.create_access(share_id=self.share['id']) self.helper = data_copy_helper.DataServiceHelper( self.context, db, self.share) def test_allow_data_access(self): access_create = {'access_type': self.access['access_type'], 'access_to': self.access['access_to'], 'access_level': self.access['access_level'], 'share_id': self.access['share_id']} # mocks self.mock_object( self.helper.db, 'share_access_get_all_by_type_and_access', mock.Mock(return_value=[self.access])) self.mock_object(self.helper, '_change_data_access_to_instance') self.mock_object(self.helper.db, 'share_access_create', mock.Mock(return_value=self.access)) # run self.helper._allow_data_access( self.access, self.share_instance['id'], self.share_instance['id']) # asserts self.helper.db.share_access_get_all_by_type_and_access.\ assert_called_once_with( self.context, self.share['id'], self.access['access_type'], self.access['access_to']) self.helper.db.share_access_create.assert_called_once_with( self.context, access_create) self.helper._change_data_access_to_instance.assert_has_calls( [mock.call(self.share_instance['id'], self.access, allow=False), mock.call(self.share_instance['id'], self.access, allow=True), mock.call(self.share_instance['id'], self.access, allow=True)]) def test_deny_access_to_data_service(self): # mocks self.mock_object(self.helper, '_change_data_access_to_instance') # run self.helper.deny_access_to_data_service( self.access, self.share_instance['id']) # asserts self.helper._change_data_access_to_instance.\ assert_called_once_with( self.share_instance['id'], self.access, allow=False) @ddt.data(None, Exception('fake')) def test_cleanup_data_access(self, exc): # mocks self.mock_object(self.helper, 'deny_access_to_data_service', mock.Mock(side_effect=exc)) self.mock_object(data_copy_helper.LOG, 'warning') # run self.helper.cleanup_data_access(self.access, self.share_instance['id']) # asserts self.helper.deny_access_to_data_service.assert_called_once_with( self.access, self.share_instance['id']) if exc: self.assertTrue(data_copy_helper.LOG.warning.called) @ddt.data(False, True) def test_cleanup_temp_folder(self, exc): fake_path = ''.join(('/fake_path/', self.share_instance['id'])) # mocks self.mock_object(os.path, 'exists', mock.Mock(side_effect=[True, True, exc])) self.mock_object(os, 'rmdir') self.mock_object(data_copy_helper.LOG, 'warning') # run self.helper.cleanup_temp_folder( self.share_instance['id'], '/fake_path/') # asserts 
os.rmdir.assert_called_once_with(fake_path) os.path.exists.assert_has_calls([ mock.call(fake_path), mock.call(fake_path), mock.call(fake_path) ]) if exc: self.assertTrue(data_copy_helper.LOG.warning.called) @ddt.data(None, Exception('fake')) def test_cleanup_unmount_temp_folder(self, exc): # mocks self.mock_object(self.helper, 'unmount_share_instance', mock.Mock(side_effect=exc)) self.mock_object(data_copy_helper.LOG, 'warning') # run self.helper.cleanup_unmount_temp_folder( 'unmount_template', 'fake_path', self.share_instance['id']) # asserts self.helper.unmount_share_instance.assert_called_once_with( 'unmount_template', 'fake_path', self.share_instance['id']) if exc: self.assertTrue(data_copy_helper.LOG.warning.called) @ddt.data(True, False) def test__change_data_access_to_instance(self, allow): # mocks self.mock_object(self.helper.db, 'share_instance_update_access_status') self.mock_object(self.helper.db, 'share_instance_get', mock.Mock(return_value=self.share_instance)) if allow: self.mock_object(share_rpc.ShareAPI, 'allow_access') else: self.mock_object(share_rpc.ShareAPI, 'deny_access') self.mock_object(utils, 'wait_for_access_update') # run self.helper._change_data_access_to_instance( self.share_instance['id'], self.access, allow=allow) # asserts self.helper.db.share_instance_update_access_status.\ assert_called_once_with(self.context, self.share_instance['id'], constants.STATUS_OUT_OF_SYNC) self.helper.db.share_instance_get.assert_called_once_with( self.context, self.share_instance['id'], with_share_data=True) if allow: share_rpc.ShareAPI.allow_access.assert_called_once_with( self.context, self.share_instance, self.access) else: share_rpc.ShareAPI.deny_access.assert_called_once_with( self.context, self.share_instance, self.access) utils.wait_for_access_update.assert_called_once_with( self.context, self.helper.db, self.share_instance, data_copy_helper.CONF.data_access_wait_access_rules_timeout) @ddt.data({'proto': 'GLUSTERFS', 'conf': None}, {'proto': 'GLUSTERFS', 'conf': 'cert'}, {'proto': 'OTHERS', 'conf': None}, {'proto': 'OTHERS', 'conf': 'ip'}) @ddt.unpack def test_allow_access_to_data_service(self, proto, conf): share = db_utils.create_share(share_proto=proto) access_allow = {'access_type': conf, 'access_to': conf, 'access_level': constants.ACCESS_LEVEL_RW} data_copy_helper.CONF.set_default('data_node_access_cert', conf) data_copy_helper.CONF.set_default('data_node_access_ip', conf) # mocks self.mock_object(self.helper, '_allow_data_access', mock.Mock(return_value=self.access)) # run and asserts if conf: result = self.helper.allow_access_to_data_service( share, 'ins1_id', 'ins2_id') self.assertEqual(self.access, result) self.helper._allow_data_access.assert_called_once_with( access_allow, 'ins1_id', 'ins2_id') else: self.assertRaises(exception.ShareDataCopyFailed, self.helper.allow_access_to_data_service, share, 'ins1_id', 'ins2_id') def test_mount_share_instance(self): fake_path = ''.join(('/fake_path/', self.share_instance['id'])) # mocks self.mock_object(utils, 'execute') self.mock_object(os.path, 'exists', mock.Mock( side_effect=[False, False, True])) self.mock_object(os, 'makedirs') # run self.helper.mount_share_instance( 'mount %(path)s', '/fake_path', self.share_instance['id']) # asserts utils.execute.assert_called_once_with('mount', fake_path, run_as_root=True) os.makedirs.assert_called_once_with(fake_path) os.path.exists.assert_has_calls([ mock.call(fake_path), mock.call(fake_path), mock.call(fake_path) ]) def test_unmount_share_instance(self): fake_path = 
''.join(('/fake_path/', self.share_instance['id'])) # mocks self.mock_object(utils, 'execute') self.mock_object(os.path, 'exists', mock.Mock( side_effect=[True, True, False])) self.mock_object(os, 'rmdir') # run self.helper.unmount_share_instance( 'unmount %(path)s', '/fake_path', self.share_instance['id']) # asserts utils.execute.assert_called_once_with('unmount', fake_path, run_as_root=True) os.rmdir.assert_called_once_with(fake_path) os.path.exists.assert_has_calls([ mock.call(fake_path), mock.call(fake_path), mock.call(fake_path) ]) manila-2.0.0/manila/tests/data/test_utils.py0000664000567000056710000002317612701407107022210 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hitachi Data Systems inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import mock from manila.data import utils as data_utils from manila import test from manila import utils class CopyClassTestCase(test.TestCase): def setUp(self): super(CopyClassTestCase, self).setUp() src = '/path/fake/src' dest = '/path/fake/dst' ignore_list = ['item'] self._copy = data_utils.Copy(src, dest, ignore_list) self._copy.total_size = 10000 self._copy.current_size = 100 self._copy.current_copy = {'file_path': '/fake/path', 'size': 100} self.mock_log = self.mock_object(data_utils, 'LOG') def test_get_progress(self): expected = {'total_progress': 1, 'current_file_path': '/fake/path', 'current_file_progress': 100} # mocks self.mock_object(utils, 'execute', mock.Mock(return_value=("100", ""))) # run out = self._copy.get_progress() # asserts self.assertEqual(expected, out) utils.execute.assert_called_once_with("stat", "-c", "%s", "/fake/path", run_as_root=True) def test_get_progress_current_copy_none(self): self._copy.current_copy = None expected = {'total_progress': 100} # run out = self._copy.get_progress() # asserts self.assertEqual(expected, out) def test_get_progress_exception(self): expected = {'total_progress': 1, 'current_file_path': '/fake/path', 'current_file_progress': 0} # mocks self.mock_object( utils, 'execute', mock.Mock(side_effect=utils.processutils.ProcessExecutionError())) # run out = self._copy.get_progress() # asserts self.assertEqual(expected, out) utils.execute.assert_called_once_with("stat", "-c", "%s", "/fake/path", run_as_root=True) def test_cancel(self): self._copy.cancelled = False # run self._copy.cancel() # asserts self.assertEqual(self._copy.cancelled, True) # reset self._copy.cancelled = False def test_get_total_size(self): self._copy.total_size = 0 values = [("folder1/\nitem/\nfile1\nitem", ""), ("", ""), ("10000", "")] def get_output(*args, **kwargs): return values.pop(0) # mocks self.mock_object(utils, 'execute', mock.Mock( side_effect=get_output)) # run self._copy.get_total_size(self._copy.src) # asserts self.assertEqual(self._copy.total_size, 10000) utils.execute.assert_has_calls([ mock.call("ls", "-pA1", "--group-directories-first", self._copy.src, run_as_root=True), mock.call("ls", "-pA1", "--group-directories-first", os.path.join(self._copy.src, "folder1/"), run_as_root=True), 
mock.call("stat", "-c", "%s", os.path.join(self._copy.src, "file1"), run_as_root=True) ]) def test_get_total_size_cancelled_1(self): self._copy.total_size = 0 self._copy.cancelled = True # run self._copy.get_total_size(self._copy.src) # asserts self.assertEqual(self._copy.total_size, 0) # reset self._copy.total_size = 10000 self._copy.cancelled = False def test_get_total_size_cancelled_2(self): self._copy.total_size = 0 def ls_output(*args, **kwargs): self._copy.cancelled = True return "folder1/", "" # mocks self.mock_object(utils, 'execute', mock.Mock( side_effect=ls_output)) # run self._copy.get_total_size(self._copy.src) # asserts self.assertEqual(self._copy.total_size, 0) utils.execute.assert_called_once_with( "ls", "-pA1", "--group-directories-first", self._copy.src, run_as_root=True) # reset self._copy.total_size = 10000 self._copy.cancelled = False def test_copy_data(self): values = [("folder1/\nitem/\nfile1\nitem", ""), "", ("", ""), ("10000", ""), ""] def get_output(*args, **kwargs): return values.pop(0) # mocks self.mock_object(utils, 'execute', mock.Mock( side_effect=get_output)) self.mock_object(self._copy, 'get_progress') # run self._copy.copy_data(self._copy.src) # asserts self._copy.get_progress.assert_called_once_with() utils.execute.assert_has_calls([ mock.call("ls", "-pA1", "--group-directories-first", self._copy.src, run_as_root=True), mock.call("mkdir", "-p", os.path.join(self._copy.dest, "folder1/"), run_as_root=True), mock.call("ls", "-pA1", "--group-directories-first", os.path.join(self._copy.src, "folder1/"), run_as_root=True), mock.call("stat", "-c", "%s", os.path.join(self._copy.src, "file1"), run_as_root=True), mock.call("cp", "-P", "--preserve=all", os.path.join(self._copy.src, "file1"), os.path.join(self._copy.dest, "file1"), run_as_root=True) ]) def test_copy_data_cancelled_1(self): self._copy.cancelled = True # run self._copy.copy_data(self._copy.src) # reset self._copy.cancelled = False def test_copy_data_cancelled_2(self): def ls_output(*args, **kwargs): self._copy.cancelled = True return "folder1/", "" # mocks self.mock_object(utils, 'execute', mock.Mock( side_effect=ls_output)) # run self._copy.copy_data(self._copy.src) # asserts utils.execute.assert_called_once_with( "ls", "-pA1", "--group-directories-first", self._copy.src, run_as_root=True) # reset self._copy.cancelled = False def test_copy_stats(self): values = [("folder1/\nitem/\nfile1\nitem", ""), ("", ""), "", "", "", "", "", ""] def get_output(*args, **kwargs): return values.pop(0) # mocks self.mock_object(utils, 'execute', mock.Mock( side_effect=get_output)) # run self._copy.copy_stats(self._copy.src) # asserts utils.execute.assert_has_calls([ mock.call("ls", "-pA1", "--group-directories-first", self._copy.src, run_as_root=True), mock.call("ls", "-pA1", "--group-directories-first", os.path.join(self._copy.src, "folder1/"), run_as_root=True), mock.call( "chmod", "--reference=%s" % os.path.join(self._copy.src, "folder1/"), os.path.join(self._copy.dest, "folder1/"), run_as_root=True), mock.call( "touch", "--reference=%s" % os.path.join(self._copy.src, "folder1/"), os.path.join(self._copy.dest, "folder1/"), run_as_root=True), mock.call( "chown", "--reference=%s" % os.path.join(self._copy.src, "folder1/"), os.path.join(self._copy.dest, "folder1/"), run_as_root=True), ]) def test_copy_stats_cancelled_1(self): self._copy.cancelled = True # run self._copy.copy_stats(self._copy.src) # reset self._copy.cancelled = False def test_copy_stats_cancelled_2(self): def ls_output(*args, **kwargs): 
self._copy.cancelled = True return "folder1/", "" # mocks self.mock_object(utils, 'execute', mock.Mock( side_effect=ls_output)) # run self._copy.copy_stats(self._copy.src) # asserts utils.execute.assert_called_once_with( "ls", "-pA1", "--group-directories-first", self._copy.src, run_as_root=True) # reset self._copy.cancelled = False def test_run(self): # mocks self.mock_object(self._copy, 'get_total_size') self.mock_object(self._copy, 'copy_data') self.mock_object(self._copy, 'copy_stats') self.mock_object(self._copy, 'get_progress') # run self._copy.run() # asserts self.assertTrue(data_utils.LOG.info.called) self._copy.get_total_size.assert_called_once_with(self._copy.src) self._copy.copy_data.assert_called_once_with(self._copy.src) self._copy.copy_stats.assert_called_once_with(self._copy.src) self._copy.get_progress.assert_called_once_with() manila-2.0.0/manila/tests/data/test_manager.py0000664000567000056710000003676112701407107022466 0ustar jenkinsjenkins00000000000000# Copyright 2015, Hitachi Data Systems. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Data Manager """ import ddt import mock from manila.common import constants from manila import context from manila.data import helper from manila.data import manager from manila.data import utils as data_utils from manila import db from manila import exception from manila.share import rpcapi as share_rpc from manila import test from manila.tests import db_utils from manila import utils @ddt.ddt class DataManagerTestCase(test.TestCase): """Test case for data manager.""" def setUp(self): super(DataManagerTestCase, self).setUp() self.manager = manager.DataManager() self.context = context.get_admin_context() self.topic = 'fake_topic' self.share = db_utils.create_share() manager.CONF.set_default('migration_tmp_location', '/tmp/') def test_init(self): manager = self.manager self.assertIsNotNone(manager) @ddt.data(constants.TASK_STATE_DATA_COPYING_COMPLETING, constants.TASK_STATE_DATA_COPYING_STARTING, constants.TASK_STATE_DATA_COPYING_IN_PROGRESS) def test_init_host(self, status): share = db_utils.create_share( task_state=status) # mocks self.mock_object(db, 'share_get_all', mock.Mock( return_value=[share])) self.mock_object(db, 'share_update') # run self.manager.init_host() # asserts db.share_get_all.assert_called_once_with( utils.IsAMatcher(context.RequestContext)) db.share_update.assert_called_with( utils.IsAMatcher(context.RequestContext), share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_ERROR}) @ddt.data({'notify': True, 'exc': None}, {'notify': False, 'exc': None}, {'notify': 'fake', 'exc': exception.ShareDataCopyCancelled(src_instance='ins1', dest_instance='ins2')}, {'notify': 'fake', 'exc': Exception('fake')}) @ddt.unpack def test_migration_start(self, notify, exc): # mocks self.mock_object(db, 'share_get', mock.Mock(return_value=self.share)) self.mock_object(data_utils, 'Copy', mock.Mock(return_value='fake_copy')) if exc is None: self.manager.busy_tasks_shares[self.share['id']] = 'fake_copy' self.mock_object(self.manager, 
'_copy_share_data', mock.Mock(side_effect=exc)) self.mock_object(share_rpc.ShareAPI, 'migration_complete') if exc is not None and not isinstance( exc, exception.ShareDataCopyCancelled): self.mock_object(db, 'share_update') # run if exc is None or isinstance(exc, exception.ShareDataCopyCancelled): self.manager.migration_start( self.context, [], self.share['id'], 'ins1_id', 'ins2_id', 'info_src', 'info_dest', notify) else: self.assertRaises( exception.ShareDataCopyFailed, self.manager.migration_start, self.context, [], self.share['id'], 'ins1_id', 'ins2_id', 'info_src', 'info_dest', notify) db.share_update.assert_called_once_with( self.context, self.share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_ERROR}) # asserts self.assertFalse(self.manager.busy_tasks_shares.get(self.share['id'])) self.manager._copy_share_data.assert_called_once_with( self.context, 'fake_copy', self.share, 'ins1_id', 'ins2_id', 'info_src', 'info_dest') if notify or exc: share_rpc.ShareAPI.migration_complete.assert_called_once_with( self.context, self.share, 'ins1_id', 'ins2_id') @ddt.data({'cancelled': False, 'exc': None}, {'cancelled': False, 'exc': Exception('fake')}, {'cancelled': True, 'exc': None}) @ddt.unpack def test__copy_share_data(self, cancelled, exc): access = db_utils.create_access(share_id=self.share['id']) migration_info_src = {'mount': 'mount_cmd_src', 'unmount': 'unmount_cmd_src'} migration_info_dest = {'mount': 'mount_cmd_dest', 'unmount': 'unmount_cmd_dest'} get_progress = {'total_progress': 100} # mocks fake_copy = mock.MagicMock(cancelled=cancelled) self.mock_object(db, 'share_update') self.mock_object(helper.DataServiceHelper, 'allow_access_to_data_service', mock.Mock(return_value=access)) self.mock_object(helper.DataServiceHelper, 'mount_share_instance') self.mock_object(fake_copy, 'run', mock.Mock(side_effect=exc)) self.mock_object(fake_copy, 'get_progress', mock.Mock(return_value=get_progress)) self.mock_object(helper.DataServiceHelper, 'unmount_share_instance', mock.Mock(side_effect=Exception('fake'))) self.mock_object(helper.DataServiceHelper, 'deny_access_to_data_service', mock.Mock(side_effect=Exception('fake'))) extra_updates = None # run if cancelled: self.assertRaises( exception.ShareDataCopyCancelled, self.manager._copy_share_data, self.context, fake_copy, self.share, 'ins1_id', 'ins2_id', migration_info_src, migration_info_dest) extra_updates = [ mock.call( self.context, self.share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_COMPLETING}), mock.call( self.context, self.share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_CANCELLED}) ] elif exc: self.assertRaises( exception.ShareDataCopyFailed, self.manager._copy_share_data, self.context, fake_copy, self.share, 'ins1_id', 'ins2_id', migration_info_src, migration_info_dest) else: self.manager._copy_share_data( self.context, fake_copy, self.share, 'ins1_id', 'ins2_id', migration_info_src, migration_info_dest) extra_updates = [ mock.call( self.context, self.share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_COMPLETING}), mock.call( self.context, self.share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_COMPLETED}) ] # asserts self.assertEqual( self.manager.busy_tasks_shares[self.share['id']], fake_copy) update_list = [ mock.call( self.context, self.share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_STARTING}), mock.call( self.context, self.share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_IN_PROGRESS}), ] if extra_updates: update_list = update_list + extra_updates 
db.share_update.assert_has_calls(update_list) helper.DataServiceHelper.allow_access_to_data_service.\ assert_called_once_with(self.share, 'ins1_id', 'ins2_id') helper.DataServiceHelper.mount_share_instance.assert_has_calls([ mock.call(migration_info_src['mount'], '/tmp/', 'ins1_id'), mock.call(migration_info_dest['mount'], '/tmp/', 'ins2_id')]) fake_copy.run.assert_called_once_with() if exc is None: fake_copy.get_progress.assert_called_once_with() helper.DataServiceHelper.unmount_share_instance.assert_has_calls([ mock.call(migration_info_src['unmount'], '/tmp/', 'ins1_id'), mock.call(migration_info_dest['unmount'], '/tmp/', 'ins2_id')]) helper.DataServiceHelper.deny_access_to_data_service.assert_has_calls([ mock.call(access, 'ins1_id'), mock.call(access, 'ins2_id')]) def test__copy_share_data_exception_access(self): migration_info_src = {'mount': 'mount_cmd_src', 'unmount': 'unmount_cmd_src'} migration_info_dest = {'mount': 'mount_cmd_src', 'unmount': 'unmount_cmd_src'} fake_copy = mock.MagicMock(cancelled=False) # mocks self.mock_object(db, 'share_update') self.mock_object( helper.DataServiceHelper, 'allow_access_to_data_service', mock.Mock( side_effect=exception.ShareDataCopyFailed(reason='fake'))) self.mock_object(helper.DataServiceHelper, 'cleanup_data_access') # run self.assertRaises(exception.ShareDataCopyFailed, self.manager._copy_share_data, self.context, fake_copy, self.share, 'ins1_id', 'ins2_id', migration_info_src, migration_info_dest) # asserts db.share_update.assert_called_once_with( self.context, self.share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_STARTING}) helper.DataServiceHelper.allow_access_to_data_service.\ assert_called_once_with(self.share, 'ins1_id', 'ins2_id') def test__copy_share_data_exception_mount_1(self): access = db_utils.create_access(share_id=self.share['id']) migration_info_src = {'mount': 'mount_cmd_src', 'unmount': 'unmount_cmd_src'} migration_info_dest = {'mount': 'mount_cmd_src', 'unmount': 'unmount_cmd_src'} fake_copy = mock.MagicMock(cancelled=False) # mocks self.mock_object(db, 'share_update') self.mock_object(helper.DataServiceHelper, 'allow_access_to_data_service', mock.Mock(return_value=access)) self.mock_object(helper.DataServiceHelper, 'mount_share_instance', mock.Mock(side_effect=Exception('fake'))) self.mock_object(helper.DataServiceHelper, 'cleanup_data_access') self.mock_object(helper.DataServiceHelper, 'cleanup_temp_folder') # run self.assertRaises(exception.ShareDataCopyFailed, self.manager._copy_share_data, self.context, fake_copy, self.share, 'ins1_id', 'ins2_id', migration_info_src, migration_info_dest) # asserts db.share_update.assert_called_once_with( self.context, self.share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_STARTING}) helper.DataServiceHelper.allow_access_to_data_service.\ assert_called_once_with(self.share, 'ins1_id', 'ins2_id') helper.DataServiceHelper.mount_share_instance.assert_called_once_with( migration_info_src['mount'], '/tmp/', 'ins1_id') helper.DataServiceHelper.cleanup_temp_folder.assert_called_once_with( 'ins1_id', '/tmp/') helper.DataServiceHelper.cleanup_data_access.assert_has_calls([ mock.call(access, 'ins2_id'), mock.call(access, 'ins1_id')]) def test__copy_share_data_exception_mount_2(self): access = db_utils.create_access(share_id=self.share['id']) migration_info_src = {'mount': 'mount_cmd_src', 'unmount': 'unmount_cmd_src'} migration_info_dest = {'mount': 'mount_cmd_src', 'unmount': 'unmount_cmd_src'} fake_copy = mock.MagicMock(cancelled=False) # mocks self.mock_object(db, 
'share_update') self.mock_object(helper.DataServiceHelper, 'allow_access_to_data_service', mock.Mock(return_value=access)) self.mock_object(helper.DataServiceHelper, 'mount_share_instance', mock.Mock(side_effect=[None, Exception('fake')])) self.mock_object(helper.DataServiceHelper, 'cleanup_data_access') self.mock_object(helper.DataServiceHelper, 'cleanup_temp_folder') self.mock_object(helper.DataServiceHelper, 'cleanup_unmount_temp_folder') # run self.assertRaises(exception.ShareDataCopyFailed, self.manager._copy_share_data, self.context, fake_copy, self.share, 'ins1_id', 'ins2_id', migration_info_src, migration_info_dest) # asserts db.share_update.assert_called_once_with( self.context, self.share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_STARTING}) helper.DataServiceHelper.allow_access_to_data_service.\ assert_called_once_with(self.share, 'ins1_id', 'ins2_id') helper.DataServiceHelper.mount_share_instance.assert_has_calls([ mock.call(migration_info_src['mount'], '/tmp/', 'ins1_id'), mock.call(migration_info_dest['mount'], '/tmp/', 'ins2_id')]) helper.DataServiceHelper.cleanup_unmount_temp_folder.\ assert_called_once_with( migration_info_src['unmount'], '/tmp/', 'ins1_id') helper.DataServiceHelper.cleanup_temp_folder.assert_has_calls([ mock.call('ins2_id', '/tmp/'), mock.call('ins1_id', '/tmp/')]) helper.DataServiceHelper.cleanup_data_access.assert_has_calls([ mock.call(access, 'ins2_id'), mock.call(access, 'ins1_id')]) def test_data_copy_cancel(self): share = db_utils.create_share() self.manager.busy_tasks_shares[share['id']] = data_utils.Copy # mocks self.mock_object(data_utils.Copy, 'cancel') # run self.manager.data_copy_cancel(self.context, share['id']) # asserts data_utils.Copy.cancel.assert_called_once_with() def test_data_copy_cancel_not_copying(self): self.assertRaises(exception.InvalidShare, self.manager.data_copy_cancel, self.context, 'fake_id') def test_data_copy_get_progress(self): share = db_utils.create_share() self.manager.busy_tasks_shares[share['id']] = data_utils.Copy expected = 'fake_progress' # mocks self.mock_object(data_utils.Copy, 'get_progress', mock.Mock(return_value=expected)) # run result = self.manager.data_copy_get_progress(self.context, share['id']) # asserts self.assertEqual(expected, result) data_utils.Copy.get_progress.assert_called_once_with() def test_data_copy_get_progress_not_copying(self): self.assertRaises(exception.InvalidShare, self.manager.data_copy_get_progress, self.context, 'fake_id') manila-2.0.0/manila/tests/data/test_rpcapi.py0000664000567000056710000000740412701407107022322 0ustar jenkinsjenkins00000000000000# Copyright 2015, Hitachi Data Systems. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
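# Illustrative summary, inferred from the assertions above rather than taken
# from manila's API documentation: the share 'task_state' values that
# DataManager._copy_share_data is expected to walk through during a copy.
from manila.common import constants

EXPECTED_COPY_TASK_STATES = {
    'start': constants.TASK_STATE_DATA_COPYING_STARTING,
    'copying': constants.TASK_STATE_DATA_COPYING_IN_PROGRESS,
    'finishing': constants.TASK_STATE_DATA_COPYING_COMPLETING,
    'success': constants.TASK_STATE_DATA_COPYING_COMPLETED,
    'cancelled': constants.TASK_STATE_DATA_COPYING_CANCELLED,
    'failure': constants.TASK_STATE_DATA_COPYING_ERROR,
}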
""" Unit Tests for manila.data.rpcapi """ import copy import mock from oslo_config import cfg from oslo_serialization import jsonutils from manila.common import constants from manila import context from manila.data import rpcapi as data_rpcapi from manila import test from manila.tests import db_utils CONF = cfg.CONF class DataRpcAPITestCase(test.TestCase): def setUp(self): super(DataRpcAPITestCase, self).setUp() self.context = context.get_admin_context() share = db_utils.create_share( availability_zone=CONF.storage_availability_zone, status=constants.STATUS_AVAILABLE ) self.fake_share = jsonutils.to_primitive(share) def tearDown(self): super(DataRpcAPITestCase, self).tearDown() def _test_data_api(self, method, rpc_method, fanout=False, **kwargs): ctxt = context.RequestContext('fake_user', 'fake_project') rpcapi = data_rpcapi.DataAPI() expected_retval = 'foo' if method == 'call' else None target = { "fanout": fanout, "version": kwargs.pop('version', '1.0'), } expected_msg = copy.deepcopy(kwargs) self.fake_args = None self.fake_kwargs = None def _fake_prepare_method(*args, **kwds): for kwd in kwds: self.assertEqual(target[kwd], kwds[kwd]) return rpcapi.client def _fake_rpc_method(*args, **kwargs): self.fake_args = args self.fake_kwargs = kwargs if expected_retval: return expected_retval with mock.patch.object(rpcapi.client, "prepare") as mock_prepared: mock_prepared.side_effect = _fake_prepare_method with mock.patch.object(rpcapi.client, rpc_method) as mock_method: mock_method.side_effect = _fake_rpc_method retval = getattr(rpcapi, method)(ctxt, **kwargs) self.assertEqual(expected_retval, retval) expected_args = [ctxt, method, expected_msg] for arg, expected_arg in zip(self.fake_args, expected_args): self.assertEqual(expected_arg, arg) def test_migration_start(self): self._test_data_api('migration_start', rpc_method='cast', version='1.0', share_id=self.fake_share['id'], ignore_list=[], share_instance_id='fake_ins_id', dest_share_instance_id='dest_fake_ins_id', migration_info_src={}, migration_info_dest={}, notify=True) def test_data_copy_cancel(self): self._test_data_api('data_copy_cancel', rpc_method='call', version='1.0', share_id=self.fake_share['id']) def test_data_copy_get_progress(self): self._test_data_api('data_copy_get_progress', rpc_method='call', version='1.0', share_id=self.fake_share['id']) manila-2.0.0/manila/tests/fake_utils.py0000664000567000056710000000722712701407107021225 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""This modules stubs out functions in manila.utils.""" import re from eventlet import greenthread import mock from oslo_log import log import six from manila import exception from manila import utils LOG = log.getLogger(__name__) _fake_execute_repliers = [] _fake_execute_log = [] def fake_execute_get_log(): return _fake_execute_log def fake_execute_clear_log(): global _fake_execute_log _fake_execute_log = [] def fake_execute_set_repliers(repliers): """Allows the client to configure replies to commands.""" global _fake_execute_repliers _fake_execute_repliers = repliers def fake_execute_default_reply_handler(*ignore_args, **ignore_kwargs): """A reply handler for commands that haven't been added to the reply list. Returns empty strings for stdout and stderr. """ return '', '' def fake_execute(*cmd_parts, **kwargs): """This function stubs out execute. It optionally executes a preconfigued function to return expected data. """ global _fake_execute_repliers process_input = kwargs.get('process_input', None) check_exit_code = kwargs.get('check_exit_code', 0) delay_on_retry = kwargs.get('delay_on_retry', True) attempts = kwargs.get('attempts', 1) run_as_root = kwargs.get('run_as_root', False) cmd_str = ' '.join(str(part) for part in cmd_parts) LOG.debug("Faking execution of cmd (subprocess): %s", cmd_str) _fake_execute_log.append(cmd_str) reply_handler = fake_execute_default_reply_handler for fake_replier in _fake_execute_repliers: if re.match(fake_replier[0], cmd_str): reply_handler = fake_replier[1] LOG.debug('Faked command matched %s' % fake_replier[0]) break if isinstance(reply_handler, six.string_types): # If the reply handler is a string, return it as stdout reply = reply_handler, '' else: try: # Alternative is a function, so call it reply = reply_handler(cmd_parts, process_input=process_input, delay_on_retry=delay_on_retry, attempts=attempts, run_as_root=run_as_root, check_exit_code=check_exit_code) except exception.ProcessExecutionError as e: LOG.debug('Faked command raised an exception %s', e) raise stdout = reply[0] stderr = reply[1] LOG.debug("Reply to faked command is stdout='%(stdout)s' " "stderr='%(stderr)s'.", {"stdout": stdout, "stderr": stderr}) # Replicate the sleep call in the real function greenthread.sleep(0) return reply def stub_out_utils_execute(testcase): fake_execute_set_repliers([]) fake_execute_clear_log() testcase.mock_object(utils, 'execute', fake_execute) def get_fake_lock_context(): context_manager_mock = mock.Mock() setattr(context_manager_mock, '__enter__', mock.Mock()) setattr(context_manager_mock, '__exit__', mock.Mock()) return context_manager_mock manila-2.0.0/manila/tests/test_test_utils.py0000664000567000056710000000200512701407107022322 0ustar jenkinsjenkins00000000000000# Copyright 2010 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from manila import test from manila.tests import utils as test_utils class TestUtilsTestCase(test.TestCase): def test_get_test_admin_context(self): """get_test_admin_context's return value behaves like admin context.""" ctxt = test_utils.get_test_admin_context() # TODO(soren): This should verify the full interface context # objects expose. self.assertTrue(ctxt.is_admin) manila-2.0.0/manila/tests/fake_network.py0000664000567000056710000001440112701407107021546 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from oslo_config import cfg CONF = cfg.CONF class FakeNetwork(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_net_id') self.name = kwargs.pop('name', 'net_name') self.subnets = kwargs.pop('subnets', []) for key, value in kwargs.items(): setattr(self, key, value) def __getitem__(self, attr): return getattr(self, attr) class FakeSubnet(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_subnet_id') self.network_id = kwargs.pop('network_id', 'fake_net_id') self.cidr = kwargs.pop('cidr', 'fake_cidr') for key, value in kwargs.items(): setattr(self, key, value) def __getitem__(self, attr): return getattr(self, attr) class FakePort(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_subnet_id') self.network_id = kwargs.pop('network_id', 'fake_net_id') self.fixed_ips = kwargs.pop('fixed_ips', []) for key, value in kwargs.items(): setattr(self, key, value) def __getitem__(self, attr): return getattr(self, attr) class FakeRouter(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_router_id') self.name = kwargs.pop('name', 'fake_router_name') for key, value in kwargs.items(): setattr(self, key, value) def __getitem__(self, attr): return getattr(self, attr) def __setitem__(self, attr, value): setattr(self, attr, value) class FakeDeviceAddr(object): def __init__(self, list_of_addresses=None): self.addresses = list_of_addresses or [ dict(ip_version=4, cidr='1.0.0.0/27'), dict(ip_version=4, cidr='2.0.0.0/27'), dict(ip_version=6, cidr='3.0.0.0/27'), ] def list(self): return self.addresses class FakeDevice(object): def __init__(self, name=None, list_of_addresses=None): self.addr = FakeDeviceAddr(list_of_addresses) self.name = name or 'fake_device_name' class API(object): """Fake Network API.""" admin_project_id = 'fake_admin_project_id' network = { "status": "ACTIVE", "subnets": ["fake_subnet_id"], "name": "fake_network", "tenant_id": "fake_tenant_id", "shared": False, "id": "fake_id", "router:external": False, } port = { "status": "ACTIVE", "allowed_address_pairs": [], "admin_state_up": True, "network_id": "fake_network_id", "tenant_id": "fake_tenant_id", "extra_dhcp_opts": [], "device_owner": "fake", "binding:capabilities": {"port_filter": True}, "mac_address": "00:00:00:00:00:00", "fixed_ips": [ {"subnet_id": "56537094-98d7-430a-b513-81c4dc6d9903", "ip_address": "10.12.12.10"} ], "id": "fake_port_id", "security_groups": 
["fake_sec_group_id"], "device_id": "fake_device_id" } def get_all_admin_project_networks(self): net1 = self.network.copy() net1['tenant_id'] = self.admin_project_id net1['id'] = str(uuid.uuid4()) net2 = self.network.copy() net2['tenant_id'] = self.admin_project_id net2['id'] = str(uuid.uuid4()) return [net1, net2] def create_port(self, tenant_id, network_id, subnet_id=None, fixed_ip=None, device_owner=None, device_id=None): port = self.port.copy() port['network_id'] = network_id port['admin_state_up'] = True port['tenant_id'] = tenant_id if fixed_ip: fixed_ip_dict = {'ip_address': fixed_ip} if subnet_id: fixed_ip_dict.update({'subnet_id': subnet_id}) port['fixed_ips'] = [fixed_ip_dict] if device_owner: port['device_owner'] = device_owner if device_id: port['device_id'] = device_id return port def list_ports(self, **search_opts): """List ports for the client based on search options.""" ports = [] for i in range(2): ports.append(self.port.copy()) for port in ports: port['id'] = str(uuid.uuid4()) for key, val in search_opts.items(): port[key] = val if 'id' in search_opts: return ports return ports def show_port(self, port_id): """Return the port for the client given the port id.""" port = self.port.copy() port['id'] = port_id return port def delete_port(self, port_id): pass def get_subnet(self, subnet_id): pass def subnet_create(self, *args, **kwargs): pass def router_add_interface(self, *args, **kwargs): pass def show_router(self, *args, **kwargs): pass def update_port_fixed_ips(self, *args, **kwargs): pass def router_remove_interface(self, *args, **kwargs): pass def update_subnet(self, *args, **kwargs): pass def get_all_networks(self): """Get all networks for client.""" net1 = self.network.copy() net2 = self.network.copy() net1['id'] = str(uuid.uuid4()) net2['id'] = str(uuid.uuid4()) return [net1, net2] def get_network(self, network_uuid): """Get specific network for client.""" network = self.network.copy() network['id'] = network_uuid return network def network_create(self, tenant_id, name): network = self.network.copy() network['tenant_id'] = tenant_id network['name'] = name return network manila-2.0.0/manila/tests/test_quota.py0000664000567000056710000020044112701407107021260 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import mock from oslo_config import cfg from oslo_utils import timeutils import testtools from manila.common import constants from manila import context from manila import db from manila.db.sqlalchemy import api as sqa_api from manila.db.sqlalchemy import models as sqa_models from manila import exception from manila import quota from manila import share from manila import test from manila.tests import db_utils CONF = cfg.CONF class QuotaIntegrationTestCase(test.TestCase): def setUp(self): super(QuotaIntegrationTestCase, self).setUp() self.flags(quota_shares=2, quota_gigabytes=20) self.user_id = 'admin' self.project_id = 'admin' self.create_share = lambda size=10: ( db_utils.create_share(user_id=self.user_id, project_id=self.project_id, size=size, status=constants.STATUS_AVAILABLE) ) self.context = context.RequestContext(self.user_id, self.project_id, is_admin=True) @testtools.skip("SQLAlchemy sqlite insert bug") def test_too_many_shares(self): share_ids = [] for i in range(CONF.quota_shares): share_ref = self.create_share() share_ids.append(share_ref['id']) self.assertRaises(exception.QuotaError, share.API().create, self.context, 'nfs', 10, '', '', None) for share_id in share_ids: db.share_delete(self.context, share_id) @testtools.skip("SQLAlchemy sqlite insert bug") def test_too_many_gigabytes(self): share_ids = [] share_ref = self.create_share(size=20) share_ids.append(share_ref['id']) self.assertRaises(exception.QuotaError, share.API().create, self.context, 'cifs', 10, '', '', None) for share_id in share_ids: db.share_delete(self.context, share_id) class FakeContext(object): def __init__(self, project_id, quota_class): self.is_admin = False self.user_id = 'fake_user' self.project_id = project_id self.quota_class = quota_class self.read_deleted = 'no' def elevated(self): elevated = self.__class__(self.project_id, self.quota_class) elevated.is_admin = True return elevated class FakeDriver(object): def __init__(self, by_project=None, by_class=None, reservations=None): self.called = [] self.by_project = by_project or {} self.by_class = by_class or {} self.reservations = reservations or [] def get_by_project(self, context, project_id, resource): self.called.append(('get_by_project', context, project_id, resource)) try: return self.by_project[project_id][resource] except KeyError: raise exception.ProjectQuotaNotFound(project_id=project_id) def get_by_class(self, context, quota_class, resource): self.called.append(('get_by_class', context, quota_class, resource)) try: return self.by_class[quota_class][resource] except KeyError: raise exception.QuotaClassNotFound(class_name=quota_class) def get_defaults(self, context, resources): self.called.append(('get_defaults', context, resources)) return resources def get_class_quotas(self, context, resources, quota_class, defaults=True): self.called.append(('get_class_quotas', context, resources, quota_class, defaults)) return resources def get_project_quotas(self, context, resources, project_id, quota_class=None, defaults=True, usages=True, remains=False): self.called.append(('get_project_quotas', context, resources, project_id, quota_class, defaults, usages, remains)) return resources def limit_check(self, context, resources, values, project_id=None, user_id=None): self.called.append(('limit_check', context, resources, values, project_id, user_id)) def reserve(self, context, resources, deltas, expire=None, project_id=None, user_id=None): self.called.append(('reserve', context, resources, deltas, expire, project_id, user_id)) return 
self.reservations def commit(self, context, reservations, project_id=None, user_id=None): self.called.append(('commit', context, reservations, project_id, user_id)) def rollback(self, context, reservations, project_id=None, user_id=None): self.called.append(('rollback', context, reservations, project_id, user_id)) def destroy_all_by_project_and_user(self, context, project_id, user_id): self.called.append(('destroy_all_by_project_and_user', context, project_id, user_id)) def destroy_all_by_project(self, context, project_id): self.called.append(('destroy_all_by_project', context, project_id)) def expire(self, context): self.called.append(('expire', context)) class BaseResourceTestCase(test.TestCase): def test_no_flag(self): resource = quota.BaseResource('test_resource') self.assertEqual('test_resource', resource.name) self.assertIsNone(resource.flag) self.assertEqual(-1, resource.default) def test_with_flag(self): # We know this flag exists, so use it... self.flags(quota_shares=10) resource = quota.BaseResource('test_resource', 'quota_shares') self.assertEqual('test_resource', resource.name) self.assertEqual('quota_shares', resource.flag) self.assertEqual(10, resource.default) def test_with_flag_no_quota(self): self.flags(quota_shares=-1) resource = quota.BaseResource('test_resource', 'quota_shares') self.assertEqual('test_resource', resource.name) self.assertEqual('quota_shares', resource.flag) self.assertEqual(-1, resource.default) def test_quota_no_project_no_class(self): self.flags(quota_shares=10) resource = quota.BaseResource('test_resource', 'quota_shares') driver = FakeDriver() context = FakeContext(None, None) quota_value = resource.quota(driver, context) self.assertEqual(10, quota_value) def test_quota_with_project_no_class(self): self.flags(quota_shares=10) resource = quota.BaseResource('test_resource', 'quota_shares') driver = FakeDriver( by_project=dict( test_project=dict(test_resource=15), )) context = FakeContext('test_project', None) quota_value = resource.quota(driver, context) self.assertEqual(15, quota_value) def test_quota_no_project_with_class(self): self.flags(quota_shares=10) resource = quota.BaseResource('test_resource', 'quota_shares') driver = FakeDriver( by_class=dict( test_class=dict(test_resource=20), )) context = FakeContext(None, 'test_class') quota_value = resource.quota(driver, context) self.assertEqual(20, quota_value) def test_quota_with_project_with_class(self): self.flags(quota_shares=10) resource = quota.BaseResource('test_resource', 'quota_shares') driver = FakeDriver(by_project=dict( test_project=dict(test_resource=15), ), by_class=dict(test_class=dict(test_resource=20), )) context = FakeContext('test_project', 'test_class') quota_value = resource.quota(driver, context) self.assertEqual(15, quota_value) def test_quota_override_project_with_class(self): self.flags(quota_shares=10) resource = quota.BaseResource('test_resource', 'quota_shares') driver = FakeDriver(by_project=dict( test_project=dict(test_resource=15), override_project=dict(test_resource=20), )) context = FakeContext('test_project', 'test_class') quota_value = resource.quota(driver, context, project_id='override_project') self.assertEqual(20, quota_value) def test_quota_with_project_override_class(self): self.flags(quota_shares=10) resource = quota.BaseResource('test_resource', 'quota_shares') driver = FakeDriver(by_class=dict( test_class=dict(test_resource=15), override_class=dict(test_resource=20), )) context = FakeContext('test_project', 'test_class') quota_value = resource.quota(driver, 
context, quota_class='override_class') self.assertEqual(20, quota_value) class QuotaEngineTestCase(test.TestCase): def test_init(self): quota_obj = quota.QuotaEngine() self.assertEqual({}, quota_obj._resources) self.assertIsInstance(quota_obj._driver, quota.DbQuotaDriver) def test_init_override_string(self): quota_obj = quota.QuotaEngine( quota_driver_class='manila.tests.test_quota.FakeDriver') self.assertEqual({}, quota_obj._resources) self.assertIsInstance(quota_obj._driver, FakeDriver) def test_init_override_obj(self): quota_obj = quota.QuotaEngine(quota_driver_class=FakeDriver) self.assertEqual({}, quota_obj._resources) self.assertEqual(FakeDriver, quota_obj._driver) def test_register_resource(self): quota_obj = quota.QuotaEngine() resource = quota.AbsoluteResource('test_resource') quota_obj.register_resource(resource) self.assertEqual(dict(test_resource=resource), quota_obj._resources) def test_register_resources(self): quota_obj = quota.QuotaEngine() resources = [ quota.AbsoluteResource('test_resource1'), quota.AbsoluteResource('test_resource2'), quota.AbsoluteResource('test_resource3'), ] quota_obj.register_resources(resources) self.assertEqual(dict(test_resource1=resources[0], test_resource2=resources[1], test_resource3=resources[2], ), quota_obj._resources) def test_sync_predeclared(self): quota_obj = quota.QuotaEngine() def spam(*args, **kwargs): pass resource = quota.ReservableResource('test_resource', spam) quota_obj.register_resource(resource) self.assertEqual(spam, resource.sync) def test_sync_multi(self): quota_obj = quota.QuotaEngine() def spam(*args, **kwargs): pass resources = [ quota.ReservableResource('test_resource1', spam), quota.ReservableResource('test_resource2', spam), quota.ReservableResource('test_resource3', spam), quota.ReservableResource('test_resource4', spam), ] quota_obj.register_resources(resources[:2]) self.assertEqual(spam, resources[0].sync) self.assertEqual(spam, resources[1].sync) self.assertEqual(spam, resources[2].sync) self.assertEqual(spam, resources[3].sync) def test_get_by_project(self): context = FakeContext('test_project', 'test_class') driver = FakeDriver( by_project=dict( test_project=dict(test_resource=42))) quota_obj = quota.QuotaEngine(quota_driver_class=driver) result = quota_obj.get_by_project(context, 'test_project', 'test_resource') self.assertEqual([('get_by_project', context, 'test_project', 'test_resource'), ], driver.called) self.assertEqual(42, result) def test_get_by_class(self): context = FakeContext('test_project', 'test_class') driver = FakeDriver( by_class=dict( test_class=dict(test_resource=42))) quota_obj = quota.QuotaEngine(quota_driver_class=driver) result = quota_obj.get_by_class(context, 'test_class', 'test_resource') self.assertEqual([('get_by_class', context, 'test_class', 'test_resource'), ], driver.called) self.assertEqual(42, result) def _make_quota_obj(self, driver): quota_obj = quota.QuotaEngine(quota_driver_class=driver) resources = [ quota.AbsoluteResource('test_resource4'), quota.AbsoluteResource('test_resource3'), quota.AbsoluteResource('test_resource2'), quota.AbsoluteResource('test_resource1'), ] quota_obj.register_resources(resources) return quota_obj def test_get_defaults(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) result = quota_obj.get_defaults(context) self.assertEqual([('get_defaults', context, quota_obj._resources), ], driver.called) self.assertEqual(quota_obj._resources, result) def test_get_class_quotas(self): context = FakeContext(None, 
None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) result1 = quota_obj.get_class_quotas(context, 'test_class') result2 = quota_obj.get_class_quotas(context, 'test_class', False) self.assertEqual([ ('get_class_quotas', context, quota_obj._resources, 'test_class', True), ('get_class_quotas', context, quota_obj._resources, 'test_class', False), ], driver.called) self.assertEqual(quota_obj._resources, result1) self.assertEqual(quota_obj._resources, result2) def test_get_project_quotas(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) result1 = quota_obj.get_project_quotas(context, 'test_project') result2 = quota_obj.get_project_quotas(context, 'test_project', quota_class='test_class', defaults=False, usages=False) self.assertEqual([ ('get_project_quotas', context, quota_obj._resources, 'test_project', None, True, True, False), ('get_project_quotas', context, quota_obj._resources, 'test_project', 'test_class', False, False, False), ], driver.called) self.assertEqual(quota_obj._resources, result1) self.assertEqual(quota_obj._resources, result2) def test_count_no_resource(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) self.assertRaises(exception.QuotaResourceUnknown, quota_obj.count, context, 'test_resource5', True, foo='bar') def test_count_wrong_resource(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) self.assertRaises(exception.QuotaResourceUnknown, quota_obj.count, context, 'test_resource1', True, foo='bar') def test_count(self): def fake_count(context, *args, **kwargs): self.assertEqual((True,), args) self.assertEqual(dict(foo='bar'), kwargs) return 5 context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.register_resource(quota.CountableResource('test_resource5', fake_count)) result = quota_obj.count(context, 'test_resource5', True, foo='bar') self.assertEqual(5, result) def test_limit_check(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.limit_check(context, test_resource1=4, test_resource2=3, test_resource3=2, test_resource4=1) self.assertEqual([ ('limit_check', context, quota_obj._resources, dict( test_resource1=4, test_resource2=3, test_resource3=2, test_resource4=1,), None, None), ], driver.called) def test_reserve(self): context = FakeContext(None, None) driver = FakeDriver(reservations=['resv-01', 'resv-02', 'resv-03', 'resv-04', ]) quota_obj = self._make_quota_obj(driver) result1 = quota_obj.reserve(context, test_resource1=4, test_resource2=3, test_resource3=2, test_resource4=1) result2 = quota_obj.reserve(context, expire=3600, test_resource1=1, test_resource2=2, test_resource3=3, test_resource4=4) result3 = quota_obj.reserve(context, project_id='fake_project', test_resource1=1, test_resource2=2, test_resource3=3, test_resource4=4) self.assertEqual([ ('reserve', context, quota_obj._resources, dict( test_resource1=4, test_resource2=3, test_resource3=2, test_resource4=1, ), None, None, None), ('reserve', context, quota_obj._resources, dict( test_resource1=1, test_resource2=2, test_resource3=3, test_resource4=4, ), 3600, None, None), ('reserve', context, quota_obj._resources, dict( test_resource1=1, test_resource2=2, test_resource3=3, test_resource4=4, ), None, 'fake_project', None), ], driver.called) self.assertEqual(['resv-01', 'resv-02', 'resv-03', 'resv-04', ], result1) 
self.assertEqual(['resv-01', 'resv-02', 'resv-03', 'resv-04', ], result2) self.assertEqual(['resv-01', 'resv-02', 'resv-03', 'resv-04', ], result3) def test_commit(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.commit(context, ['resv-01', 'resv-02', 'resv-03']) self.assertEqual([('commit', context, ['resv-01', 'resv-02', 'resv-03'], None, None), ], driver.called) def test_rollback(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.rollback(context, ['resv-01', 'resv-02', 'resv-03']) self.assertEqual([('rollback', context, ['resv-01', 'resv-02', 'resv-03'], None, None), ], driver.called) def test_destroy_all_by_project_and_user(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.destroy_all_by_project_and_user(context, 'test_project', 'fake_user') self.assertEqual([ ('destroy_all_by_project_and_user', context, 'test_project', 'fake_user'), ], driver.called) def test_destroy_all_by_project(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.destroy_all_by_project(context, 'test_project') self.assertEqual([('destroy_all_by_project', context, 'test_project'), ], driver.called) def test_expire(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.expire(context) self.assertEqual([('expire', context), ], driver.called) def test_resources(self): quota_obj = self._make_quota_obj(None) self.assertEqual(['test_resource1', 'test_resource2', 'test_resource3', 'test_resource4'], quota_obj.resources) class DbQuotaDriverTestCase(test.TestCase): expected_all_context = { "shares": {"limit": 10, "in_use": 2, "reserved": 0, }, "gigabytes": {"limit": 50, "in_use": 10, "reserved": 0, }, "snapshot_gigabytes": {"limit": 50, "in_use": 20, "reserved": 0, }, "snapshots": {"limit": 10, "in_use": 4, "reserved": 0, }, "share_networks": {"limit": 10, "in_use": 0, "reserved": 0, }, } def setUp(self): super(DbQuotaDriverTestCase, self).setUp() self.flags( quota_shares=10, quota_snapshots=10, quota_gigabytes=1000, quota_snapshot_gigabytes=1000, reservation_expire=86400, until_refresh=0, max_age=0) self.driver = quota.DbQuotaDriver() self.calls = [] self.patcher = mock.patch.object(timeutils, 'utcnow') self.mock_utcnow = self.patcher.start() self.mock_utcnow.return_value = datetime.datetime.utcnow() def tearDown(self): self.patcher.stop() super(DbQuotaDriverTestCase, self).tearDown() def test_get_defaults(self): # Use our pre-defined resources result = self.driver.get_defaults(None, quota.QUOTAS._resources) expected = { "shares": 10, "gigabytes": 1000, "snapshot_gigabytes": 1000, "snapshots": 10, "share_networks": 10, } self.assertEqual(expected, result) def _stub_quota_class_get_all_by_name(self): # Stub out quota_class_get_all_by_name def fake_qcgabn(context, quota_class): self.calls.append('quota_class_get_all_by_name') self.assertEqual('test_class', quota_class) return dict(gigabytes=500, shares=10, snapshot_gigabytes=50) self.mock_object(db, 'quota_class_get_all_by_name', fake_qcgabn) def test_get_class_quotas(self): self._stub_quota_class_get_all_by_name() result = self.driver.get_class_quotas(None, quota.QUOTAS._resources, 'test_class') self.assertEqual(['quota_class_get_all_by_name'], self.calls) expected = { "shares": 10, "gigabytes": 500, "snapshot_gigabytes": 50, "snapshots": 10, "share_networks": 10, } 
self.assertEqual(expected, result) def test_get_class_quotas_no_defaults(self): self._stub_quota_class_get_all_by_name() result = self.driver.get_class_quotas(None, quota.QUOTAS._resources, 'test_class', False) self.assertEqual(['quota_class_get_all_by_name'], self.calls) self.assertEqual( dict(shares=10, gigabytes=500, snapshot_gigabytes=50), result) def _stub_get_by_project_and_user(self): def fake_qgabpu(context, project_id, user_id): self.calls.append('quota_get_all_by_project_and_user') self.assertEqual('test_project', project_id) self.assertEqual('fake_user', user_id) return dict( shares=10, gigabytes=50, snapshots=10, snapshot_gigabytes=50, reserved=0) def fake_qgabp(context, project_id): self.calls.append('quota_get_all_by_project') self.assertEqual('test_project', project_id) return dict( shares=10, gigabytes=50, snapshots=10, snapshot_gigabytes=50, reserved=0) def fake_qugabpu(context, project_id, user_id): self.calls.append('quota_usage_get_all_by_project_and_user') self.assertEqual('test_project', project_id) self.assertEqual('fake_user', user_id) return dict( shares=dict(in_use=2, reserved=0), gigabytes=dict(in_use=10, reserved=0), snapshots=dict(in_use=4, reserved=0), snapshot_gigabytes=dict(in_use=20, reserved=0), ) self.mock_object(db, 'quota_get_all_by_project_and_user', fake_qgabpu) self.mock_object(db, 'quota_get_all_by_project', fake_qgabp) self.mock_object(db, 'quota_usage_get_all_by_project_and_user', fake_qugabpu) self._stub_quota_class_get_all_by_name() def test_get_user_quotas(self): self._stub_get_by_project_and_user() result = self.driver.get_user_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project', 'fake_user') self.assertEqual([ 'quota_get_all_by_project_and_user', 'quota_get_all_by_project', 'quota_usage_get_all_by_project_and_user', 'quota_class_get_all_by_name', ], self.calls) self.assertEqual(self.expected_all_context, result) def _stub_get_by_project(self): def fake_qgabp(context, project_id): self.calls.append('quota_get_all_by_project') self.assertEqual('test_project', project_id) return dict( shares=10, gigabytes=50, snapshot_gigabytes=50, reserved=0) def fake_qugabp(context, project_id): self.calls.append('quota_usage_get_all_by_project') self.assertEqual('test_project', project_id) return dict( shares=dict(in_use=2, reserved=0), snapshots=dict(in_use=4, reserved=0), snapshot_gigabytes=dict(in_use=20, reserved=0), gigabytes=dict(in_use=10, reserved=0)) self.mock_object(db, 'quota_get_all_by_project', fake_qgabp) self.mock_object(db, 'quota_usage_get_all_by_project', fake_qugabp) self._stub_quota_class_get_all_by_name() def test_get_project_quotas(self): self._stub_get_by_project() result = self.driver.get_project_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project') self.assertEqual(['quota_get_all_by_project', 'quota_usage_get_all_by_project', 'quota_class_get_all_by_name', ], self.calls) self.assertEqual(self.expected_all_context, result) def test_get_project_quotas_with_remains(self): self._stub_get_by_project() result = self.driver.get_project_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project', remains=True) for result_key in result: self.assertIn("remains", result[result_key]) def test_get_user_quotas_alt_context_no_class(self): self._stub_get_by_project_and_user() result = self.driver.get_user_quotas( FakeContext('other_project', None), quota.QUOTAS._resources, 'test_project', 'fake_user') self.assertEqual([ 
'quota_get_all_by_project_and_user', 'quota_get_all_by_project', 'quota_usage_get_all_by_project_and_user', ], self.calls) self.assertEqual(self.expected_all_context, result) def test_get_project_quotas_alt_context_no_class(self): self._stub_get_by_project() result = self.driver.get_project_quotas( FakeContext('other_project', None), quota.QUOTAS._resources, 'test_project') self.assertEqual(['quota_get_all_by_project', 'quota_usage_get_all_by_project', ], self.calls) self.assertEqual(self.expected_all_context, result) def test_get_user_quotas_alt_context_with_class(self): self._stub_get_by_project_and_user() result = self.driver.get_user_quotas( FakeContext('other_project', 'other_class'), quota.QUOTAS._resources, 'test_project', 'fake_user', quota_class='test_class') self.assertEqual([ 'quota_get_all_by_project_and_user', 'quota_get_all_by_project', 'quota_usage_get_all_by_project_and_user', 'quota_class_get_all_by_name', ], self.calls) self.assertEqual(self.expected_all_context, result) def test_get_project_quotas_alt_context_with_class(self): self._stub_get_by_project() result = self.driver.get_project_quotas( FakeContext('other_project', 'other_class'), quota.QUOTAS._resources, 'test_project', quota_class='test_class') self.assertEqual(['quota_get_all_by_project', 'quota_usage_get_all_by_project', 'quota_class_get_all_by_name', ], self.calls) self.assertEqual(self.expected_all_context, result) def test_get_user_quotas_no_defaults(self): self._stub_get_by_project_and_user() result = self.driver.get_user_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project', 'fake_user', defaults=False) self.assertEqual([ 'quota_get_all_by_project_and_user', 'quota_get_all_by_project', 'quota_usage_get_all_by_project_and_user', 'quota_class_get_all_by_name', ], self.calls) expected = { "shares": {"limit": 10, "in_use": 2, "reserved": 0, }, "gigabytes": {"limit": 50, "in_use": 10, "reserved": 0, }, "snapshot_gigabytes": {"limit": 50, "in_use": 20, "reserved": 0, }, "snapshots": {"limit": 10, "in_use": 4, "reserved": 0, }, } self.assertEqual(expected, result) def test_get_project_quotas_no_defaults(self): self._stub_get_by_project() result = self.driver.get_project_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project', defaults=False) self.assertEqual(['quota_get_all_by_project', 'quota_usage_get_all_by_project', 'quota_class_get_all_by_name', ], self.calls) expected = { "shares": {"limit": 10, "in_use": 2, "reserved": 0, }, "gigabytes": {"limit": 50, "in_use": 10, "reserved": 0, }, "snapshot_gigabytes": {"limit": 50, "in_use": 20, "reserved": 0, }, } self.assertEqual(expected, result) def test_get_user_quotas_no_usages(self): self._stub_get_by_project_and_user() result = self.driver.get_user_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project', 'fake_user', usages=False) self.assertEqual([ 'quota_get_all_by_project_and_user', 'quota_get_all_by_project', 'quota_class_get_all_by_name', ], self.calls) expected = { "shares": {"limit": 10, }, "gigabytes": {"limit": 50, }, "snapshot_gigabytes": {"limit": 50, }, "snapshots": {"limit": 10, }, "share_networks": {"limit": 10, }, } self.assertEqual(expected, result, result) def test_get_project_quotas_no_usages(self): self._stub_get_by_project() result = self.driver.get_project_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project', usages=False) self.assertEqual(['quota_get_all_by_project', 
'quota_class_get_all_by_name', ], self.calls) expected = { "shares": {"limit": 10, }, "gigabytes": {"limit": 50, }, "snapshot_gigabytes": {"limit": 50, }, "snapshots": {"limit": 10, }, "share_networks": {"limit": 10, }, } self.assertEqual(expected, result) def _stub_get_settable_quotas(self): def fake_get_project_quotas(context, resources, project_id, quota_class=None, defaults=True, usages=True, remains=False): self.calls.append('get_project_quotas') result = {} for k, v in resources.items(): remains = v.default in_use = 0 result[k] = {'limit': v.default, 'in_use': in_use, 'reserved': 0, 'remains': remains} return result def fake_get_user_quotas(context, resources, project_id, user_id, quota_class=None, defaults=True, usages=True): self.calls.append('get_user_quotas') result = {} for k, v in resources.items(): in_use = 0 result[k] = {'limit': v.default, 'in_use': in_use, 'reserved': 0} return result def fake_qgabpau(context, project_id, user_id): self.calls.append('quota_get_all_by_project_and_user') return {'shares': 2} self.mock_object(self.driver, 'get_project_quotas', fake_get_project_quotas) self.mock_object(self.driver, 'get_user_quotas', fake_get_user_quotas) self.mock_object(db, 'quota_get_all_by_project_and_user', fake_qgabpau) def test_get_settable_quotas_with_user(self): self._stub_get_settable_quotas() result = self.driver.get_settable_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project', user_id='test_user') self.assertEqual([ 'get_project_quotas', 'get_user_quotas', 'quota_get_all_by_project_and_user', ], self.calls) expected = { "shares": {"minimum": 0, "maximum": 12, }, "gigabytes": {"minimum": 0, "maximum": 1000, }, "snapshot_gigabytes": {"minimum": 0, "maximum": 1000, }, "snapshots": {"minimum": 0, "maximum": 10, }, "share_networks": {"minimum": 0, "maximum": 10, }, } self.assertEqual(expected, result) def test_get_settable_quotas_without_user(self): self._stub_get_settable_quotas() result = self.driver.get_settable_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, 'test_project') self.assertEqual(['get_project_quotas', ], self.calls) expected = { "shares": {"minimum": 0, "maximum": -1, }, "gigabytes": {"minimum": 0, "maximum": -1, }, "snapshot_gigabytes": {"minimum": 0, "maximum": -1, }, "snapshots": {"minimum": 0, "maximum": -1, }, "share_networks": {"minimum": 0, "maximum": -1, }, } self.assertEqual(expected, result) def _stub_get_project_quotas(self): def fake_get_project_quotas(context, resources, project_id, quota_class=None, defaults=True, usages=True): self.calls.append('get_project_quotas') return {k: dict(limit=v.default) for k, v in resources.items()} self.mock_object(self.driver, 'get_project_quotas', fake_get_project_quotas) def test_get_quotas_has_sync_unknown(self): self._stub_get_project_quotas() self.assertRaises(exception.QuotaResourceUnknown, self.driver._get_quotas, None, quota.QUOTAS._resources, ['unknown'], True) self.assertEqual([], self.calls) def test_get_quotas_no_sync_unknown(self): self._stub_get_project_quotas() self.assertRaises(exception.QuotaResourceUnknown, self.driver._get_quotas, None, quota.QUOTAS._resources, ['unknown'], False) self.assertEqual([], self.calls) def test_get_quotas_has_sync_no_sync_resource(self): self._stub_get_project_quotas() self.assertRaises(exception.QuotaResourceUnknown, self.driver._get_quotas, None, quota.QUOTAS._resources, ['metadata_items'], True) self.assertEqual([], self.calls) def test_get_quotas_no_sync_has_sync_resource(self): 
self._stub_get_project_quotas() self.assertRaises(exception.QuotaResourceUnknown, self.driver._get_quotas, None, quota.QUOTAS._resources, ['shares'], False) self.assertEqual([], self.calls) def test_get_quotas_has_sync(self): self._stub_get_project_quotas() result = self.driver._get_quotas(FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, ['shares', 'gigabytes'], True) self.assertEqual(['get_project_quotas'], self.calls) self.assertEqual(dict(shares=10, gigabytes=1000, ), result) def _stub_quota_reserve(self): def fake_quota_reserve(context, resources, quotas, user_quotas, deltas, expire, until_refresh, max_age, project_id=None, user_id=None): self.calls.append(('quota_reserve', expire, until_refresh, max_age)) return ['resv-1', 'resv-2', 'resv-3'] self.mock_object(db, 'quota_reserve', fake_quota_reserve) def test_reserve_bad_expire(self): self._stub_get_project_quotas() self._stub_quota_reserve() self.assertRaises(exception.InvalidReservationExpiration, self.driver.reserve, FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, dict(shares=2), expire='invalid') self.assertEqual([], self.calls) def test_reserve_default_expire(self): self._stub_get_project_quotas() self._stub_quota_reserve() result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, dict(shares=2)) expire = timeutils.utcnow() + datetime.timedelta(seconds=86400) self.assertEqual(['get_project_quotas', ('quota_reserve', expire, 0, 0), ], self.calls) self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result) def test_reserve_int_expire(self): self._stub_get_project_quotas() self._stub_quota_reserve() result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, dict(shares=2), expire=3600) expire = timeutils.utcnow() + datetime.timedelta(seconds=3600) self.assertEqual(['get_project_quotas', ('quota_reserve', expire, 0, 0), ], self.calls) self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result) def test_reserve_timedelta_expire(self): self._stub_get_project_quotas() self._stub_quota_reserve() expire_delta = datetime.timedelta(seconds=60) result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, dict(shares=2), expire=expire_delta) expire = timeutils.utcnow() + expire_delta self.assertEqual(['get_project_quotas', ('quota_reserve', expire, 0, 0), ], self.calls) self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result) def test_reserve_datetime_expire(self): self._stub_get_project_quotas() self._stub_quota_reserve() expire = timeutils.utcnow() + datetime.timedelta(seconds=120) result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, dict(shares=2), expire=expire) self.assertEqual(['get_project_quotas', ('quota_reserve', expire, 0, 0), ], self.calls) self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result) def test_reserve_until_refresh(self): self._stub_get_project_quotas() self._stub_quota_reserve() self.flags(until_refresh=500) expire = timeutils.utcnow() + datetime.timedelta(seconds=120) result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, dict(shares=2), expire=expire) self.assertEqual(['get_project_quotas', ('quota_reserve', expire, 500, 0), ], self.calls) self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result) def test_reserve_max_age(self): self._stub_get_project_quotas() self._stub_quota_reserve() self.flags(max_age=86400) expire = timeutils.utcnow() + datetime.timedelta(seconds=120) result = 
self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS._resources, dict(shares=2), expire=expire) self.assertEqual(['get_project_quotas', ('quota_reserve', expire, 0, 86400), ], self.calls) self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result) def _stub_quota_delete_all_by_project(self): def fake_quota_delete_all_by_project(context, project_id): self.calls.append(('quota_destroy_all_by_project', project_id)) return None self.mock_object(sqa_api, 'quota_destroy_all_by_project', fake_quota_delete_all_by_project) def test_delete_by_project(self): self._stub_quota_delete_all_by_project() self.driver.destroy_all_by_project(FakeContext('test_project', 'test_class'), 'test_project') self.assertEqual([('quota_destroy_all_by_project', ('test_project')), ], self.calls) class FakeSession(object): def begin(self): return self def add(self, instance): pass def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_traceback): return False class FakeUsage(sqa_models.QuotaUsage): def save(self, *args, **kwargs): pass class QuotaReserveSqlAlchemyTestCase(test.TestCase): # manila.db.sqlalchemy.api.quota_reserve is so complex it needs its # own test case, and since it's a quota manipulator, this is the # best place to put it... def setUp(self): super(QuotaReserveSqlAlchemyTestCase, self).setUp() self.sync_called = set() def make_sync(res_name): def sync(context, project_id, user_id, session): self.sync_called.add(res_name) if res_name in self.usages: if self.usages[res_name].in_use < 0: return {res_name: 2} else: return {res_name: self.usages[res_name].in_use - 1} return {res_name: 0} return sync self.resources = {} for res_name in ('shares', 'gigabytes'): method_name = '_sync_%s' % res_name sqa_api.QUOTA_SYNC_FUNCTIONS[method_name] = make_sync(res_name) res = quota.ReservableResource(res_name, '_sync_%s' % res_name) self.resources[res_name] = res self.expire = timeutils.utcnow() + datetime.timedelta(seconds=3600) self.usages = {} self.usages_created = {} self.reservations_created = {} def fake_get_session(): return FakeSession() def fake_get_project_quota_usages(context, session, project_id): return self.usages.copy() def fake_get_user_quota_usages(context, session, project_id, user_id): return self.usages.copy() def fake_quota_usage_create(context, project_id, user_id, resource, in_use, reserved, until_refresh, session=None, save=True): quota_usage_ref = self._make_quota_usage( project_id, user_id, resource, in_use, reserved, until_refresh, timeutils.utcnow(), timeutils.utcnow()) self.usages_created[resource] = quota_usage_ref return quota_usage_ref def fake_reservation_create(context, uuid, usage_id, project_id, user_id, resource, delta, expire, session=None): reservation_ref = self._make_reservation( uuid, usage_id, project_id, user_id, resource, delta, expire, timeutils.utcnow(), timeutils.utcnow()) self.reservations_created[resource] = reservation_ref return reservation_ref self.mock_object(sqa_api, 'get_session', fake_get_session) self.mock_object(sqa_api, '_get_project_quota_usages', fake_get_project_quota_usages) self.mock_object(sqa_api, '_get_user_quota_usages', fake_get_user_quota_usages) self.mock_object(sqa_api, '_quota_usage_create', fake_quota_usage_create) self.mock_object(sqa_api, '_reservation_create', fake_reservation_create) self.patcher = mock.patch.object(timeutils, 'utcnow') self.mock_utcnow = self.patcher.start() self.mock_utcnow.return_value = datetime.datetime.utcnow() def tearDown(self): self.patcher.stop() 
super(QuotaReserveSqlAlchemyTestCase, self).tearDown() def _make_quota_usage(self, project_id, user_id, resource, in_use, reserved, until_refresh, created_at, updated_at): quota_usage_ref = FakeUsage() quota_usage_ref.id = len(self.usages) + len(self.usages_created) quota_usage_ref.project_id = project_id quota_usage_ref.resource = resource quota_usage_ref.in_use = in_use quota_usage_ref.reserved = reserved quota_usage_ref.until_refresh = until_refresh quota_usage_ref.created_at = created_at quota_usage_ref.updated_at = updated_at quota_usage_ref.deleted_at = None quota_usage_ref.deleted = False return quota_usage_ref def init_usage(self, project_id, user_id, resource, in_use, reserved, until_refresh=None, created_at=None, updated_at=None): if created_at is None: created_at = timeutils.utcnow() if updated_at is None: updated_at = timeutils.utcnow() quota_usage_ref = self._make_quota_usage(project_id, user_id, resource, in_use, reserved, until_refresh, created_at, updated_at) self.usages[resource] = quota_usage_ref def compare_usage(self, usage_dict, expected): for usage in expected: resource = usage['resource'] for key, value in usage.items(): actual = getattr(usage_dict[resource], key) self.assertEqual(value, actual, "%s != %s on usage for resource %s" % (value, actual, resource)) def _make_reservation(self, uuid, usage_id, project_id, user_id, resource, delta, expire, created_at, updated_at): reservation_ref = sqa_models.Reservation() reservation_ref.id = len(self.reservations_created) reservation_ref.uuid = uuid reservation_ref.usage_id = usage_id reservation_ref.project_id = project_id reservation_ref.resource = resource reservation_ref.delta = delta reservation_ref.expire = expire reservation_ref.created_at = created_at reservation_ref.updated_at = updated_at reservation_ref.deleted_at = None reservation_ref.deleted = False return reservation_ref def compare_reservation(self, reservations, expected): reservations = set(reservations) for resv in expected: resource = resv['resource'] resv_obj = self.reservations_created[resource] self.assertIn(resv_obj.uuid, reservations) reservations.discard(resv_obj.uuid) for key, value in resv.items(): actual = getattr(resv_obj, key) self.assertEqual(value, actual, "%s != %s on reservation for resource %s" % (value, actual, resource)) self.assertEqual(0, len(reservations)) def test_quota_reserve_create_usages(self): context = FakeContext('test_project', 'test_class') quotas = dict(shares=5, gigabytes=10 * 1024, ) deltas = dict(shares=2, gigabytes=2 * 1024, ) result = sqa_api.quota_reserve(context, self.resources, quotas, quotas, deltas, self.expire, 0, 0) self.assertEqual(set(['shares', 'gigabytes']), self.sync_called) self.compare_usage(self.usages_created, [dict(resource='shares', project_id='test_project', in_use=0, reserved=2, until_refresh=None), dict(resource='gigabytes', project_id='test_project', in_use=0, reserved=2 * 1024, until_refresh=None), ]) self.compare_reservation( result, [dict(resource='shares', usage_id=self.usages_created['shares'], project_id='test_project', delta=2), dict(resource='gigabytes', usage_id=self.usages_created['gigabytes'], delta=2 * 1024), ]) def test_quota_reserve_negative_in_use(self): self.init_usage('test_project', 'test_user', 'shares', -1, 0, until_refresh=1) self.init_usage('test_project', 'test_user', 'gigabytes', -1, 0, until_refresh=1) context = FakeContext('test_project', 'test_class') quotas = dict(shares=5, gigabytes=10 * 1024, ) deltas = dict(shares=2, gigabytes=2 * 1024, ) result = 
sqa_api.quota_reserve(context, self.resources, quotas, quotas, deltas, self.expire, 5, 0) self.assertEqual(set(['shares', 'gigabytes']), self.sync_called) self.compare_usage(self.usages, [dict(resource='shares', project_id='test_project', in_use=2, reserved=2, until_refresh=5), dict(resource='gigabytes', project_id='test_project', in_use=2, reserved=2 * 1024, until_refresh=5), ]) self.assertEqual({}, self.usages_created) self.compare_reservation(result, [dict(resource='shares', usage_id=self.usages['shares'], project_id='test_project', delta=2), dict(resource='gigabytes', usage_id=self.usages['gigabytes'], delta=2 * 1024), ]) def test_quota_reserve_until_refresh(self): self.init_usage('test_project', 'test_user', 'shares', 3, 0, until_refresh=1) self.init_usage('test_project', 'test_user', 'gigabytes', 3, 0, until_refresh=1) context = FakeContext('test_project', 'test_class') quotas = dict(shares=5, gigabytes=10 * 1024, ) deltas = dict(shares=2, gigabytes=2 * 1024, ) result = sqa_api.quota_reserve(context, self.resources, quotas, quotas, deltas, self.expire, 5, 0) self.assertEqual(set(['shares', 'gigabytes']), self.sync_called) self.compare_usage(self.usages, [dict(resource='shares', project_id='test_project', in_use=2, reserved=2, until_refresh=5), dict(resource='gigabytes', project_id='test_project', in_use=2, reserved=2 * 1024, until_refresh=5), ]) self.assertEqual({}, self.usages_created) self.compare_reservation(result, [dict(resource='shares', usage_id=self.usages['shares'], project_id='test_project', delta=2), dict(resource='gigabytes', usage_id=self.usages['gigabytes'], delta=2 * 1024), ]) def test_quota_reserve_max_age(self): max_age = 3600 record_created = (timeutils.utcnow() - datetime.timedelta(seconds=max_age)) self.init_usage('test_project', 'test_user', 'shares', 3, 0, created_at=record_created, updated_at=record_created) self.init_usage('test_project', 'test_user', 'gigabytes', 3, 0, created_at=record_created, updated_at=record_created) context = FakeContext('test_project', 'test_class') quotas = dict(shares=5, gigabytes=10 * 1024, ) deltas = dict(shares=2, gigabytes=2 * 1024, ) result = sqa_api.quota_reserve(context, self.resources, quotas, quotas, deltas, self.expire, 0, max_age) self.assertEqual(set(['shares', 'gigabytes']), self.sync_called) self.compare_usage(self.usages, [dict(resource='shares', project_id='test_project', in_use=2, reserved=2, until_refresh=None), dict(resource='gigabytes', project_id='test_project', in_use=2, reserved=2 * 1024, until_refresh=None), ]) self.assertEqual({}, self.usages_created) self.compare_reservation(result, [dict(resource='shares', usage_id=self.usages['shares'], project_id='test_project', delta=2), dict(resource='gigabytes', usage_id=self.usages['gigabytes'], delta=2 * 1024), ]) def test_quota_reserve_no_refresh(self): self.init_usage('test_project', 'test_user', 'shares', 3, 0) self.init_usage('test_project', 'test_user', 'gigabytes', 3, 0) context = FakeContext('test_project', 'test_class') quotas = dict(shares=5, gigabytes=10 * 1024, ) deltas = dict(shares=2, gigabytes=2 * 1024, ) result = sqa_api.quota_reserve(context, self.resources, quotas, quotas, deltas, self.expire, 0, 0) self.assertEqual(set([]), self.sync_called) self.compare_usage(self.usages, [dict(resource='shares', project_id='test_project', in_use=3, reserved=2, until_refresh=None), dict(resource='gigabytes', project_id='test_project', in_use=3, reserved=2 * 1024, until_refresh=None), ]) self.assertEqual({}, self.usages_created) self.compare_reservation(result, 
[dict(resource='shares', usage_id=self.usages['shares'], project_id='test_project', delta=2), dict(resource='gigabytes', usage_id=self.usages['gigabytes'], delta=2 * 1024), ]) def test_quota_reserve_unders(self): self.init_usage('test_project', 'test_user', 'shares', 1, 0) self.init_usage('test_project', 'test_user', 'gigabytes', 1 * 1024, 0) context = FakeContext('test_project', 'test_class') quotas = dict(shares=5, gigabytes=10 * 1024, ) deltas = dict(shares=-2, gigabytes=-2 * 1024, ) result = sqa_api.quota_reserve(context, self.resources, quotas, quotas, deltas, self.expire, 0, 0) self.assertEqual(set([]), self.sync_called) self.compare_usage(self.usages, [dict(resource='shares', project_id='test_project', in_use=1, reserved=0, until_refresh=None), dict(resource='gigabytes', project_id='test_project', in_use=1 * 1024, reserved=0, until_refresh=None), ]) self.assertEqual({}, self.usages_created) self.compare_reservation(result, [dict(resource='shares', usage_id=self.usages['shares'], project_id='test_project', delta=-2), dict(resource='gigabytes', usage_id=self.usages['gigabytes'], delta=-2 * 1024), ]) def test_quota_reserve_overs(self): self.init_usage('test_project', 'test_user', 'shares', 4, 0) self.init_usage('test_project', 'test_user', 'gigabytes', 10 * 1024, 0) context = FakeContext('test_project', 'test_class') quotas = dict(shares=5, gigabytes=10 * 1024, ) deltas = dict(shares=2, gigabytes=2 * 1024, ) self.assertRaises(exception.OverQuota, sqa_api.quota_reserve, context, self.resources, quotas, quotas, deltas, self.expire, 0, 0) self.assertEqual(set([]), self.sync_called) self.compare_usage(self.usages, [dict(resource='shares', project_id='test_project', in_use=4, reserved=0, until_refresh=None), dict(resource='gigabytes', project_id='test_project', in_use=10 * 1024, reserved=0, until_refresh=None), ]) self.assertEqual({}, self.usages_created) self.assertEqual({}, self.reservations_created) def test_quota_reserve_reduction(self): self.init_usage('test_project', 'test_user', 'shares', 10, 0) self.init_usage('test_project', 'test_user', 'gigabytes', 20 * 1024, 0) context = FakeContext('test_project', 'test_class') quotas = dict(shares=5, gigabytes=10 * 1024, ) deltas = dict(shares=-2, gigabytes=-2 * 1024, ) result = sqa_api.quota_reserve(context, self.resources, quotas, quotas, deltas, self.expire, 0, 0) self.assertEqual(set([]), self.sync_called) self.compare_usage(self.usages, [dict(resource='shares', project_id='test_project', in_use=10, reserved=0, until_refresh=None), dict(resource='gigabytes', project_id='test_project', in_use=20 * 1024, reserved=0, until_refresh=None), ]) self.assertEqual({}, self.usages_created) self.compare_reservation(result, [dict(resource='shares', usage_id=self.usages['shares'], project_id='test_project', delta=-2), dict(resource='gigabytes', usage_id=self.usages['gigabytes'], project_id='test_project', delta=-2 * 1024), ]) manila-2.0.0/manila/tests/share/0000775000567000056710000000000012701407265017624 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/test_driver.py0000664000567000056710000005570612701407107022540 0ustar jenkinsjenkins00000000000000# Copyright 2012 NetApp # Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the Share driver module.""" import time import ddt import mock from manila import exception from manila import network from manila.share import configuration from manila.share import driver from manila import test from manila.tests import utils as test_utils from manila import utils def fake_execute_with_raise(*cmd, **kwargs): raise exception.ProcessExecutionError def fake_sleep(duration): pass class ShareDriverWithExecuteMixin(driver.ShareDriver, driver.ExecuteMixin): pass @ddt.ddt class ShareDriverTestCase(test.TestCase): _SNAPSHOT_METHOD_NAMES = ["create_snapshot", "delete_snapshot", "create_share_from_snapshot"] def setUp(self): super(ShareDriverTestCase, self).setUp() self.utils = utils self.mock_object(self.utils, 'execute', fake_execute_with_raise) self.time = time self.mock_object(self.time, 'sleep', fake_sleep) driver.CONF.set_default('driver_handles_share_servers', True) def test__try_execute(self): execute_mixin = ShareDriverWithExecuteMixin( True, configuration=configuration.Configuration(None)) self.assertRaises(exception.ProcessExecutionError, execute_mixin._try_execute) def test_verify_share_driver_mode_option_type(self): data = {'DEFAULT': {'driver_handles_share_servers': 'True'}} with test_utils.create_temp_config_with_opts(data): share_driver = driver.ShareDriver([True, False]) self.assertTrue(share_driver.driver_handles_share_servers) def _instantiate_share_driver(self, network_config_group, driver_handles_share_servers, admin_network_config_group=None): self.mock_object(network, 'API') config = mock.Mock() config.append_config_values = mock.Mock() config.config_group = 'fake_config_group' config.network_config_group = network_config_group if admin_network_config_group: config.admin_network_config_group = admin_network_config_group config.safe_get = mock.Mock(return_value=driver_handles_share_servers) share_driver = driver.ShareDriver([True, False], configuration=config) self.assertTrue(hasattr(share_driver, 'configuration')) config.append_config_values.assert_called_once_with(driver.share_opts) if driver_handles_share_servers: calls = [] if network_config_group: calls.append(mock.call( config_group_name=config.network_config_group)) else: calls.append(mock.call( config_group_name=config.config_group)) if admin_network_config_group: calls.append(mock.call( config_group_name=config.admin_network_config_group, label='admin')) network.API.assert_has_calls(calls) self.assertTrue(hasattr(share_driver, 'network_api')) self.assertTrue(hasattr(share_driver, 'admin_network_api')) self.assertIsNotNone(share_driver.network_api) self.assertIsNotNone(share_driver.admin_network_api) else: self.assertFalse(hasattr(share_driver, 'network_api')) self.assertTrue(hasattr(share_driver, 'admin_network_api')) self.assertIsNone(share_driver.admin_network_api) self.assertFalse(network.API.called) return share_driver def test_instantiate_share_driver(self): self._instantiate_share_driver(None, True) def test_instantiate_share_driver_another_config_group(self): self._instantiate_share_driver("fake_network_config_group", True) def 
test_instantiate_share_driver_with_admin_network(self): self._instantiate_share_driver( "fake_network_config_group", True, "fake_admin_network_config_group") def test_instantiate_share_driver_no_configuration(self): self.mock_object(network, 'API') share_driver = driver.ShareDriver(True, configuration=None) self.assertIsNone(share_driver.configuration) network.API.assert_called_once_with(config_group_name=None) def test_get_share_stats_refresh_false(self): share_driver = driver.ShareDriver(True, configuration=None) share_driver._stats = {'fake_key': 'fake_value'} result = share_driver.get_share_stats(False) self.assertEqual(share_driver._stats, result) def test_get_share_stats_refresh_true(self): conf = configuration.Configuration(None) expected_keys = [ 'qos', 'driver_version', 'share_backend_name', 'free_capacity_gb', 'total_capacity_gb', 'driver_handles_share_servers', 'reserved_percentage', 'vendor_name', 'storage_protocol', 'snapshot_support', ] share_driver = driver.ShareDriver(True, configuration=conf) fake_stats = {'fake_key': 'fake_value'} share_driver._stats = fake_stats result = share_driver.get_share_stats(True) self.assertNotEqual(fake_stats, result) for key in expected_keys: self.assertIn(key, result) self.assertEqual('Open Source', result['vendor_name']) @ddt.data( {'opt': True, 'allowed': True}, {'opt': True, 'allowed': (True, False)}, {'opt': True, 'allowed': [True, False]}, {'opt': True, 'allowed': set([True, False])}, {'opt': False, 'allowed': False}, {'opt': False, 'allowed': (True, False)}, {'opt': False, 'allowed': [True, False]}, {'opt': False, 'allowed': set([True, False])}) @ddt.unpack def test__verify_share_server_handling_valid_cases(self, opt, allowed): conf = configuration.Configuration(None) self.mock_object(conf, 'safe_get', mock.Mock(return_value=opt)) share_driver = driver.ShareDriver(allowed, configuration=conf) self.assertTrue(conf.safe_get.called) self.assertEqual(opt, share_driver.driver_handles_share_servers) @ddt.data( {'opt': False, 'allowed': True}, {'opt': True, 'allowed': False}, {'opt': None, 'allowed': True}, {'opt': 'True', 'allowed': True}, {'opt': 'False', 'allowed': False}, {'opt': [], 'allowed': True}, {'opt': True, 'allowed': []}, {'opt': True, 'allowed': ['True']}, {'opt': False, 'allowed': ['False']}) @ddt.unpack def test__verify_share_server_handling_invalid_cases(self, opt, allowed): conf = configuration.Configuration(None) self.mock_object(conf, 'safe_get', mock.Mock(return_value=opt)) self.assertRaises( exception.ManilaException, driver.ShareDriver, allowed, configuration=conf) self.assertTrue(conf.safe_get.called) def test_setup_server_handling_disabled(self): share_driver = self._instantiate_share_driver(None, False) # We expect successful execution, nothing to assert share_driver.setup_server('Nothing is expected to happen.') def test_setup_server_handling_enabled(self): share_driver = self._instantiate_share_driver(None, True) self.assertRaises( NotImplementedError, share_driver.setup_server, 'fake_network_info') def test_teardown_server_handling_disabled(self): share_driver = self._instantiate_share_driver(None, False) # We expect successful execution, nothing to assert share_driver.teardown_server('Nothing is expected to happen.') def test_teardown_server_handling_enabled(self): share_driver = self._instantiate_share_driver(None, True) self.assertRaises( NotImplementedError, share_driver.teardown_server, 'fake_share_server_details') def _assert_is_callable(self, obj, attr): self.assertTrue(callable(getattr(obj, attr))) 
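    # The ddt-driven tests below feed method names through the helper above.
    # A minimal, self-contained sketch of how such a data-driven test looks,
    # assuming hypothetical ``CallableHooksTestCase``/``test_hook_is_callable``
    # names (ddt generates one test per datum, e.g.
    # ``test_hook_is_callable_1_manage_existing``):
    #
    #     import unittest
    #
    #     import ddt
    #
    #     from manila.share import driver
    #
    #     @ddt.ddt
    #     class CallableHooksTestCase(unittest.TestCase):
    #         @ddt.data('manage_existing', 'unmanage')
    #         def test_hook_is_callable(self, method):
    #             self.assertTrue(callable(getattr(driver.ShareDriver, method)))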
@ddt.data('manage_existing', 'unmanage') def test_drivers_methods_needed_by_manage_functionality(self, method): share_driver = self._instantiate_share_driver(None, False) self._assert_is_callable(share_driver, method) @ddt.data('manage_existing_snapshot', 'unmanage_snapshot') def test_drivers_methods_needed_by_manage_snapshot_functionality( self, method): share_driver = self._instantiate_share_driver(None, False) self._assert_is_callable(share_driver, method) @ddt.data(True, False) def test_get_share_server_pools(self, value): driver.CONF.set_default('driver_handles_share_servers', value) share_driver = driver.ShareDriver(value) self.assertEqual([], share_driver.get_share_server_pools('fake_server')) @ddt.data(0.8, 1.0, 10.5, 20.0, None, '1', '1.1') def test_check_for_setup_error(self, value): driver.CONF.set_default('driver_handles_share_servers', False) share_driver = driver.ShareDriver(False) share_driver.configuration = configuration.Configuration(None) self.mock_object(share_driver.configuration, 'safe_get', mock.Mock(return_value=value)) if value and float(value) >= 1.0: share_driver.check_for_setup_error() else: self.assertRaises(exception.InvalidParameterValue, share_driver.check_for_setup_error) def test_snapshot_support_exists(self): driver.CONF.set_default('driver_handles_share_servers', True) fake_method = lambda *args, **kwargs: None child_methods = { "create_snapshot": fake_method, "delete_snapshot": fake_method, "create_share_from_snapshot": fake_method, } child_class_instance = type( "NotRedefined", (driver.ShareDriver, ), child_methods)(True) self.mock_object(child_class_instance, "configuration") child_class_instance._update_share_stats() self.assertEqual( True, child_class_instance._stats["snapshot_support"]) self.assertTrue(child_class_instance.configuration.safe_get.called) @ddt.data( ([], [], False), (_SNAPSHOT_METHOD_NAMES, [], True), (_SNAPSHOT_METHOD_NAMES, _SNAPSHOT_METHOD_NAMES, True), (_SNAPSHOT_METHOD_NAMES[0:1], _SNAPSHOT_METHOD_NAMES[1:], True), ([], _SNAPSHOT_METHOD_NAMES, True), (_SNAPSHOT_METHOD_NAMES[0:1], _SNAPSHOT_METHOD_NAMES[1:2], False), ) @ddt.unpack def test_check_redefined_driver_methods(self, common_drv_meth_names, child_drv_meth_names, expected_result): # This test covers the case of drivers inheriting other drivers or # common classes. 
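        # A minimal sketch of the dynamic subclassing used below, assuming a
        # hypothetical ``FakeChild`` name (it mirrors the ``type()`` calls that
        # follow):
        #
        #     driver.CONF.set_default('driver_handles_share_servers', True)
        #     methods = {'create_snapshot': lambda *args, **kwargs: None}
        #     FakeChild = type('FakeChild', (driver.ShareDriver,), methods)
        #     child = FakeChild(True)  # behaves like a driver subclass here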
driver.CONF.set_default('driver_handles_share_servers', True) common_drv_methods, child_drv_methods = [ {method_name: lambda *args, **kwargs: None for method_name in method_names} for method_names in (common_drv_meth_names, child_drv_meth_names)] common_drv = type( "NotRedefinedCommon", (driver.ShareDriver, ), common_drv_methods) child_drv_instance = type("NotRedefined", (common_drv, ), child_drv_methods)(True) has_redefined_methods = ( child_drv_instance._has_redefined_driver_methods( self._SNAPSHOT_METHOD_NAMES)) self.assertEqual(expected_result, has_redefined_methods) @ddt.data( (), ("create_snapshot"), ("delete_snapshot"), ("create_share_from_snapshot"), ("create_snapshot", "delete_snapshot"), ("create_snapshot", "create_share_from_snapshot"), ("delete_snapshot", "create_share_from_snapshot"), ("create_snapshot", "delete_snapshot", "create_share_from_snapshotFOO"), ("create_snapshot", "delete_snapshot", "FOOcreate_share_from_snapshot"), ) def test_snapshot_support_absent(self, methods): driver.CONF.set_default('driver_handles_share_servers', True) fake_method = lambda *args, **kwargs: None child_methods = {} for method in methods: child_methods[method] = fake_method child_class_instance = type( "NotRedefined", (driver.ShareDriver, ), child_methods)(True) self.mock_object(child_class_instance, "configuration") child_class_instance._update_share_stats() self.assertEqual( False, child_class_instance._stats["snapshot_support"]) self.assertTrue(child_class_instance.configuration.safe_get.called) @ddt.data(True, False) def test_snapshot_support_not_exists_and_set_explicitly( self, snapshots_are_supported): driver.CONF.set_default('driver_handles_share_servers', True) child_class_instance = type( "NotRedefined", (driver.ShareDriver, ), {})(True) self.mock_object(child_class_instance, "configuration") child_class_instance._update_share_stats( {"snapshot_support": snapshots_are_supported}) self.assertEqual( snapshots_are_supported, child_class_instance._stats["snapshot_support"]) self.assertTrue(child_class_instance.configuration.safe_get.called) @ddt.data(True, False) def test_snapshot_support_exists_and_set_explicitly( self, snapshots_are_supported): driver.CONF.set_default('driver_handles_share_servers', True) fake_method = lambda *args, **kwargs: None child_methods = { "create_snapshot": fake_method, "delete_snapshot": fake_method, "create_share_from_snapshot": fake_method, } child_class_instance = type( "NotRedefined", (driver.ShareDriver, ), child_methods)(True) self.mock_object(child_class_instance, "configuration") child_class_instance._update_share_stats( {"snapshot_support": snapshots_are_supported}) self.assertEqual( snapshots_are_supported, child_class_instance._stats["snapshot_support"]) self.assertTrue(child_class_instance.configuration.safe_get.called) def test_get_periodic_hook_data(self): share_driver = self._instantiate_share_driver(None, False) share_instances = ["list", "of", "share", "instances"] result = share_driver.get_periodic_hook_data( "fake_context", share_instances) self.assertEqual(share_instances, result) def test_get_admin_network_allocations_number(self): share_driver = self._instantiate_share_driver(None, True) self.assertEqual( 0, share_driver.get_admin_network_allocations_number()) def test_allocate_admin_network_count_None(self): share_driver = self._instantiate_share_driver(None, True) ctxt = 'fake_context' share_server = 'fake_share_server' mock_get_admin_network_allocations_number = self.mock_object( share_driver, 'get_admin_network_allocations_number', 
mock.Mock(return_value=0)) self.mock_object( share_driver.admin_network_api, 'allocate_network', mock.Mock(side_effect=Exception('ShouldNotBeRaised'))) share_driver.allocate_admin_network(ctxt, share_server) mock_get_admin_network_allocations_number.assert_called_once_with() self.assertFalse( share_driver.admin_network_api.allocate_network.called) def test_allocate_admin_network_count_0(self): share_driver = self._instantiate_share_driver(None, True) ctxt = 'fake_context' share_server = 'fake_share_server' self.mock_object( share_driver, 'get_admin_network_allocations_number', mock.Mock(return_value=0)) self.mock_object( share_driver.admin_network_api, 'allocate_network', mock.Mock(side_effect=Exception('ShouldNotBeRaised'))) share_driver.allocate_admin_network(ctxt, share_server, count=0) self.assertFalse( share_driver.get_admin_network_allocations_number.called) self.assertFalse( share_driver.admin_network_api.allocate_network.called) def test_allocate_admin_network_count_1_api_initialized(self): share_driver = self._instantiate_share_driver(None, True) ctxt = 'fake_context' share_server = 'fake_share_server' mock_get_admin_network_allocations_number = self.mock_object( share_driver, 'get_admin_network_allocations_number', mock.Mock(return_value=1)) self.mock_object( share_driver.admin_network_api, 'allocate_network', mock.Mock()) share_driver.allocate_admin_network(ctxt, share_server) mock_get_admin_network_allocations_number.assert_called_once_with() share_driver.admin_network_api.allocate_network.\ assert_called_once_with(ctxt, share_server, count=1) def test_allocate_admin_network_count_1_api_not_initialized(self): share_driver = self._instantiate_share_driver(None, True, None) ctxt = 'fake_context' share_server = 'fake_share_server' share_driver._admin_network_api = None mock_get_admin_network_allocations_number = self.mock_object( share_driver, 'get_admin_network_allocations_number', mock.Mock(return_value=1)) self.assertRaises( exception.NetworkBadConfigurationException, share_driver.allocate_admin_network, ctxt, share_server, ) mock_get_admin_network_allocations_number.assert_called_once_with() def test_migration_start(self): driver.CONF.set_default('driver_handles_share_servers', False) share_driver = driver.ShareDriver(False) self.assertEqual((None, None), share_driver.migration_start(None, None, None, None, None, None)) def test_migration_complete(self): driver.CONF.set_default('driver_handles_share_servers', False) share_driver = driver.ShareDriver(False) share_driver.migration_complete(None, None, None, None) def test_migration_cancel(self): driver.CONF.set_default('driver_handles_share_servers', False) share_driver = driver.ShareDriver(False) self.assertRaises(NotImplementedError, share_driver.migration_cancel, None, None, None, None) def test_migration_get_progress(self): driver.CONF.set_default('driver_handles_share_servers', False) share_driver = driver.ShareDriver(False) self.assertRaises(NotImplementedError, share_driver.migration_get_progress, None, None, None, None) def test_migration_get_driver_info_default(self): driver.CONF.set_default('driver_handles_share_servers', False) share_driver = driver.ShareDriver(False) self.assertIsNone( share_driver.migration_get_driver_info(None, None, None), None) @ddt.data(True, False) def test_migration_get_info(self, admin): expected = {'mount': 'mount -vt fake_proto /fake/fake_id %(path)s', 'unmount': 'umount -v %(path)s'} fake_share = {'id': 'fake_id', 'share_proto': 'fake_proto', 'export_locations': [{'path': '/fake/fake_id', 
'is_admin_only': admin}]} driver.CONF.set_default('driver_handles_share_servers', False) share_driver = driver.ShareDriver(False) share_driver.configuration = configuration.Configuration(None) migration_info = share_driver.migration_get_info( None, fake_share, "fake_server") self.assertEqual(expected, migration_info) def test_update_access(self): share_driver = driver.ShareDriver(True, configuration=None) self.assertRaises( NotImplementedError, share_driver.update_access, 'ctx', 'fake_share', 'fake_access_rules', 'fake_add_rules', 'fake_delete_rules' ) def test_create_replica(self): share_driver = self._instantiate_share_driver(None, True) self.assertRaises(NotImplementedError, share_driver.create_replica, 'fake_context', ['r1', 'r2'], 'fake_new_replica', [], []) def test_delete_replica(self): share_driver = self._instantiate_share_driver(None, True) self.assertRaises(NotImplementedError, share_driver.delete_replica, 'fake_context', ['r1', 'r2'], 'fake_replica', []) def test_promote_replica(self): share_driver = self._instantiate_share_driver(None, True) self.assertRaises(NotImplementedError, share_driver.promote_replica, 'fake_context', [], 'fake_replica', []) def test_update_replica_state(self): share_driver = self._instantiate_share_driver(None, True) self.assertRaises(NotImplementedError, share_driver.update_replica_state, 'fake_context', ['r1', 'r2'], 'fake_replica', [], []) def test_create_replicated_snapshot(self): share_driver = self._instantiate_share_driver(None, False) self.assertRaises(NotImplementedError, share_driver.create_replicated_snapshot, 'fake_context', ['r1', 'r2'], ['s1', 's2']) def test_delete_replicated_snapshot(self): share_driver = self._instantiate_share_driver(None, False) self.assertRaises(NotImplementedError, share_driver.delete_replicated_snapshot, 'fake_context', ['r1', 'r2'], ['s1', 's2']) def test_update_replicated_snapshot(self): share_driver = self._instantiate_share_driver(None, False) self.assertRaises(NotImplementedError, share_driver.update_replicated_snapshot, 'fake_context', ['r1', 'r2'], 'r1', ['s1', 's2'], 's1') manila-2.0.0/manila/tests/share/test_migration.py0000664000567000056710000003322312701407107023224 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hitachi Data Systems inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
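# === Illustrative sketch: not part of the manila source; all names below are
# === assumptions added for illustration only.
# The delete_instance_and_wait tests below choreograph time.time()/time.sleep()
# mocks around a helper that polls the database until the deleted share
# instance disappears (NotFound) or a deadline passes. This standalone function
# is a simplified, hypothetical version of that poll-until-gone-or-timeout
# pattern, included only to make the mock sequencing in those tests easier to
# follow; LookupError stands in for manila's NotFound exception.
import time


def wait_until_gone(fetch, timeout=300, interval=1):
    """Poll fetch() until it raises LookupError or the timeout expires."""
    deadline = time.time() + timeout
    while True:
        try:
            fetch()
        except LookupError:
            return  # the resource is gone, as expected after a delete
        if time.time() > deadline:
            raise RuntimeError('resource still present after %s seconds'
                               % timeout)
        time.sleep(interval)


def _already_deleted():
    raise LookupError('already deleted')


wait_until_gone(_already_deleted, timeout=5)  # returns immediately
# === end of illustrative sketch ===============================================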
import ddt import mock import time from manila.common import constants from manila import context from manila import db from manila import exception from manila.share import api as share_api from manila.share import migration from manila import test from manila.tests import db_utils from manila import utils @ddt.ddt class ShareMigrationHelperTestCase(test.TestCase): """Tests ShareMigrationHelper.""" def setUp(self): super(ShareMigrationHelperTestCase, self).setUp() self.share = db_utils.create_share() self.share_instance = db_utils.create_share_instance( share_id=self.share['id'], share_network_id='fake_network_id') self.context = context.get_admin_context() self.helper = migration.ShareMigrationHelper( self.context, db, self.share) def test_delete_instance_and_wait(self): # mocks self.mock_object(share_api.API, 'delete_instance') self.mock_object(db, 'share_instance_get', mock.Mock(side_effect=[self.share_instance, exception.NotFound()])) self.mock_object(time, 'sleep') # run self.helper.delete_instance_and_wait(self.share_instance) # asserts share_api.API.delete_instance.assert_called_once_with( self.context, self.share_instance, True) db.share_instance_get.assert_has_calls([ mock.call(self.context, self.share_instance['id']), mock.call(self.context, self.share_instance['id'])]) time.sleep.assert_called_once_with(1) def test_delete_instance_and_wait_timeout(self): # mocks self.mock_object(share_api.API, 'delete_instance') self.mock_object(db, 'share_instance_get', mock.Mock(side_effect=[self.share_instance, None])) self.mock_object(time, 'sleep') now = time.time() timeout = now + 310 self.mock_object(time, 'time', mock.Mock(side_effect=[now, timeout])) # run self.assertRaises(exception.ShareMigrationFailed, self.helper.delete_instance_and_wait, self.share_instance) # asserts share_api.API.delete_instance.assert_called_once_with( self.context, self.share_instance, True) db.share_instance_get.assert_called_once_with( self.context, self.share_instance['id']) time.time.assert_has_calls([mock.call(), mock.call()]) def test_delete_instance_and_wait_not_found(self): # mocks self.mock_object(share_api.API, 'delete_instance') self.mock_object(db, 'share_instance_get', mock.Mock(side_effect=exception.NotFound)) # run self.helper.delete_instance_and_wait(self.share_instance) # asserts share_api.API.delete_instance.assert_called_once_with( self.context, self.share_instance, True) db.share_instance_get.assert_called_once_with( self.context, self.share_instance['id']) def test_create_instance_and_wait(self): host = {'host': 'fake_host'} share_instance_creating = db_utils.create_share_instance( share_id=self.share['id'], status=constants.STATUS_CREATING, share_network_id='fake_network_id') share_instance_available = db_utils.create_share_instance( share_id=self.share['id'], status=constants.STATUS_AVAILABLE, share_network_id='fake_network_id') # mocks self.mock_object(share_api.API, 'create_instance', mock.Mock(return_value=share_instance_creating)) self.mock_object(db, 'share_instance_get', mock.Mock(side_effect=[share_instance_creating, share_instance_available])) self.mock_object(time, 'sleep') # run self.helper.create_instance_and_wait(self.share, share_instance_creating, host) # asserts share_api.API.create_instance.assert_called_once_with( self.context, self.share, self.share_instance['share_network_id'], 'fake_host') db.share_instance_get.assert_has_calls([ mock.call(self.context, share_instance_creating['id'], with_share_data=True), mock.call(self.context, share_instance_creating['id'], 
with_share_data=True)]) time.sleep.assert_called_once_with(1) def test_create_instance_and_wait_status_error(self): host = {'host': 'fake_host'} share_instance_error = db_utils.create_share_instance( share_id=self.share['id'], status=constants.STATUS_ERROR, share_network_id='fake_network_id') # mocks self.mock_object(share_api.API, 'create_instance', mock.Mock(return_value=share_instance_error)) self.mock_object(self.helper, 'cleanup_new_instance') self.mock_object(db, 'share_instance_get', mock.Mock(return_value=share_instance_error)) # run self.assertRaises(exception.ShareMigrationFailed, self.helper.create_instance_and_wait, self.share, self.share_instance, host) # asserts share_api.API.create_instance.assert_called_once_with( self.context, self.share, self.share_instance['share_network_id'], 'fake_host') db.share_instance_get.assert_called_once_with( self.context, share_instance_error['id'], with_share_data=True) self.helper.cleanup_new_instance.assert_called_once_with( share_instance_error) def test_create_instance_and_wait_timeout(self): host = {'host': 'fake_host'} share_instance_creating = db_utils.create_share_instance( share_id=self.share['id'], status=constants.STATUS_CREATING, share_network_id='fake_network_id') # mocks self.mock_object(share_api.API, 'create_instance', mock.Mock(return_value=share_instance_creating)) self.mock_object(self.helper, 'cleanup_new_instance') self.mock_object(db, 'share_instance_get', mock.Mock(return_value=share_instance_creating)) self.mock_object(time, 'sleep') now = time.time() timeout = now + 310 self.mock_object(time, 'time', mock.Mock(side_effect=[now, timeout])) # run self.assertRaises(exception.ShareMigrationFailed, self.helper.create_instance_and_wait, self.share, self.share_instance, host) # asserts share_api.API.create_instance.assert_called_once_with( self.context, self.share, self.share_instance['share_network_id'], 'fake_host') db.share_instance_get.assert_called_once_with( self.context, share_instance_creating['id'], with_share_data=True) time.time.assert_has_calls([mock.call(), mock.call()]) self.helper.cleanup_new_instance.assert_called_once_with( share_instance_creating) def test_change_to_read_only_with_ro_support(self): share_instance = db_utils.create_share_instance( share_id=self.share['id'], status=constants.STATUS_AVAILABLE) access = db_utils.create_access(share_id=self.share['id'], access_to='fake_ip', access_level='rw') server = db_utils.create_share_server(share_id=self.share['id']) # mocks share_driver = mock.Mock() self.mock_object(share_driver, 'update_access') self.mock_object(db, 'share_access_get_all_for_instance', mock.Mock(return_value=[access])) # run self.helper.change_to_read_only(share_instance, server, True, share_driver) # asserts db.share_access_get_all_for_instance.assert_called_once_with( self.context, share_instance['id']) share_driver.update_access.assert_called_once_with( self.context, share_instance, [access], add_rules=[], delete_rules=[], share_server=server) def test_change_to_read_only_without_ro_support(self): share_instance = db_utils.create_share_instance( share_id=self.share['id'], status=constants.STATUS_AVAILABLE) access = db_utils.create_access(share_id=self.share['id'], access_to='fake_ip', access_level='rw') server = db_utils.create_share_server(share_id=self.share['id']) # mocks share_driver = mock.Mock() self.mock_object(share_driver, 'update_access') self.mock_object(db, 'share_access_get_all_for_instance', mock.Mock(return_value=[access])) # run 
self.helper.change_to_read_only(share_instance, server, False, share_driver) # asserts db.share_access_get_all_for_instance.assert_called_once_with( self.context, share_instance['id']) share_driver.update_access.assert_called_once_with( self.context, share_instance, [], add_rules=[], delete_rules=[access], share_server=server) def test_revert_access_rules(self): share_instance = db_utils.create_share_instance( share_id=self.share['id'], status=constants.STATUS_AVAILABLE) access = db_utils.create_access(share_id=self.share['id'], access_to='fake_ip', access_level='rw') server = db_utils.create_share_server(share_id=self.share['id']) # mocks share_driver = mock.Mock() self.mock_object(share_driver, 'update_access') self.mock_object(db, 'share_access_get_all_for_instance', mock.Mock(return_value=[access])) # run self.helper.revert_access_rules(share_instance, server, share_driver) # asserts db.share_access_get_all_for_instance.assert_called_once_with( self.context, share_instance['id']) share_driver.update_access.assert_called_once_with( self.context, share_instance, [access], add_rules=[], delete_rules=[], share_server=server) def test_apply_new_access_rules(self): new_share_instance = db_utils.create_share_instance( share_id=self.share['id'], status=constants.STATUS_AVAILABLE, access_rules_status='active') access = db_utils.create_access(share_id=self.share['id'], access_to='fake_ip', access_level='rw') # mocks self.mock_object(db, 'share_instance_access_copy') self.mock_object(db, 'share_access_get_all_for_instance', mock.Mock(return_value=[access])) self.mock_object(share_api.API, 'allow_access_to_instance') self.mock_object(utils, 'wait_for_access_update') # run self.helper.apply_new_access_rules(new_share_instance) # asserts db.share_instance_access_copy(self.context, self.share['id'], new_share_instance['id']) db.share_access_get_all_for_instance.assert_called_once_with( self.context, new_share_instance['id']) share_api.API.allow_access_to_instance.assert_called_with( self.context, new_share_instance, [access]) utils.wait_for_access_update.assert_called_with( self.context, db, new_share_instance, self.helper.migration_wait_access_rules_timeout) @ddt.data(None, Exception('fake')) def test_cleanup_new_instance(self, exc): # mocks self.mock_object(self.helper, 'delete_instance_and_wait', mock.Mock(side_effect=exc)) self.mock_object(migration.LOG, 'warning') # run self.helper.cleanup_new_instance(self.share_instance) # asserts self.helper.delete_instance_and_wait.assert_called_once_with( self.share_instance) if exc: migration.LOG.warning.called @ddt.data(None, Exception('fake')) def test_cleanup_access_rules(self, exc): # mocks server = db_utils.create_share_server() share_driver = mock.Mock() self.mock_object(self.helper, 'revert_access_rules', mock.Mock(side_effect=exc)) self.mock_object(migration.LOG, 'warning') # run self.helper.cleanup_access_rules(self.share_instance, server, share_driver) # asserts self.helper.revert_access_rules.assert_called_once_with( self.share_instance, server, share_driver) if exc: migration.LOG.warning.called manila-2.0.0/manila/tests/share/test_share_types.py0000664000567000056710000002525712701407107023571 0ustar jenkinsjenkins00000000000000# Copyright 2015 Deutsche Telekom AG. All rights reserved. # Copyright 2015 Tom Barron. All rights reserved. # Copyright 2015 Mirantis, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test of Share Type methods for Manila.""" import copy import datetime import ddt import mock from manila.common import constants from manila import context from manila import db from manila import exception from manila.share import share_types from manila import test def create_share_type_dict(extra_specs=None): return { 'fake_type': { 'name': 'fake1', 'extra_specs': extra_specs } } @ddt.ddt class ShareTypesTestCase(test.TestCase): fake_type = { 'test': { 'created_at': datetime.datetime(2015, 1, 22, 11, 43, 24), 'deleted': '0', 'deleted_at': None, 'extra_specs': {}, 'required_extra_specs': {}, 'id': u'fooid-1', 'name': u'test', 'updated_at': None } } fake_extra_specs = {u'gold': u'True'} fake_share_type_id = u'fooid-2' fake_type_w_extra = { 'test_with_extra': { 'created_at': datetime.datetime(2015, 1, 22, 11, 45, 31), 'deleted': '0', 'deleted_at': None, 'extra_specs': fake_extra_specs, 'required_extra_specs': {}, 'id': fake_share_type_id, 'name': u'test_with_extra', 'updated_at': None } } fake_type_w_valid_extra = { 'test_with_extra': { 'created_at': datetime.datetime(2015, 1, 22, 11, 45, 31), 'deleted': '0', 'deleted_at': None, 'extra_specs': { constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: 'true' }, 'required_extra_specs': { constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: 'true' }, 'id': u'fooid-2', 'name': u'test_with_extra', 'updated_at': None } } fake_types = fake_type.copy() fake_types.update(fake_type_w_extra) fake_types.update(fake_type_w_valid_extra) fake_share = {'id': u'fooid-1', 'share_type_id': fake_share_type_id} def setUp(self): super(ShareTypesTestCase, self).setUp() self.context = context.get_admin_context() @ddt.data({}, fake_type, fake_type_w_extra, fake_types) def test_get_all_types(self, share_type): self.mock_object(db, 'share_type_get_all', mock.Mock(return_value=copy.deepcopy(share_type))) returned_type = share_types.get_all_types(self.context) self.assertItemsEqual(share_type, returned_type) def test_get_all_types_search(self): share_type = self.fake_type_w_extra search_filter = {"extra_specs": {"gold": "True"}, 'is_public': True} self.mock_object(db, 'share_type_get_all', mock.Mock(return_value=share_type)) returned_type = share_types.get_all_types(self.context, search_opts=search_filter) db.share_type_get_all.assert_called_once_with( mock.ANY, 0, filters={'is_public': True}) self.assertItemsEqual(share_type, returned_type) search_filter = {"extra_specs": {"gold": "False"}} returned_type = share_types.get_all_types(self.context, search_opts=search_filter) self.assertEqual({}, returned_type) def test_get_share_type_extra_specs(self): share_type = self.fake_type_w_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) id = share_type['id'] extra_spec = share_types.get_share_type_extra_specs(id, key='gold') self.assertEqual(share_type['extra_specs']['gold'], extra_spec) extra_spec = share_types.get_share_type_extra_specs(id) self.assertEqual(share_type['extra_specs'], extra_spec) def test_share_types_diff(self): share_type1 = self.fake_type['test'] share_type2 = self.fake_type_w_extra['test_with_extra'] 
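# === Illustrative sketch: not part of the manila source; the function below is
# === a hypothetical reimplementation written only for illustration.
# The surrounding test_share_types_diff case asserts that comparing two share
# types yields a per-key diff of their extra_specs as (old_value, new_value)
# tuples plus an 'equal' flag. This standalone helper reproduces that result
# shape so the expected {'extra_specs': {'gold': (None, 'True')}} value is
# easier to read.
def diff_extra_specs(specs1, specs2):
    """Return ({'extra_specs': {key: (old, new), ...}}, equal)."""
    changed = {}
    for key in set(specs1) | set(specs2):
        old, new = specs1.get(key), specs2.get(key)
        if old != new:
            changed[key] = (old, new)
    return {'extra_specs': changed}, not changed


# Mirrors the expectation in the test: {} vs {'gold': 'True'} differs on 'gold'.
assert diff_extra_specs({}, {'gold': 'True'}) == (
    {'extra_specs': {'gold': (None, 'True')}}, False)
assert diff_extra_specs({'gold': 'True'}, {'gold': 'True'})[1] is True
# === end of illustrative sketch ===============================================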
expeted_diff = {'extra_specs': {u'gold': (None, u'True')}} self.mock_object(db, 'share_type_get', mock.Mock(side_effect=[share_type1, share_type2])) (diff, equal) = share_types.share_types_diff(self.context, share_type1['id'], share_type2['id']) self.assertFalse(equal) self.assertEqual(expeted_diff, diff) def test_share_types_diff_equal(self): share_type = self.fake_type['test'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) (diff, equal) = share_types.share_types_diff(self.context, share_type['id'], share_type['id']) self.assertTrue(equal) def test_get_extra_specs_from_share(self): expected = self.fake_extra_specs self.mock_object(share_types, 'get_share_type_extra_specs', mock.Mock(return_value=expected)) spec_value = share_types.get_extra_specs_from_share(self.fake_share) self.assertEqual(expected, spec_value) share_types.get_share_type_extra_specs.assert_called_once_with( self.fake_share_type_id) @ddt.data({}, {"fake": "fake"}, {constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: None}) def test_create_without_required_extra_spec(self, extra_specs): name = "fake_share_type" self.assertRaises(exception.InvalidShareType, share_types.create, self.context, name, extra_specs) def test_get_share_type_required_extra_specs(self): valid_required_extra_specs = ( constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS,) actual_result = share_types.get_required_extra_specs() self.assertEqual(valid_required_extra_specs, actual_result) def test_validate_required_extra_spec_other(self): actual_result = share_types.is_valid_required_extra_spec( 'fake', 'fake') self.assertIsNone(actual_result) @ddt.data('1', 'True', 'false', '0', True, False) def test_validate_required_extra_spec_valid(self, value): key = constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS actual_result = share_types.is_valid_required_extra_spec(key, value) self.assertTrue(actual_result) @ddt.data('invalid', {}, '0000000000') def test_validate_required_extra_spec_invalid(self, value): key = constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS actual_result = share_types.is_valid_required_extra_spec(key, value) self.assertFalse(actual_result) @ddt.data({constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: 'true'}, {constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: 'true', 'another_key': True}) def test_get_valid_required_extra_specs_valid(self, specs): actual_result = share_types.get_valid_required_extra_specs(specs) valid_result = { constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: 'true' } self.assertEqual(valid_result, actual_result) @ddt.data(None, {}) def test_get_valid_required_extra_specs_invalid(self, specs): self.assertRaises(exception.InvalidExtraSpec, share_types.get_valid_required_extra_specs, specs) def test_add_access(self): project_id = '456' extra_specs = { constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: 'true' } share_type = share_types.create(self.context, 'type1', extra_specs) share_type_id = share_type.get('id') share_types.add_share_type_access(self.context, share_type_id, project_id) stype_access = db.share_type_access_get_all(self.context, share_type_id) self.assertIn(project_id, [a.project_id for a in stype_access]) def test_add_access_invalid(self): self.assertRaises(exception.InvalidShareType, share_types.add_share_type_access, 'fake', None, 'fake') def test_remove_access(self): project_id = '456' extra_specs = { constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: 'true' } share_type = share_types.create( self.context, 'type1', projects=['456'], extra_specs=extra_specs) share_type_id = 
share_type.get('id') share_types.remove_share_type_access(self.context, share_type_id, project_id) stype_access = db.share_type_access_get_all(self.context, share_type_id) self.assertNotIn(project_id, stype_access) def test_remove_access_invalid(self): self.assertRaises(exception.InvalidShareType, share_types.remove_share_type_access, 'fake', None, 'fake') @ddt.data({'spec_value': ' True', 'expected': True}, {'spec_value': 'true', 'expected': True}, {'spec_value': ' False', 'expected': False}, {'spec_value': 'false', 'expected': False}, {'spec_value': u' FaLsE ', 'expected': False}) @ddt.unpack def test_parse_boolean_extra_spec(self, spec_value, expected): result = share_types.parse_boolean_extra_spec('fake_key', spec_value) self.assertEqual(expected, result) @ddt.data('True', 'False', ' True', ' Wrong', None, 5) def test_parse_boolean_extra_spec_invalid(self, spec_value): self.assertRaises(exception.InvalidExtraSpec, share_types.parse_boolean_extra_spec, 'fake_key', spec_value) manila-2.0.0/manila/tests/share/__init__.py0000664000567000056710000000000012701407107021716 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/test_api.py0000664000567000056710000031110512701407112021776 0ustar jenkinsjenkins00000000000000# Copyright 2012 NetApp. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
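# === Illustrative sketch: not part of the manila source; the helper and field
# === names below are assumptions for illustration only.
# The fake_share()/fake_snapshot()/fake_access() helpers defined a little
# further below all follow the same fixture pattern: build a dict of sensible
# defaults and let each test override just the fields it cares about through
# **kwargs. This trimmed-down standalone helper shows only that pattern.
def make_fake_resource(resource_id, **overrides):
    resource = {
        'id': resource_id,
        'status': 'available',
        'size': 1,
        'project_id': 'fakeproject',
    }
    resource.update(overrides)
    return resource


# A test can tweak one attribute without repeating the whole fixture.
errored = make_fake_resource('fakeid', status='error')
assert errored['status'] == 'error' and errored['size'] == 1
# === end of illustrative sketch ===============================================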
"""Unit tests for the Share API module.""" import copy import datetime import uuid import ddt import mock from oslo_config import cfg from oslo_utils import timeutils from manila.common import constants from manila import context from manila.data import rpcapi as data_rpc from manila import db as db_api from manila.db.sqlalchemy import models from manila import exception from manila import quota from manila import share from manila.share import api as share_api from manila.share import rpcapi as share_rpc from manila.share import share_types from manila import test from manila.tests import db_utils from manila.tests import fake_share as fakes from manila.tests import utils as test_utils from manila import utils CONF = cfg.CONF def fake_share(id, **kwargs): share = { 'id': id, 'size': 1, 'user_id': 'fakeuser', 'project_id': 'fakeproject', 'snapshot_id': None, 'share_network_id': None, 'share_type_id': None, 'availability_zone': 'fakeaz', 'status': 'fakestatus', 'display_name': 'fakename', 'metadata': None, 'display_description': 'fakedesc', 'share_proto': 'nfs', 'export_location': 'fake_location', 'host': 'fakehost', 'is_public': False, 'consistency_group_id': None, 'scheduled_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'launched_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'terminated_at': datetime.datetime(1, 1, 1, 1, 1, 1) } share.update(kwargs) return share def fake_snapshot(id, **kwargs): snapshot = { 'id': id, 'share_size': 1, 'size': 1, 'user_id': 'fakeuser', 'project_id': 'fakeproject', 'share_id': None, 'availability_zone': 'fakeaz', 'status': 'fakestatus', 'display_name': 'fakename', 'display_description': 'fakedesc', 'share_proto': 'nfs', 'progress': 'fakeprogress99%', 'scheduled_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'launched_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'terminated_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'share': {'host': 'fake_source_host'}, } snapshot.update(kwargs) return snapshot def fake_access(id, **kwargs): access = { 'id': id, 'share_id': 'fakeshareid', 'access_type': 'fakeacctype', 'access_to': 'fakeaccto', 'access_level': 'rw', 'state': 'fakeactive', 'STATE_NEW': 'fakenew', 'STATE_ACTIVE': 'fakeactive', 'STATE_DELETING': 'fakedeleting', 'STATE_DELETED': 'fakedeleted', 'STATE_ERROR': 'fakeerror', } access.update(kwargs) return access _FAKE_LIST_OF_ALL_SHARES = [ { 'name': 'foo', 'status': constants.STATUS_AVAILABLE, 'project_id': 'fake_pid_1', 'share_server_id': 'fake_server_1', }, { 'name': 'bar', 'status': constants.STATUS_ERROR, 'project_id': 'fake_pid_2', 'share_server_id': 'fake_server_2', }, { 'name': 'foo', 'status': constants.STATUS_AVAILABLE, 'project_id': 'fake_pid_2', 'share_server_id': 'fake_server_3', }, { 'name': 'bar', 'status': constants.STATUS_ERROR, 'project_id': 'fake_pid_2', 'share_server_id': 'fake_server_3', }, ] _FAKE_LIST_OF_ALL_SNAPSHOTS = [ { 'name': 'foo', 'status': constants.STATUS_AVAILABLE, 'project_id': 'fake_pid_1', 'share_id': 'fake_server_1', }, { 'name': 'bar', 'status': constants.STATUS_ERROR, 'project_id': 'fake_pid_2', 'share_id': 'fake_server_2', }, { 'name': 'foo', 'status': constants.STATUS_AVAILABLE, 'project_id': 'fake_pid_2', 'share_id': 'fake_share_id_3', }, { 'name': 'bar', 'status': constants.STATUS_ERROR, 'project_id': 'fake_pid_2', 'share_id': 'fake_share_id_3', }, ] @ddt.ddt class ShareAPITestCase(test.TestCase): def setUp(self): super(ShareAPITestCase, self).setUp() self.context = context.get_admin_context() self.scheduler_rpcapi = mock.Mock() self.share_rpcapi = mock.Mock() self.api = share.API() 
self.mock_object(self.api, 'scheduler_rpcapi', self.scheduler_rpcapi) self.mock_object(self.api, 'share_rpcapi', self.share_rpcapi) self.mock_object(quota.QUOTAS, 'reserve', lambda *args, **kwargs: None) self.dt_utc = datetime.datetime.utcnow() self.mock_object(timeutils, 'utcnow', mock.Mock(return_value=self.dt_utc)) self.mock_object(share_api.policy, 'check_policy') def _setup_create_mocks(self, protocol='nfs', **kwargs): share = db_utils.create_share( user_id=self.context.user_id, project_id=self.context.project_id, share_type_id=kwargs.pop('share_type_id', 'fake'), **kwargs ) share_data = { 'share_proto': protocol, 'size': 1, 'display_name': 'fakename', 'display_description': 'fakedesc', 'availability_zone': 'fakeaz' } self.mock_object(db_api, 'share_create', mock.Mock(return_value=share)) self.mock_object(self.api, 'create_instance') return share, share_data def _setup_create_instance_mocks(self): host = 'fake' share_type_id = "fake_share_type" share = db_utils.create_share( user_id=self.context.user_id, project_id=self.context.project_id, create_share_instance=False, share_type_id=share_type_id, ) share_instance = db_utils.create_share_instance(share_id=share['id']) share_type = {'fake': 'fake'} self.mock_object(db_api, 'share_instance_create', mock.Mock(return_value=share_instance)) self.mock_object(db_api, 'share_type_get', mock.Mock(return_value=share_type)) az_mock = mock.Mock() type(az_mock.return_value).id = mock.PropertyMock( return_value='fake_id') self.mock_object(db_api, 'availability_zone_get', az_mock) self.mock_object(self.api.share_rpcapi, 'create_share_instance') self.mock_object(self.api.scheduler_rpcapi, 'create_share_instance') return host, share, share_instance def _setup_create_from_snapshot_mocks(self, use_scheduler=True, host=None): CONF.set_default("use_scheduler_creating_share_from_snapshot", use_scheduler) share_type = fakes.fake_share_type() original_share = db_utils.create_share( user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_AVAILABLE, host=host if host else 'fake', size=1, share_type_id=share_type['id'], ) snapshot = db_utils.create_snapshot( share_id=original_share['id'], status=constants.STATUS_AVAILABLE, size=1 ) share, share_data = self._setup_create_mocks( snapshot_id=snapshot['id'], share_type_id=share_type['id']) request_spec = { 'share_properties': share.to_dict(), 'share_proto': share['share_proto'], 'share_id': share['id'], 'share_type': None, 'snapshot_id': share['snapshot_id'], } self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(return_value='reservation')) self.mock_object(quota.QUOTAS, 'commit') self.mock_object( share_types, 'get_share_type', mock.Mock(return_value=share_type)) return snapshot, share, share_data, request_spec def _setup_delete_mocks(self, status, snapshots=None, **kwargs): if snapshots is None: snapshots = [] share = db_utils.create_share(status=status, **kwargs) self.mock_object(db_api, 'share_delete') self.mock_object(db_api, 'share_server_update') self.mock_object(db_api, 'share_snapshot_get_all_for_share', mock.Mock(return_value=snapshots)) self.mock_object(self.api, 'delete_instance') self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(return_value='reservation')) self.mock_object(quota.QUOTAS, 'commit') return share def _setup_delete_share_instance_mocks(self, **kwargs): share = db_utils.create_share(**kwargs) self.mock_object(db_api, 'share_instance_update', mock.Mock(return_value=share.instance)) self.mock_object(self.api.share_rpcapi, 'delete_share_instance') 
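# === Illustrative sketch: not part of the manila source; FakeApi and the test
# === case below are hypothetical and exist only for illustration.
# The _setup_*_mocks helpers in this class lean on self.mock_object(), a
# base-class helper that (in the usual OpenStack test-base pattern) patches an
# attribute for a single test and registers cleanup so the patch is undone
# afterwards. This tiny self-contained test case shows the equivalent done by
# hand with mock.patch.object plus addCleanup.
import unittest

import mock


class FakeApi(object):
    def ping(self):
        return 'real'


class ExampleTestCase(unittest.TestCase):
    def _mock_object(self, obj, attr, new):
        patcher = mock.patch.object(obj, attr, new)
        mocked = patcher.start()
        self.addCleanup(patcher.stop)  # restored automatically after the test
        return mocked

    def test_ping_is_patched(self):
        self._mock_object(FakeApi, 'ping', mock.Mock(return_value='fake'))
        self.assertEqual('fake', FakeApi().ping())
# === end of illustrative sketch ===============================================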
self.mock_object(db_api, 'share_server_update') return share.instance def test_get_all_admin_no_filters(self): self.mock_object(db_api, 'share_get_all_by_project', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[0])) ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=True) shares = self.api.get_all(ctx) share_api.policy.check_policy.assert_called_once_with( ctx, 'share', 'get_all') db_api.share_get_all_by_project.assert_called_once_with( ctx, sort_dir='desc', sort_key='created_at', project_id='fake_pid_1', filters={}, is_public=False ) self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[0], shares) def test_get_all_admin_filter_by_all_tenants(self): ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=True) self.mock_object(db_api, 'share_get_all', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES)) shares = self.api.get_all(ctx, {'all_tenants': 1}) share_api.policy.check_policy.assert_called_once_with( ctx, 'share', 'get_all') db_api.share_get_all.assert_called_once_with( ctx, sort_dir='desc', sort_key='created_at', filters={}) self.assertEqual(_FAKE_LIST_OF_ALL_SHARES, shares) def test_get_all_non_admin_filter_by_share_server(self): def fake_policy_checker(*args, **kwargs): if 'list_by_share_server_id' == args[2] and not args[0].is_admin: raise exception.NotAuthorized ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False) self.mock_object(share_api.policy, 'check_policy', mock.Mock(side_effect=fake_policy_checker)) self.assertRaises( exception.NotAuthorized, self.api.get_all, ctx, {'share_server_id': 'fake'}, ) share_api.policy.check_policy.assert_has_calls([ mock.call(ctx, 'share', 'get_all'), mock.call(ctx, 'share', 'list_by_share_server_id'), ]) def test_get_all_admin_filter_by_share_server_and_all_tenants(self): # NOTE(vponomaryov): if share_server_id provided, 'all_tenants' opt # should not make any influence. 
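# === Illustrative sketch: not part of the manila source; NotAllowed and
# === selective_checker are hypothetical stand-ins for illustration only.
# test_get_all_non_admin_filter_by_share_server above plugs a function into a
# mock's side_effect so that the fake policy check passes or raises depending
# on the arguments it is called with. This standalone snippet demonstrates
# that mock idiom with a simplified checker.
import mock


class NotAllowed(Exception):
    pass


def selective_checker(is_admin, resource, action):
    # Only admins may list by share server id; everything else is allowed.
    if action == 'list_by_share_server_id' and not is_admin:
        raise NotAllowed(action)


check_policy = mock.Mock(side_effect=selective_checker)
check_policy(True, 'share', 'list_by_share_server_id')  # allowed, returns None
try:
    check_policy(False, 'share', 'list_by_share_server_id')
except NotAllowed:
    pass  # the mock re-raises whatever its side_effect function raises
# === end of illustrative sketch ===============================================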
ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=True) self.mock_object(db_api, 'share_get_all_by_share_server', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[2:])) self.mock_object(db_api, 'share_get_all') self.mock_object(db_api, 'share_get_all_by_project') shares = self.api.get_all( ctx, {'share_server_id': 'fake_server_3', 'all_tenants': 1}) share_api.policy.check_policy.assert_has_calls([ mock.call(ctx, 'share', 'get_all'), mock.call(ctx, 'share', 'list_by_share_server_id'), ]) db_api.share_get_all_by_share_server.assert_called_once_with( ctx, 'fake_server_3', sort_dir='desc', sort_key='created_at', filters={}, ) db_api.share_get_all_by_project.assert_has_calls([]) db_api.share_get_all.assert_has_calls([]) self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[2:], shares) def test_get_all_admin_filter_by_name(self): ctx = context.RequestContext('fake_uid', 'fake_pid_2', is_admin=True) self.mock_object(db_api, 'share_get_all_by_project', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[1:])) shares = self.api.get_all(ctx, {'name': 'bar'}) share_api.policy.check_policy.assert_has_calls([ mock.call(ctx, 'share', 'get_all'), ]) db_api.share_get_all_by_project.assert_called_once_with( ctx, sort_dir='desc', sort_key='created_at', project_id='fake_pid_2', filters={}, is_public=False ) self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[1::2], shares) def test_get_all_admin_filter_by_name_and_all_tenants(self): ctx = context.RequestContext('fake_uid', 'fake_pid_2', is_admin=True) self.mock_object(db_api, 'share_get_all', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES)) shares = self.api.get_all(ctx, {'name': 'foo', 'all_tenants': 1}) share_api.policy.check_policy.assert_has_calls([ mock.call(ctx, 'share', 'get_all'), ]) db_api.share_get_all.assert_called_once_with( ctx, sort_dir='desc', sort_key='created_at', filters={}) self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[::2], shares) def test_get_all_admin_filter_by_status(self): ctx = context.RequestContext('fake_uid', 'fake_pid_2', is_admin=True) self.mock_object(db_api, 'share_get_all_by_project', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[1:])) shares = self.api.get_all(ctx, {'status': constants.STATUS_AVAILABLE}) share_api.policy.check_policy.assert_has_calls([ mock.call(ctx, 'share', 'get_all'), ]) db_api.share_get_all_by_project.assert_called_once_with( ctx, sort_dir='desc', sort_key='created_at', project_id='fake_pid_2', filters={}, is_public=False ) self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[2::4], shares) def test_get_all_admin_filter_by_status_and_all_tenants(self): ctx = context.RequestContext('fake_uid', 'fake_pid_2', is_admin=True) self.mock_object(db_api, 'share_get_all', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES)) shares = self.api.get_all( ctx, {'status': constants.STATUS_ERROR, 'all_tenants': 1}) share_api.policy.check_policy.assert_has_calls([ mock.call(ctx, 'share', 'get_all'), ]) db_api.share_get_all.assert_called_once_with( ctx, sort_dir='desc', sort_key='created_at', filters={}) self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[1::2], shares) def test_get_all_non_admin_filter_by_all_tenants(self): # Expected share list only by project of non-admin user ctx = context.RequestContext('fake_uid', 'fake_pid_2', is_admin=False) self.mock_object(db_api, 'share_get_all_by_project', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[1:])) shares = self.api.get_all(ctx, {'all_tenants': 1}) share_api.policy.check_policy.assert_has_calls([ mock.call(ctx, 'share', 'get_all'), ]) db_api.share_get_all_by_project.assert_called_once_with( ctx, 
sort_dir='desc', sort_key='created_at', project_id='fake_pid_2', filters={}, is_public=False ) self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[1:], shares) def test_get_all_non_admin_with_name_and_status_filters(self): ctx = context.RequestContext('fake_uid', 'fake_pid_2', is_admin=False) self.mock_object(db_api, 'share_get_all_by_project', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[1:])) shares = self.api.get_all( ctx, {'name': 'bar', 'status': constants.STATUS_ERROR}) share_api.policy.check_policy.assert_has_calls([ mock.call(ctx, 'share', 'get_all'), ]) db_api.share_get_all_by_project.assert_called_once_with( ctx, sort_dir='desc', sort_key='created_at', project_id='fake_pid_2', filters={}, is_public=False ) # two items expected, one filtered self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[1::2], shares) # one item expected, two filtered shares = self.api.get_all( ctx, {'name': 'foo', 'status': constants.STATUS_AVAILABLE}) self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[2::4], shares) share_api.policy.check_policy.assert_has_calls([ mock.call(ctx, 'share', 'get_all'), mock.call(ctx, 'share', 'get_all'), ]) db_api.share_get_all_by_project.assert_has_calls([ mock.call(ctx, sort_dir='desc', sort_key='created_at', project_id='fake_pid_2', filters={}, is_public=False), mock.call(ctx, sort_dir='desc', sort_key='created_at', project_id='fake_pid_2', filters={}, is_public=False), ]) @ddt.data('True', 'true', '1', 'yes', 'y', 'on', 't', True) def test_get_all_non_admin_public(self, is_public): ctx = context.RequestContext('fake_uid', 'fake_pid_2', is_admin=False) self.mock_object(db_api, 'share_get_all_by_project', mock.Mock( return_value=_FAKE_LIST_OF_ALL_SHARES[1:])) shares = self.api.get_all(ctx, {'is_public': is_public}) share_api.policy.check_policy.assert_has_calls([ mock.call(ctx, 'share', 'get_all'), ]) db_api.share_get_all_by_project.assert_called_once_with( ctx, sort_dir='desc', sort_key='created_at', project_id='fake_pid_2', filters={}, is_public=True ) self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[1:], shares) @ddt.data('False', 'false', '0', 'no', 'n', 'off', 'f', False) def test_get_all_non_admin_not_public(self, is_public): ctx = context.RequestContext('fake_uid', 'fake_pid_2', is_admin=False) self.mock_object(db_api, 'share_get_all_by_project', mock.Mock( return_value=_FAKE_LIST_OF_ALL_SHARES[1:])) shares = self.api.get_all(ctx, {'is_public': is_public}) share_api.policy.check_policy.assert_has_calls([ mock.call(ctx, 'share', 'get_all'), ]) db_api.share_get_all_by_project.assert_called_once_with( ctx, sort_dir='desc', sort_key='created_at', project_id='fake_pid_2', filters={}, is_public=False ) self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[1:], shares) @ddt.data('truefoo', 'bartrue') def test_get_all_invalid_public_value(self, is_public): ctx = context.RequestContext('fake_uid', 'fake_pid_2', is_admin=False) self.assertRaises(ValueError, self.api.get_all, ctx, {'is_public': is_public}) share_api.policy.check_policy.assert_has_calls([ mock.call(ctx, 'share', 'get_all'), ]) def test_get_all_with_sorting_valid(self): self.mock_object(db_api, 'share_get_all_by_project', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[0])) ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False) shares = self.api.get_all(ctx, sort_key='status', sort_dir='asc') share_api.policy.check_policy.assert_called_once_with( ctx, 'share', 'get_all') db_api.share_get_all_by_project.assert_called_once_with( ctx, sort_dir='asc', sort_key='status', project_id='fake_pid_1', filters={}, is_public=False ) 
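# === Illustrative sketch: not part of the manila source; filter_shares() is a
# === hypothetical stand-in for illustration only.
# The get_all filter tests above assert which subset of
# _FAKE_LIST_OF_ALL_SHARES survives 'name'/'status' filtering, expressed as
# slices such as [1::2] and [2::4]. This standalone helper mimics that
# post-query filtering step so the expected slices are easier to reason about.
def filter_shares(shares, **criteria):
    return [s for s in shares
            if all(s.get(key) == value for key, value in criteria.items())]


sample = [
    {'name': 'foo', 'status': 'available'},
    {'name': 'bar', 'status': 'error'},
    {'name': 'foo', 'status': 'available'},
    {'name': 'bar', 'status': 'error'},
]
# The 'bar' shares sit at the odd indexes, matching the [1::2] expectations.
assert filter_shares(sample, name='bar') == sample[1::2]
assert filter_shares(sample, name='foo', status='available') == sample[0::2]
# === end of illustrative sketch ===============================================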
self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[0], shares) def test_get_all_sort_key_invalid(self): self.mock_object(db_api, 'share_get_all_by_project', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[0])) ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False) self.assertRaises( exception.InvalidInput, self.api.get_all, ctx, sort_key=1, ) share_api.policy.check_policy.assert_called_once_with( ctx, 'share', 'get_all') def test_get_all_sort_dir_invalid(self): self.mock_object(db_api, 'share_get_all_by_project', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[0])) ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False) self.assertRaises( exception.InvalidInput, self.api.get_all, ctx, sort_dir=1, ) share_api.policy.check_policy.assert_called_once_with( ctx, 'share', 'get_all') def _get_all_filter_metadata_or_extra_specs_valid(self, key): self.mock_object(db_api, 'share_get_all_by_project', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[0])) ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False) search_opts = {key: {'foo1': 'bar1', 'foo2': 'bar2'}} shares = self.api.get_all(ctx, search_opts=search_opts.copy()) share_api.policy.check_policy.assert_called_once_with( ctx, 'share', 'get_all') db_api.share_get_all_by_project.assert_called_once_with( ctx, sort_dir='desc', sort_key='created_at', project_id='fake_pid_1', filters=search_opts, is_public=False) self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[0], shares) def test_get_all_filter_by_metadata(self): self._get_all_filter_metadata_or_extra_specs_valid(key='metadata') def test_get_all_filter_by_extra_specs(self): self._get_all_filter_metadata_or_extra_specs_valid(key='extra_specs') def _get_all_filter_metadata_or_extra_specs_invalid(self, key): self.mock_object(db_api, 'share_get_all_by_project', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[0])) ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False) search_opts = {key: "{'foo': 'bar'}"} self.assertRaises(exception.InvalidInput, self.api.get_all, ctx, search_opts=search_opts) share_api.policy.check_policy.assert_called_once_with( ctx, 'share', 'get_all') def test_get_all_filter_by_invalid_metadata(self): self._get_all_filter_metadata_or_extra_specs_invalid(key='metadata') def test_get_all_filter_by_invalid_extra_specs(self): self._get_all_filter_metadata_or_extra_specs_invalid(key='extra_specs') @ddt.data(True, False) def test_create_public_and_private_share(self, is_public): share, share_data = self._setup_create_mocks(is_public=is_public) az = share_data.pop('availability_zone') self.api.create( self.context, share_data['share_proto'], share_data['size'], share_data['display_name'], share_data['display_description'], availability_zone=az ) share['status'] = constants.STATUS_CREATING share['host'] = None self.assertSubDictMatch(share_data, db_api.share_create.call_args[0][1]) @ddt.data('', 'fake', 'Truebar', 'Bartrue') def test_create_share_with_invalid_is_public_value(self, is_public): self.assertRaises(exception.InvalidParameterValue, self.api.create, self.context, 'nfs', '1', 'fakename', 'fakedesc', is_public=is_public) @ddt.data(*constants.SUPPORTED_SHARE_PROTOCOLS) def test_create_share_valid_protocol(self, proto): share, share_data = self._setup_create_mocks(protocol=proto) az = share_data.pop('availability_zone') all_protos = ','.join( proto for proto in constants.SUPPORTED_SHARE_PROTOCOLS) data = dict(DEFAULT=dict(enabled_share_protocols=all_protos)) with test_utils.create_temp_config_with_opts(data): self.api.create( self.context, 
proto, share_data['size'], share_data['display_name'], share_data['display_description'], availability_zone=az) share['status'] = constants.STATUS_CREATING share['host'] = None self.assertSubDictMatch(share_data, db_api.share_create.call_args[0][1]) @ddt.data( None, '', 'fake', 'nfsfake', 'cifsfake', 'glusterfsfake', 'hdfsfake') def test_create_share_invalid_protocol(self, proto): share, share_data = self._setup_create_mocks(protocol=proto) all_protos = ','.join( proto for proto in constants.SUPPORTED_SHARE_PROTOCOLS) data = dict(DEFAULT=dict(enabled_share_protocols=all_protos)) with test_utils.create_temp_config_with_opts(data): self.assertRaises( exception.InvalidInput, self.api.create, self.context, proto, share_data['size'], share_data['display_name'], share_data['display_description']) @ddt.data({'overs': {'gigabytes': 'fake'}, 'expected_exception': exception.ShareSizeExceedsAvailableQuota}, {'overs': {'shares': 'fake'}, 'expected_exception': exception.ShareLimitExceeded}) @ddt.unpack def test_create_share_over_quota(self, overs, expected_exception): share, share_data = self._setup_create_mocks() usages = {'gigabytes': {'reserved': 5, 'in_use': 5}, 'shares': {'reserved': 10, 'in_use': 10}} quotas = {'gigabytes': 5, 'shares': 10} exc = exception.OverQuota(overs=overs, usages=usages, quotas=quotas) self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(side_effect=exc)) self.assertRaises( expected_exception, self.api.create, self.context, share_data['share_proto'], share_data['size'], share_data['display_name'], share_data['display_description'] ) quota.QUOTAS.reserve.assert_called_once_with( self.context, shares=1, gigabytes=share_data['size']) @ddt.data(exception.QuotaError, exception.InvalidShare) def test_create_share_error_on_quota_commit(self, expected_exception): share, share_data = self._setup_create_mocks() reservation = 'fake' self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(return_value=reservation)) self.mock_object(quota.QUOTAS, 'commit', mock.Mock(side_effect=expected_exception('fake'))) self.mock_object(quota.QUOTAS, 'rollback') self.mock_object(db_api, 'share_delete') self.assertRaises( expected_exception, self.api.create, self.context, share_data['share_proto'], share_data['size'], share_data['display_name'], share_data['display_description'] ) quota.QUOTAS.rollback.assert_called_once_with(self.context, reservation) db_api.share_delete.assert_called_once_with(self.context, share['id']) def test_create_share_instance_with_host_and_az(self): host, share, share_instance = self._setup_create_instance_mocks() self.api.create_instance(self.context, share, host=host, availability_zone='fake') db_api.share_instance_create.assert_called_once_with( self.context, share['id'], { 'share_network_id': None, 'status': constants.STATUS_CREATING, 'scheduled_at': self.dt_utc, 'host': host, 'availability_zone_id': 'fake_id', } ) db_api.share_type_get.assert_called_once_with(self.context, share['share_type_id']) self.api.share_rpcapi.create_share_instance.assert_called_once_with( self.context, share_instance, host, request_spec=mock.ANY, filter_properties={}, snapshot_id=share['snapshot_id'], ) self.assertFalse( self.api.scheduler_rpcapi.create_share_instance.called) def test_create_share_instance_without_host(self): _, share, share_instance = self._setup_create_instance_mocks() self.api.create_instance(self.context, share) self.api.scheduler_rpcapi.create_share_instance.\ assert_called_once_with( self.context, request_spec=mock.ANY, filter_properties={}) 
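# === Illustrative sketch: not part of the manila source; validate_share_proto
# === and its default protocol list are assumptions for illustration only.
# test_create_share_valid_protocol and test_create_share_invalid_protocol above
# exercise validation of the requested protocol against the configured
# 'enabled_share_protocols' list. This standalone function is a simplified
# expression of that check, showing which inputs those tests expect to be
# accepted or rejected.
def validate_share_proto(proto, enabled=('NFS', 'CIFS', 'GLUSTERFS', 'HDFS')):
    if not proto or proto.upper() not in enabled:
        raise ValueError('Invalid share protocol: %s' % proto)
    return proto.upper()


assert validate_share_proto('nfs') == 'NFS'
for bad in (None, '', 'fake', 'nfsfake', 'cifsfake'):
    try:
        validate_share_proto(bad)
    except ValueError:
        pass
    else:
        raise AssertionError('%r should have been rejected' % (bad,))
# === end of illustrative sketch ===============================================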
self.assertFalse(self.api.share_rpcapi.create_share_instance.called) @ddt.data('no_valid_host', None) def test_manage_new(self, exc): share_data = { 'host': 'fake', 'export_location': 'fake', 'share_proto': 'fake', 'share_type_id': 'fake', } driver_options = {} date = datetime.datetime(1, 1, 1, 1, 1, 1) timeutils.utcnow.return_value = date fake_share_data = { 'id': 'fakeid', 'status': constants.STATUS_CREATING, } fake_type = { 'id': 'fake_type_id', 'extra_specs': { 'snapshot_support': False, }, } share = db_api.share_create(self.context, fake_share_data) if exc: self.mock_object(self.scheduler_rpcapi, 'manage_share', mock.Mock(side_effect=exception.NoValidHost)) self.mock_object(db_api, 'share_create', mock.Mock(return_value=share)) self.mock_object(db_api, 'share_export_locations_update') self.mock_object(db_api, 'share_get', mock.Mock(return_value=share)) self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=fake_type)) self.mock_object(self.api, 'get_all', mock.Mock(return_value=[])) if exc: self.assertRaises(exception.InvalidHost, self.api.manage, self.context, copy.deepcopy(share_data), driver_options) else: self.api.manage(self.context, copy.deepcopy(share_data), driver_options) share_data.update({ 'user_id': self.context.user_id, 'project_id': self.context.project_id, 'status': constants.STATUS_MANAGING, 'scheduled_at': date, 'snapshot_support': fake_type['extra_specs']['snapshot_support'], }) expected_request_spec = self._get_request_spec_dict( share, fake_type, size=0) export_location = share_data.pop('export_location') self.api.get_all.assert_called_once_with(self.context, mock.ANY) db_api.share_create.assert_called_once_with(self.context, share_data) if not exc: db_api.share_get.assert_called_once_with(self.context, share['id']) db_api.share_export_locations_update.assert_called_once_with( self.context, share.instance['id'], export_location ) self.scheduler_rpcapi.manage_share.assert_called_once_with( self.context, share['id'], driver_options, expected_request_spec) @ddt.data([{'id': 'fake', 'status': constants.STATUS_MANAGE_ERROR}]) def test_manage_retry(self, shares): share_data = { 'host': 'fake', 'export_location': 'fake', 'share_proto': 'fake', 'share_type_id': 'fake', } driver_options = {} fake_share_data = {'id': 'fakeid'} fake_type = { 'id': 'fake_type_id', 'extra_specs': { 'snapshot_support': False, }, } share = db_api.share_create(self.context, fake_share_data) self.mock_object(db_api, 'share_update', mock.Mock(return_value=share)) self.mock_object(db_api, 'share_get', mock.Mock(return_value=share)) self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=fake_type)) self.mock_object(db_api, 'share_export_locations_update') self.mock_object(self.api, 'get_all', mock.Mock(return_value=shares)) self.api.manage(self.context, copy.deepcopy(share_data), driver_options) expected_request_spec = self._get_request_spec_dict( share, fake_type, size=0) db_api.share_update.assert_called_once_with( self.context, 'fake', mock.ANY) self.scheduler_rpcapi.manage_share.assert_called_once_with( self.context, share['id'], driver_options, expected_request_spec) db_api.share_export_locations_update.assert_called_once_with( self.context, share.instance['id'], mock.ANY ) def test_manage_duplicate(self): share_data = { 'host': 'fake', 'export_location': 'fake', 'share_proto': 'fake', 'share_type_id': 'fake', } driver_options = {} fake_type = { 'id': 'fake_type_id', 'extra_specs': { 'snapshot_support': False, }, } self.mock_object(self.api, 'get_all', 
mock.Mock(return_value=['fake', 'fake2'])) self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=fake_type)) self.assertRaises(exception.ManilaException, self.api.manage, self.context, share_data, driver_options) def _get_request_spec_dict(self, share, share_type, **kwargs): share_instance = share['instance'] share_properties = { 'size': kwargs.get('size', share['size']), 'user_id': kwargs.get('user_id', share['user_id']), 'project_id': kwargs.get('project_id', share['project_id']), 'snapshot_support': kwargs.get( 'snapshot_support', share_type['extra_specs']['snapshot_support']), 'share_proto': kwargs.get('share_proto', share['share_proto']), 'share_type_id': kwargs.get('share_type_id', share['share_type_id']), 'is_public': kwargs.get('is_public', share['is_public']), 'consistency_group_id': kwargs.get('consistency_group_id', share['consistency_group_id']), 'source_cgsnapshot_member_id': kwargs.get( 'source_cgsnapshot_member_id', share['source_cgsnapshot_member_id']), 'snapshot_id': kwargs.get('snapshot_id', share['snapshot_id']), } share_instance_properties = { 'availability_zone_id': kwargs.get( 'availability_zone_id', share_instance['availability_zone_id']), 'share_network_id': kwargs.get('share_network_id', share_instance['share_network_id']), 'share_server_id': kwargs.get('share_server_id', share_instance['share_server_id']), 'share_id': kwargs.get('share_id', share_instance['share_id']), 'host': kwargs.get('host', share_instance['host']), 'status': kwargs.get('status', share_instance['status']), } request_spec = { 'share_properties': share_properties, 'share_instance_properties': share_instance_properties, 'share_type': share_type, 'share_id': share['id'] } return request_spec def test_unmanage(self): share = db_utils.create_share( id='fakeid', host='fake', size='1', status=constants.STATUS_AVAILABLE, user_id=self.context.user_id, project_id=self.context.project_id, task_state=None) self.mock_object(db_api, 'share_update', mock.Mock()) self.api.unmanage(self.context, share) self.share_rpcapi.unmanage_share.assert_called_once_with( self.context, mock.ANY) db_api.share_update.assert_called_once_with( mock.ANY, share['id'], mock.ANY) def test_unmanage_task_state_busy(self): share = db_utils.create_share( id='fakeid', host='fake', size='1', status=constants.STATUS_AVAILABLE, user_id=self.context.user_id, project_id=self.context.project_id, task_state=constants.TASK_STATE_MIGRATION_IN_PROGRESS) self.assertRaises(exception.ShareBusyException, self.api.unmanage, self.context, share) @mock.patch.object(quota.QUOTAS, 'reserve', mock.Mock(return_value='reservation')) @mock.patch.object(quota.QUOTAS, 'commit', mock.Mock()) def test_create_snapshot(self): snapshot = db_utils.create_snapshot( with_share=True, status=constants.STATUS_CREATING, size=1) share = snapshot['share'] fake_name = 'fakename' fake_desc = 'fakedesc' options = { 'share_id': share['id'], 'user_id': self.context.user_id, 'project_id': self.context.project_id, 'status': constants.STATUS_CREATING, 'progress': '0%', 'share_size': share['size'], 'size': 1, 'display_name': fake_name, 'display_description': fake_desc, 'share_proto': share['share_proto'], } with mock.patch.object(db_api, 'share_snapshot_create', mock.Mock(return_value=snapshot)): self.api.create_snapshot(self.context, share, fake_name, fake_desc) share_api.policy.check_policy.assert_called_once_with( self.context, 'share', 'create_snapshot', share) quota.QUOTAS.reserve.assert_called_once_with( self.context, snapshots=1, snapshot_gigabytes=1) 
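# === Illustrative sketch: not part of the manila source; 'quotas' below is a
# === plain mock stand-in, not manila's quota engine.
# The reserve/commit assertions around this point (and the over-quota and
# commit-failure tests earlier in this class) all exercise the same idiom:
# reserve quota first, do the work, then commit the reservation, rolling it
# back if the work fails. This standalone snippet shows that pattern in
# isolation.
import mock

quotas = mock.Mock()
quotas.reserve.return_value = 'reservation'


def create_with_quota(do_create):
    reservation = quotas.reserve(snapshots=1, snapshot_gigabytes=1)
    try:
        result = do_create()
    except Exception:
        quotas.rollback(reservation)  # undo the reservation on failure
        raise
    quotas.commit(reservation)        # make the usage permanent on success
    return result


create_with_quota(lambda: 'fake_snapshot')
quotas.commit.assert_called_once_with('reservation')
assert not quotas.rollback.called
# === end of illustrative sketch ===============================================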
quota.QUOTAS.commit.assert_called_once_with( self.context, 'reservation') db_api.share_snapshot_create.assert_called_once_with( self.context, options) def test_create_snapshot_for_replicated_share(self): share = fakes.fake_share( has_replicas=True, status=constants.STATUS_AVAILABLE) snapshot = fakes.fake_snapshot( create_instance=True, share_instance_id='id2') replicas = [ fakes.fake_replica( id='id1', replica_state=constants.REPLICA_STATE_ACTIVE), fakes.fake_replica( id='id2', replica_state=constants.REPLICA_STATE_IN_SYNC) ] self.mock_object(share_api.policy, 'check_policy') self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(return_value='reservation')) self.mock_object( db_api, 'share_snapshot_create', mock.Mock(return_value=snapshot)) self.mock_object(db_api, 'share_replicas_get_all_by_share', mock.Mock(return_value=replicas)) self.mock_object( db_api, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.mock_object(quota.QUOTAS, 'commit') mock_instance_create_call = self.mock_object( db_api, 'share_snapshot_instance_create') mock_snapshot_rpc_call = self.mock_object( self.share_rpcapi, 'create_snapshot') mock_replicated_snapshot_rpc_call = self.mock_object( self.share_rpcapi, 'create_replicated_snapshot') snapshot_instance_args = { 'status': constants.STATUS_CREATING, 'progress': '0%', 'share_instance_id': 'id1', } retval = self.api.create_snapshot( self.context, share, 'fake_name', 'fake_description') self.assertEqual(snapshot['id'], retval['id']) mock_instance_create_call.assert_called_once_with( self.context, snapshot['id'], snapshot_instance_args) self.assertFalse(mock_snapshot_rpc_call.called) self.assertTrue(mock_replicated_snapshot_rpc_call.called) @mock.patch.object(db_api, 'share_instances_get_all_by_share_server', mock.Mock(return_value=[])) @mock.patch.object(db_api, 'consistency_group_get_all_by_share_server', mock.Mock(return_value=[])) def test_delete_share_server_no_dependent_shares(self): server = {'id': 'fake_share_server_id'} server_returned = { 'id': 'fake_share_server_id', } self.mock_object(db_api, 'share_server_update', mock.Mock(return_value=server_returned)) self.api.delete_share_server(self.context, server) db_api.share_instances_get_all_by_share_server.assert_called_once_with( self.context, server['id']) db_api.consistency_group_get_all_by_share_server.\ assert_called_once_with(self.context, server['id']) self.share_rpcapi.delete_share_server.assert_called_once_with( self.context, server_returned) @mock.patch.object(db_api, 'share_instances_get_all_by_share_server', mock.Mock(return_value=['fake_share', ])) @mock.patch.object(db_api, 'consistency_group_get_all_by_share_server', mock.Mock(return_value=[])) def test_delete_share_server_dependent_share_exists(self): server = {'id': 'fake_share_server_id'} self.assertRaises(exception.ShareServerInUse, self.api.delete_share_server, self.context, server) db_api.share_instances_get_all_by_share_server.assert_called_once_with( self.context, server['id']) @mock.patch.object(db_api, 'share_instances_get_all_by_share_server', mock.Mock(return_value=[])) @mock.patch.object(db_api, 'consistency_group_get_all_by_share_server', mock.Mock(return_value=['fake_cg', ])) def test_delete_share_server_dependent_cg_exists(self): server = {'id': 'fake_share_server_id'} self.assertRaises(exception.ShareServerInUse, self.api.delete_share_server, self.context, server) db_api.share_instances_get_all_by_share_server.assert_called_once_with( self.context, server['id']) db_api.consistency_group_get_all_by_share_server.\ 
assert_called_once_with(self.context, server['id']) @mock.patch.object(db_api, 'share_snapshot_instance_update', mock.Mock()) def test_delete_snapshot(self): snapshot = db_utils.create_snapshot( with_share=True, status=constants.STATUS_AVAILABLE) share = snapshot['share'] with mock.patch.object(db_api, 'share_get', mock.Mock(return_value=share)): self.api.delete_snapshot(self.context, snapshot) self.share_rpcapi.delete_snapshot.assert_called_once_with( self.context, snapshot, share['host']) share_api.policy.check_policy.assert_called_once_with( self.context, 'share', 'delete_snapshot', snapshot) db_api.share_snapshot_instance_update.assert_called_once_with( self.context, snapshot['instance']['id'], {'status': constants.STATUS_DELETING}) db_api.share_get.assert_called_once_with( self.context, snapshot['share_id']) def test_delete_snapshot_wrong_status(self): snapshot = db_utils.create_snapshot( with_share=True, status=constants.STATUS_CREATING) self.assertRaises(exception.InvalidShareSnapshot, self.api.delete_snapshot, self.context, snapshot) share_api.policy.check_policy.assert_called_once_with( self.context, 'share', 'delete_snapshot', snapshot) @ddt.data(True, False) def test_delete_snapshot_replicated_snapshot(self, force): share = fakes.fake_share(has_replicas=True) snapshot = fakes.fake_snapshot( create_instance=True, share_id=share['id'], status=constants.STATUS_ERROR) snapshot_instance = fakes.fake_snapshot_instance( base_snapshot=snapshot) expected_update_calls = [ mock.call(self.context, x, {'status': constants.STATUS_DELETING}) for x in (snapshot['instance']['id'], snapshot_instance['id']) ] self.mock_object(db_api, 'share_get', mock.Mock(return_value=share)) self.mock_object( db_api, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=[snapshot['instance'], snapshot_instance])) mock_db_update_call = self.mock_object( db_api, 'share_snapshot_instance_update') mock_snapshot_rpc_call = self.mock_object( self.share_rpcapi, 'delete_snapshot') mock_replicated_snapshot_rpc_call = self.mock_object( self.share_rpcapi, 'delete_replicated_snapshot') retval = self.api.delete_snapshot(self.context, snapshot, force=force) self.assertIsNone(retval) self.assertEqual(2, mock_db_update_call.call_count) mock_db_update_call.assert_has_calls(expected_update_calls) mock_replicated_snapshot_rpc_call.assert_called_once_with( self.context, snapshot, share['instance']['host'], share_id=share['id'], force=force) self.assertFalse(mock_snapshot_rpc_call.called) def test_create_snapshot_if_share_not_available(self): share = db_utils.create_share(status=constants.STATUS_ERROR) self.assertRaises(exception.InvalidShare, self.api.create_snapshot, self.context, share, 'fakename', 'fakedesc') share_api.policy.check_policy.assert_called_once_with( self.context, 'share', 'create_snapshot', share) def test_create_snapshot_invalid_task_state(self): share = db_utils.create_share( status=constants.STATUS_AVAILABLE, task_state=constants.TASK_STATE_MIGRATION_IN_PROGRESS) self.assertRaises(exception.ShareBusyException, self.api.create_snapshot, self.context, share, 'fakename', 'fakedesc') share_api.policy.check_policy.assert_called_once_with( self.context, 'share', 'create_snapshot', share) @ddt.data({'use_scheduler': False, 'valid_host': 'fake'}, {'use_scheduler': True, 'valid_host': None}) @ddt.unpack def test_create_from_snapshot(self, use_scheduler, valid_host): snapshot, share, share_data, request_spec = ( self._setup_create_from_snapshot_mocks( use_scheduler=use_scheduler, host=valid_host) ) 
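# === Illustrative sketch: not part of the manila source; the function below is
# === a hypothetical, simplified expression of the rule for illustration only.
# test_create_from_snapshot (here) and the different-share-type variant just
# below expect a share created from a snapshot to reuse the parent share's
# share type and to reject an explicitly different one.
def resolve_share_type_for_snapshot(parent_type_id, requested_type_id=None):
    if requested_type_id is not None and requested_type_id != parent_type_id:
        raise ValueError('Share type must match the snapshot source share')
    return parent_type_id


assert resolve_share_type_for_snapshot('type-1') == 'type-1'
assert resolve_share_type_for_snapshot('type-1', 'type-1') == 'type-1'
try:
    resolve_share_type_for_snapshot('type-1', 'super_fake_share_type')
except ValueError:
    pass  # mirrors the InvalidInput expectation in the test below
# === end of illustrative sketch ===============================================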
share_type = fakes.fake_share_type() mock_get_share_type_call = self.mock_object( share_types, 'get_share_type', mock.Mock(return_value=share_type)) az = share_data.pop('availability_zone') self.api.create( self.context, share_data['share_proto'], None, # NOTE(u_glide): Get share size from snapshot share_data['display_name'], share_data['display_description'], snapshot_id=snapshot['id'], availability_zone=az ) mock_get_share_type_call.assert_called_once_with( self.context, share['share_type_id']) self.assertSubDictMatch(share_data, db_api.share_create.call_args[0][1]) self.api.create_instance.assert_called_once_with( self.context, share, share_network_id=share['share_network_id'], host=valid_host, availability_zone=snapshot['share']['availability_zone'], consistency_group=None, cgsnapshot_member=None) share_api.policy.check_policy.assert_has_calls([ mock.call(self.context, 'share', 'create'), mock.call(self.context, 'share_snapshot', 'get_snapshot')]) quota.QUOTAS.reserve.assert_called_once_with( self.context, gigabytes=1, shares=1) quota.QUOTAS.commit.assert_called_once_with( self.context, 'reservation') def test_create_from_snapshot_with_different_share_type(self): snapshot, share, share_data, request_spec = ( self._setup_create_from_snapshot_mocks() ) share_type = {'id': 'super_fake_share_type'} self.assertRaises(exception.InvalidInput, self.api.create, self.context, share_data['share_proto'], share_data['size'], share_data['display_name'], share_data['display_description'], snapshot_id=snapshot['id'], availability_zone=share_data['availability_zone'], share_type=share_type) def test_get_snapshot(self): fake_get_snap = {'fake_key': 'fake_val'} with mock.patch.object(db_api, 'share_snapshot_get', mock.Mock(return_value=fake_get_snap)): rule = self.api.get_snapshot(self.context, 'fakeid') self.assertEqual(fake_get_snap, rule) share_api.policy.check_policy.assert_called_once_with( self.context, 'share_snapshot', 'get_snapshot') db_api.share_snapshot_get.assert_called_once_with( self.context, 'fakeid') def test_create_from_snapshot_not_available(self): snapshot = db_utils.create_snapshot( with_share=True, status=constants.STATUS_ERROR) self.assertRaises(exception.InvalidShareSnapshot, self.api.create, self.context, 'nfs', '1', 'fakename', 'fakedesc', snapshot_id=snapshot['id'], availability_zone='fakeaz') def test_create_from_snapshot_larger_size(self): snapshot = db_utils.create_snapshot( size=100, status=constants.STATUS_AVAILABLE, with_share=True) self.assertRaises(exception.InvalidInput, self.api.create, self.context, 'nfs', 1, 'fakename', 'fakedesc', availability_zone='fakeaz', snapshot_id=snapshot['id']) def test_create_share_wrong_size_0(self): self.assertRaises(exception.InvalidInput, self.api.create, self.context, 'nfs', 0, 'fakename', 'fakedesc', availability_zone='fakeaz') def test_create_share_wrong_size_some(self): self.assertRaises(exception.InvalidInput, self.api.create, self.context, 'nfs', 'some', 'fakename', 'fakedesc', availability_zone='fakeaz') @ddt.data(constants.STATUS_AVAILABLE, constants.STATUS_ERROR) def test_delete(self, status): share = self._setup_delete_mocks(status) self.api.delete(self.context, share) self.api.delete_instance.assert_called_once_with( utils.IsAMatcher(context.RequestContext), utils.IsAMatcher(models.ShareInstance), force=False ) db_api.share_snapshot_get_all_for_share.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share['id']) def test_delete_quota_with_different_user(self): share = 
        self._setup_delete_mocks(constants.STATUS_AVAILABLE)
        diff_user_context = context.RequestContext(
            user_id='fake2', project_id='fake', is_admin=False
        )

        self.api.delete(diff_user_context, share)

        quota.QUOTAS.reserve.assert_called_once_with(
            diff_user_context, project_id=share['project_id'], shares=-1,
            gigabytes=-share['size'], user_id=share['user_id']
        )
        quota.QUOTAS.commit.assert_called_once_with(
            diff_user_context, mock.ANY, project_id=share['project_id'],
            user_id=share['user_id']
        )

    def test_delete_wrong_status(self):
        share = fake_share('fakeid')
        self.mock_object(db_api, 'share_get', mock.Mock(return_value=share))

        self.assertRaises(exception.InvalidShare, self.api.delete,
                          self.context, share)

    def test_delete_share_has_replicas(self):
        share = self._setup_delete_mocks(constants.STATUS_AVAILABLE,
                                         replication_type='writable')
        db_utils.create_share_replica(share_id=share['id'],
                                      replica_state='in_sync')
        db_utils.create_share_replica(share_id=share['id'],
                                      replica_state='out_of_sync')

        self.assertRaises(exception.Conflict, self.api.delete,
                          self.context, share)

    @mock.patch.object(db_api, 'count_cgsnapshot_members_in_share',
                       mock.Mock(return_value=2))
    def test_delete_dependent_cgsnapshot_members(self):
        share_server_id = 'fake-ss-id'
        share = self._setup_delete_mocks(constants.STATUS_AVAILABLE,
                                         share_server_id)

        self.assertRaises(exception.InvalidShare, self.api.delete,
                          self.context, share)

    @mock.patch.object(db_api, 'share_instance_delete', mock.Mock())
    def test_delete_no_host(self):
        share = self._setup_delete_mocks(constants.STATUS_AVAILABLE,
                                         host=None)

        self.api.delete(self.context, share)

        db_api.share_instance_delete.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), share.instance['id'])

    def test_delete_share_with_snapshots(self):
        share = self._setup_delete_mocks(constants.STATUS_AVAILABLE,
                                         snapshots=['fake'])

        self.assertRaises(
            exception.InvalidShare,
            self.api.delete,
            self.context,
            share
        )

    def test_delete_share_invalid_task_state(self):
        share = db_utils.create_share(
            status=constants.STATUS_AVAILABLE,
            task_state=constants.TASK_STATE_MIGRATION_IN_PROGRESS)

        self.assertRaises(exception.ShareBusyException,
                          self.api.delete,
                          self.context, share)

    def test_delete_share_quota_error(self):
        share = self._setup_delete_mocks(constants.STATUS_AVAILABLE)
        self.mock_object(quota.QUOTAS, 'reserve',
                         mock.Mock(side_effect=exception.QuotaError('fake')))

        self.api.delete(self.context, share)

        quota.QUOTAS.reserve.assert_called_once_with(
            self.context,
            project_id=share['project_id'],
            shares=-1,
            gigabytes=-share['size'],
            user_id=share['user_id']
        )
        self.assertFalse(quota.QUOTAS.commit.called)

    @ddt.data({'status': constants.STATUS_AVAILABLE, 'force': False},
              {'status': constants.STATUS_ERROR, 'force': True})
    @ddt.unpack
    def test_delete_share_instance(self, status, force):
        instance = self._setup_delete_share_instance_mocks(
            status=status, share_server_id='fake')

        self.api.delete_instance(self.context, instance, force=force)

        db_api.share_instance_update.assert_called_once_with(
            self.context,
            instance['id'],
            {'status': constants.STATUS_DELETING,
             'terminated_at': self.dt_utc}
        )
        self.api.share_rpcapi.delete_share_instance.assert_called_once_with(
            self.context, instance, force=force
        )
        # The parent share server's 'updated_at' timestamp should be
        # refreshed when one of its share instances is deleted.
        db_api.share_server_update.assert_called_once_with(
            self.context,
            instance['share_server_id'],
            {'updated_at': self.dt_utc}
        )

    def test_delete_share_instance_invalid_status(self):
        instance = self._setup_delete_share_instance_mocks(
            status=constants.STATUS_CREATING, share_server_id='fake')

        self.assertRaises(
            exception.InvalidShareInstance,
            self.api.delete_instance,
self.context, instance ) @ddt.data('', 'fake', 'Truebar', 'Bartrue') def test_update_share_with_invalid_is_public_value(self, is_public): self.assertRaises(exception.InvalidParameterValue, self.api.update, self.context, 'fakeid', {'is_public': is_public}) def test_get(self): share = db_utils.create_share() with mock.patch.object(db_api, 'share_get', mock.Mock(return_value=share)): result = self.api.get(self.context, 'fakeid') self.assertEqual(share, result) share_api.policy.check_policy.assert_called_once_with( self.context, 'share', 'get', share) db_api.share_get.assert_called_once_with( self.context, 'fakeid') @mock.patch.object(db_api, 'share_snapshot_get_all_by_project', mock.Mock()) def test_get_all_snapshots_admin_not_all_tenants(self): ctx = context.RequestContext('fakeuid', 'fakepid', is_admin=True) self.api.get_all_snapshots(ctx) share_api.policy.check_policy.assert_called_once_with( ctx, 'share_snapshot', 'get_all_snapshots') db_api.share_snapshot_get_all_by_project.assert_called_once_with( ctx, 'fakepid', sort_dir='desc', sort_key='share_id', filters={}) @mock.patch.object(db_api, 'share_snapshot_get_all', mock.Mock()) def test_get_all_snapshots_admin_all_tenants(self): self.api.get_all_snapshots(self.context, search_opts={'all_tenants': 1}) share_api.policy.check_policy.assert_called_once_with( self.context, 'share_snapshot', 'get_all_snapshots') db_api.share_snapshot_get_all.assert_called_once_with( self.context, sort_dir='desc', sort_key='share_id', filters={}) @mock.patch.object(db_api, 'share_snapshot_get_all_by_project', mock.Mock()) def test_get_all_snapshots_not_admin(self): ctx = context.RequestContext('fakeuid', 'fakepid', is_admin=False) self.api.get_all_snapshots(ctx) share_api.policy.check_policy.assert_called_once_with( ctx, 'share_snapshot', 'get_all_snapshots') db_api.share_snapshot_get_all_by_project.assert_called_once_with( ctx, 'fakepid', sort_dir='desc', sort_key='share_id', filters={}) def test_get_all_snapshots_not_admin_search_opts(self): search_opts = {'size': 'fakesize'} fake_objs = [{'name': 'fakename1'}, search_opts] ctx = context.RequestContext('fakeuid', 'fakepid', is_admin=False) self.mock_object(db_api, 'share_snapshot_get_all_by_project', mock.Mock(return_value=fake_objs)) result = self.api.get_all_snapshots(ctx, search_opts) self.assertEqual([search_opts], result) share_api.policy.check_policy.assert_called_once_with( ctx, 'share_snapshot', 'get_all_snapshots') db_api.share_snapshot_get_all_by_project.assert_called_once_with( ctx, 'fakepid', sort_dir='desc', sort_key='share_id', filters=search_opts) def test_get_all_snapshots_with_sorting_valid(self): self.mock_object( db_api, 'share_snapshot_get_all_by_project', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SNAPSHOTS[0])) ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False) snapshots = self.api.get_all_snapshots( ctx, sort_key='status', sort_dir='asc') share_api.policy.check_policy.assert_called_once_with( ctx, 'share_snapshot', 'get_all_snapshots') db_api.share_snapshot_get_all_by_project.assert_called_once_with( ctx, 'fake_pid_1', sort_dir='asc', sort_key='status', filters={}) self.assertEqual(_FAKE_LIST_OF_ALL_SNAPSHOTS[0], snapshots) def test_get_all_snapshots_sort_key_invalid(self): self.mock_object( db_api, 'share_snapshot_get_all_by_project', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SNAPSHOTS[0])) ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False) self.assertRaises( exception.InvalidInput, self.api.get_all_snapshots, ctx, sort_key=1, ) 
share_api.policy.check_policy.assert_called_once_with( ctx, 'share_snapshot', 'get_all_snapshots') def test_get_all_snapshots_sort_dir_invalid(self): self.mock_object( db_api, 'share_snapshot_get_all_by_project', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SNAPSHOTS[0])) ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False) self.assertRaises( exception.InvalidInput, self.api.get_all_snapshots, ctx, sort_dir=1, ) share_api.policy.check_policy.assert_called_once_with( ctx, 'share_snapshot', 'get_all_snapshots') @ddt.data(None, 'rw', 'ro') def test_allow_access(self, level): share = db_utils.create_share(status=constants.STATUS_AVAILABLE) values = { 'share_id': share['id'], 'access_type': 'fake_access_type', 'access_to': 'fake_access_to', 'access_level': level, } fake_access_expected = copy.deepcopy(values) fake_access_expected.update({ 'id': 'fake_access_id', 'state': constants.STATUS_ACTIVE, }) fake_access = copy.deepcopy(fake_access_expected) fake_access.update({ 'deleted': 'fake_deleted', 'deleted_at': 'fake_deleted_at', 'instance_mappings': ['foo', 'bar'], }) self.mock_object(db_api, 'share_access_create', mock.Mock(return_value=fake_access)) self.mock_object(db_api, 'share_access_get', mock.Mock(return_value=fake_access)) access = self.api.allow_access( self.context, share, fake_access['access_type'], fake_access['access_to'], level) self.assertEqual(fake_access_expected, access) self.share_rpcapi.allow_access.assert_called_once_with( self.context, utils.IsAMatcher(models.ShareInstance), fake_access) db_api.share_access_create.assert_called_once_with( self.context, values) share_api.policy.check_policy.assert_called_with( self.context, 'share', 'allow_access') def test_allow_access_existent_access(self): share = db_utils.create_share(status=constants.STATUS_AVAILABLE) fake_access = db_utils.create_access(share_id=share['id']) self.assertRaises(exception.ShareAccessExists, self.api.allow_access, self.context, share, fake_access['access_type'], fake_access['access_to'], fake_access['access_level'] ) def test_allow_access_invalid_access_level(self): share = db_utils.create_share(status=constants.STATUS_AVAILABLE) self.assertRaises(exception.InvalidShareAccess, self.api.allow_access, self.context, share, 'fakeacctype', 'fakeaccto', 'ab') def test_allow_access_status_not_available(self): share = db_utils.create_share(status=constants.STATUS_ERROR) self.assertRaises(exception.InvalidShare, self.api.allow_access, self.context, share, 'fakeacctype', 'fakeaccto') def test_allow_access_no_host(self): share = db_utils.create_share(host=None) self.assertRaises(exception.InvalidShare, self.api.allow_access, self.context, share, 'fakeacctype', 'fakeaccto') @ddt.data(constants.STATUS_ACTIVE, constants.STATUS_UPDATING) def test_allow_access_to_instance(self, status): share = db_utils.create_share(host='fake') share_instance = db_utils.create_share_instance( share_id=share['id'], access_rules_status=status, host='fake') access = db_utils.create_access(share_id=share['id']) rpc_method = self.mock_object(self.api.share_rpcapi, 'allow_access') self.api.allow_access_to_instance(self.context, share_instance, access) rpc_method.assert_called_once_with( self.context, share_instance, access) def test_allow_access_to_instance_exception(self): share = db_utils.create_share(host='fake') access = db_utils.create_access(share_id=share['id']) share.instance['access_rules_status'] = constants.STATUS_ERROR self.assertRaises(exception.InvalidShareInstance, self.api.allow_access_to_instance, self.context, 
share.instance, access) def test_allow_access_to_instance_out_of_sync(self): share = db_utils.create_share(host='fake') access = db_utils.create_access(share_id=share['id']) rpc_method = self.mock_object(self.api.share_rpcapi, 'allow_access') share.instance['access_rules_status'] = constants.STATUS_OUT_OF_SYNC self.api.allow_access_to_instance(self.context, share.instance, access) rpc_method.assert_called_once_with( self.context, share.instance, access) @ddt.data(constants.STATUS_ACTIVE, constants.STATUS_UPDATING, constants.STATUS_UPDATING_MULTIPLE) def test_deny_access_to_instance(self, status): share = db_utils.create_share(host='fake') share_instance = db_utils.create_share_instance( share_id=share['id'], access_rules_status=status, host='fake') access = db_utils.create_access(share_id=share['id']) rpc_method = self.mock_object(self.api.share_rpcapi, 'deny_access') self.mock_object(db_api, 'share_instance_access_get', mock.Mock(return_value=access.instance_mappings[0])) self.mock_object(db_api, 'share_instance_update_access_status') self.api.deny_access_to_instance(self.context, share_instance, access) if status == constants.STATUS_ACTIVE: expected_new_status = constants.STATUS_OUT_OF_SYNC else: expected_new_status = constants.STATUS_UPDATING_MULTIPLE rpc_method.assert_called_once_with( self.context, share_instance, access) db_api.share_instance_update_access_status.assert_called_once_with( self.context, share_instance['id'], expected_new_status ) @ddt.data('allow_access_to_instance', 'deny_access_to_instance') def test_allow_and_deny_access_to_instance_invalid_instance(self, method): share = db_utils.create_share(host=None) self.assertRaises( exception.InvalidShareInstance, getattr(self.api, method), self.context, share.instance, 'fake' ) @mock.patch.object(db_api, 'share_get', mock.Mock()) @mock.patch.object(share_api.API, 'deny_access_to_instance', mock.Mock()) @mock.patch.object(db_api, 'share_instance_update_access_status', mock.Mock()) def test_deny_access_error(self): share = db_utils.create_share(status=constants.STATUS_AVAILABLE) db_api.share_get.return_value = share access = db_utils.create_access(share_id=share['id']) share_instance = share.instances[0] db_api.share_instance_access_get_all.return_value = [share_instance, ] self.api.deny_access(self.context, share, access) db_api.share_get.assert_called_once_with(self.context, share['id']) share_api.policy.check_policy.assert_called_once_with( self.context, 'share', 'deny_access') share_api.API.deny_access_to_instance.assert_called_once_with( self.context, share_instance, access) @mock.patch.object(db_api, 'share_get', mock.Mock()) @mock.patch.object(db_api, 'share_instance_access_get_all', mock.Mock()) @mock.patch.object(db_api, 'share_access_delete', mock.Mock()) def test_deny_access_error_no_share_instance_mapping(self): share = db_utils.create_share(status=constants.STATUS_AVAILABLE) db_api.share_get.return_value = share access = db_utils.create_access(share_id=share['id']) db_api.share_instance_access_get_all.return_value = [] self.api.deny_access(self.context, share, access) db_api.share_get.assert_called_once_with(self.context, share['id']) self.assertTrue(share_api.policy.check_policy.called) @mock.patch.object(db_api, 'share_instance_update_access_status', mock.Mock()) def test_deny_access_active(self): share = db_utils.create_share(status=constants.STATUS_AVAILABLE) access = db_utils.create_access(share_id=share['id']) self.api.deny_access(self.context, share, access) 
db_api.share_instance_update_access_status.assert_called_once_with( self.context, share.instance['id'], constants.STATUS_OUT_OF_SYNC ) share_api.policy.check_policy.assert_called_with( self.context, 'share', 'deny_access') self.share_rpcapi.deny_access.assert_called_once_with( self.context, utils.IsAMatcher(models.ShareInstance), access) def test_deny_access_not_found(self): share = db_utils.create_share(status=constants.STATUS_AVAILABLE) access = db_utils.create_access(share_id=share['id']) self.mock_object(db_api, 'share_instance_access_get', mock.Mock(side_effect=[exception.NotFound('fake')])) self.api.deny_access(self.context, share, access) share_api.policy.check_policy.assert_called_with( self.context, 'share', 'deny_access') def test_deny_access_status_not_available(self): share = db_utils.create_share(status=constants.STATUS_ERROR) self.assertRaises(exception.InvalidShare, self.api.deny_access, self.context, share, 'fakeacc') share_api.policy.check_policy.assert_called_once_with( self.context, 'share', 'deny_access') def test_deny_access_no_host(self): share = db_utils.create_share( status=constants.STATUS_AVAILABLE, host=None) self.assertRaises(exception.InvalidShare, self.api.deny_access, self.context, share, 'fakeacc') share_api.policy.check_policy.assert_called_once_with( self.context, 'share', 'deny_access') def test_access_get(self): with mock.patch.object(db_api, 'share_access_get', mock.Mock(return_value='fake')): rule = self.api.access_get(self.context, 'fakeid') self.assertEqual('fake', rule) share_api.policy.check_policy.assert_called_once_with( self.context, 'share', 'access_get') db_api.share_access_get.assert_called_once_with( self.context, 'fakeid') def test_access_get_all(self): share = db_utils.create_share(id='fakeid') values = { 'fakeacc0id': { 'id': 'fakeacc0id', 'access_type': 'fakeacctype', 'access_to': 'fakeaccto', 'access_level': 'rw', 'share_id': share['id'], }, 'fakeacc1id': { 'id': 'fakeacc1id', 'access_type': 'fakeacctype', 'access_to': 'fakeaccto', 'access_level': 'rw', 'share_id': share['id'], }, } rules = [ db_utils.create_access(**values['fakeacc0id']), db_utils.create_access(**values['fakeacc1id']), ] # add state property values['fakeacc0id']['state'] = constants.STATUS_ACTIVE values['fakeacc1id']['state'] = constants.STATUS_ACTIVE self.mock_object(db_api, 'share_access_get_all_for_share', mock.Mock(return_value=rules)) actual = self.api.access_get_all(self.context, share) for access in actual: expected_access = values[access['id']] expected_access.pop('share_id') self.assertEqual(expected_access, access) share_api.policy.check_policy.assert_called_once_with( self.context, 'share', 'access_get_all') db_api.share_access_get_all_for_share.assert_called_once_with( self.context, 'fakeid') def test_share_metadata_get(self): metadata = {'a': 'b', 'c': 'd'} share_id = str(uuid.uuid4()) db_api.share_create(self.context, {'id': share_id, 'metadata': metadata}) self.assertEqual(metadata, db_api.share_metadata_get(self.context, share_id)) def test_share_metadata_update(self): metadata1 = {'a': '1', 'c': '2'} metadata2 = {'a': '3', 'd': '5'} should_be = {'a': '3', 'c': '2', 'd': '5'} share_id = str(uuid.uuid4()) db_api.share_create(self.context, {'id': share_id, 'metadata': metadata1}) db_api.share_metadata_update(self.context, share_id, metadata2, False) self.assertEqual(should_be, db_api.share_metadata_get(self.context, share_id)) def test_share_metadata_update_delete(self): metadata1 = {'a': '1', 'c': '2'} metadata2 = {'a': '3', 'd': '4'} should_be = 
metadata2 share_id = str(uuid.uuid4()) db_api.share_create(self.context, {'id': share_id, 'metadata': metadata1}) db_api.share_metadata_update(self.context, share_id, metadata2, True) self.assertEqual(should_be, db_api.share_metadata_get(self.context, share_id)) def test_extend_invalid_status(self): invalid_status = 'fake' share = db_utils.create_share(status=invalid_status) new_size = 123 self.assertRaises(exception.InvalidShare, self.api.extend, self.context, share, new_size) def test_extend_invalid_task_state(self): share = db_utils.create_share( status=constants.STATUS_AVAILABLE, task_state=constants.TASK_STATE_MIGRATION_IN_PROGRESS) new_size = 123 self.assertRaises(exception.ShareBusyException, self.api.extend, self.context, share, new_size) def test_extend_invalid_size(self): share = db_utils.create_share(status=constants.STATUS_AVAILABLE, size=200) new_size = 123 self.assertRaises(exception.InvalidInput, self.api.extend, self.context, share, new_size) def test_extend_quota_error(self): share = db_utils.create_share(status=constants.STATUS_AVAILABLE, size=100) new_size = 123 usages = {'gigabytes': {'reserved': 11, 'in_use': 12}} quotas = {'gigabytes': 13} exc = exception.OverQuota(usages=usages, quotas=quotas, overs=new_size) self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(side_effect=exc)) self.assertRaises(exception.ShareSizeExceedsAvailableQuota, self.api.extend, self.context, share, new_size) def test_extend_quota_user(self): share = db_utils.create_share(status=constants.STATUS_AVAILABLE, size=100) diff_user_context = context.RequestContext( user_id='fake2', project_id='fake', is_admin=False ) new_size = 123 size_increase = int(new_size) - share['size'] self.mock_object(quota.QUOTAS, 'reserve') self.api.extend(diff_user_context, share, new_size) quota.QUOTAS.reserve.assert_called_once_with( diff_user_context, project_id=share['project_id'], gigabytes=size_increase, user_id=share['user_id'] ) def test_extend_valid(self): share = db_utils.create_share(status=constants.STATUS_AVAILABLE, size=100) new_size = 123 self.mock_object(self.api, 'update') self.mock_object(self.api.share_rpcapi, 'extend_share') self.api.extend(self.context, share, new_size) self.api.update.assert_called_once_with( self.context, share, {'status': constants.STATUS_EXTENDING}) self.api.share_rpcapi.extend_share.assert_called_once_with( self.context, share, new_size, mock.ANY ) def test_shrink_invalid_status(self): invalid_status = 'fake' share = db_utils.create_share(status=invalid_status) self.assertRaises(exception.InvalidShare, self.api.shrink, self.context, share, 123) def test_shrink_invalid_task_state(self): share = db_utils.create_share( status=constants.STATUS_AVAILABLE, task_state=constants.TASK_STATE_MIGRATION_IN_PROGRESS) self.assertRaises(exception.ShareBusyException, self.api.shrink, self.context, share, 123) @ddt.data(300, 0, -1) def test_shrink_invalid_size(self, new_size): share = db_utils.create_share(status=constants.STATUS_AVAILABLE, size=200) self.assertRaises(exception.InvalidInput, self.api.shrink, self.context, share, new_size) @ddt.data(constants.STATUS_AVAILABLE, constants.STATUS_SHRINKING_POSSIBLE_DATA_LOSS_ERROR) def test_shrink_valid(self, share_status): share = db_utils.create_share(status=share_status, size=100) new_size = 50 self.mock_object(self.api, 'update') self.mock_object(self.api.share_rpcapi, 'shrink_share') self.api.shrink(self.context, share, new_size) self.api.update.assert_called_once_with( self.context, share, {'status': constants.STATUS_SHRINKING}) 
self.api.share_rpcapi.shrink_share.assert_called_once_with( self.context, share, new_size ) def test_migration_start(self): host = 'fake2@backend#pool' fake_type = { 'id': 'fake_type_id', 'extra_specs': { 'snapshot_support': False, }, } share = db_utils.create_share( status=constants.STATUS_AVAILABLE, host='fake@backend#pool', share_type_id=fake_type['id']) request_spec = self._get_request_spec_dict( share, fake_type, size=0) self.mock_object(self.scheduler_rpcapi, 'migrate_share_to_host') self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=fake_type)) self.mock_object(utils, 'validate_service_host') self.api.migration_start(self.context, share, host, True, True) self.scheduler_rpcapi.migrate_share_to_host.assert_called_once_with( self.context, share['id'], host, True, True, request_spec) def test_migration_start_status_unavailable(self): host = 'fake2@backend#pool' share = db_utils.create_share( status=constants.STATUS_ERROR) self.assertRaises(exception.InvalidShare, self.api.migration_start, self.context, share, host, True, True) def test_migration_start_task_state_invalid(self): host = 'fake2@backend#pool' share = db_utils.create_share( status=constants.STATUS_AVAILABLE, task_state=constants.TASK_STATE_MIGRATION_IN_PROGRESS) self.assertRaises(exception.ShareBusyException, self.api.migration_start, self.context, share, host, True, True) def test_migration_start_with_snapshots(self): host = 'fake2@backend#pool' share = db_utils.create_share( host='fake@backend#pool', status=constants.STATUS_AVAILABLE) self.mock_object(db_api, 'share_snapshot_get_all_for_share', mock.Mock(return_value=True)) self.assertRaises(exception.InvalidShare, self.api.migration_start, self.context, share, host, True, True) def test_migration_start_has_replicas(self): host = 'fake2@backend#pool' share = db_utils.create_share( host='fake@backend#pool', status=constants.STATUS_AVAILABLE, replication_type='dr') for i in range(1, 4): db_utils.create_share_replica( share_id=share['id'], replica_state='in_sync') self.mock_object(db_api, 'share_snapshot_get_all_for_share', mock.Mock(return_value=True)) mock_log = self.mock_object(share_api, 'LOG') mock_snapshot_get_call = self.mock_object( db_api, 'share_snapshot_get_all_for_share') # Share was updated after adding replicas, grabbing it again. 
share = db_api.share_get(self.context, share['id']) self.assertRaises(exception.Conflict, self.api.migration_start, self.context, share, host, True) self.assertTrue(mock_log.error.called) self.assertFalse(mock_snapshot_get_call.called) def test_migration_start_invalid_host(self): host = 'fake@backend#pool' share = db_utils.create_share( host='fake2@backend', status=constants.STATUS_AVAILABLE) self.mock_object(db_api, 'share_snapshot_get_all_for_share', mock.Mock(return_value=False)) self.assertRaises(exception.ServiceNotFound, self.api.migration_start, self.context, share, host, True, True) def test_migration_start_same_host(self): host = 'fake@backend#pool' share = db_utils.create_share( host='fake@backend#pool', status=constants.STATUS_AVAILABLE) self.assertRaises(exception.InvalidHost, self.api.migration_start, self.context, share, host, True, True) def test_migration_start_exception(self): host = 'fake2@backend#pool' fake_type = { 'id': 'fake_type_id', 'extra_specs': { 'snapshot_support': False, }, } share = db_utils.create_share( host='fake@backend#pool', status=constants.STATUS_AVAILABLE, share_type_id=fake_type['id']) self.mock_object(self.scheduler_rpcapi, 'migrate_share_to_host') self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=fake_type)) self.mock_object(utils, 'validate_service_host') self.mock_object(db_api, 'share_snapshot_get_all_for_share', mock.Mock(return_value=False)) self.mock_object(db_api, 'share_update', mock.Mock(return_value=True)) self.mock_object(self.scheduler_rpcapi, 'migrate_share_to_host', mock.Mock(side_effect=exception.ShareMigrationFailed( reason='fake'))) self.assertRaises(exception.InvalidHost, self.api.migration_start, self.context, share, host, True, True) db_api.share_update.assert_any_call( mock.ANY, share['id'], mock.ANY) @ddt.data({}, {'replication_type': None}) def test_create_share_replica_invalid_share_type(self, attributes): share = fakes.fake_share(id='FAKE_SHARE_ID', **attributes) mock_request_spec_call = self.mock_object( self.api, '_create_share_instance_and_get_request_spec') mock_db_update_call = self.mock_object(db_api, 'share_replica_update') mock_scheduler_rpcapi_call = self.mock_object( self.api.scheduler_rpcapi, 'create_share_replica') self.assertRaises(exception.InvalidShare, self.api.create_share_replica, self.context, share) self.assertFalse(mock_request_spec_call.called) self.assertFalse(mock_db_update_call.called) self.assertFalse(mock_scheduler_rpcapi_call.called) def test_create_share_replica_busy_share(self): share = fakes.fake_share( id='FAKE_SHARE_ID', task_state='doing_something_real_important', is_busy=True, replication_type='dr') mock_request_spec_call = self.mock_object( self.api, '_create_share_instance_and_get_request_spec') mock_db_update_call = self.mock_object(db_api, 'share_replica_update') mock_scheduler_rpcapi_call = self.mock_object( self.api.scheduler_rpcapi, 'create_share_replica') self.assertRaises(exception.ShareBusyException, self.api.create_share_replica, self.context, share) self.assertFalse(mock_request_spec_call.called) self.assertFalse(mock_db_update_call.called) self.assertFalse(mock_scheduler_rpcapi_call.called) @ddt.data(None, []) def test_create_share_replica_no_active_replica(self, active_replicas): share = fakes.fake_share( id='FAKE_SHARE_ID', replication_type='dr') mock_request_spec_call = self.mock_object( self.api, '_create_share_instance_and_get_request_spec') mock_db_update_call = self.mock_object(db_api, 'share_replica_update') mock_scheduler_rpcapi_call = 
self.mock_object( self.api.scheduler_rpcapi, 'create_share_replica') self.mock_object(db_api, 'share_replicas_get_available_active_replica', mock.Mock(return_value=active_replicas)) self.assertRaises(exception.ReplicationException, self.api.create_share_replica, self.context, share) self.assertFalse(mock_request_spec_call.called) self.assertFalse(mock_db_update_call.called) self.assertFalse(mock_scheduler_rpcapi_call.called) @ddt.data(True, False) def test_create_share_replica(self, has_snapshots): request_spec = fakes.fake_replica_request_spec() replica = request_spec['share_instance_properties'] share = fakes.fake_share( id=replica['share_id'], replication_type='dr') snapshots = ( [fakes.fake_snapshot(), fakes.fake_snapshot()] if has_snapshots else [] ) fake_replica = fakes.fake_replica(id=replica['id']) fake_request_spec = fakes.fake_replica_request_spec() self.mock_object(db_api, 'share_replicas_get_available_active_replica', mock.Mock(return_value={'host': 'fake_ar_host'})) self.mock_object( share_api.API, '_create_share_instance_and_get_request_spec', mock.Mock(return_value=(fake_request_spec, fake_replica))) self.mock_object(db_api, 'share_replica_update') mock_sched_rpcapi_call = self.mock_object( self.api.scheduler_rpcapi, 'create_share_replica') mock_snapshot_get_all_call = self.mock_object( db_api, 'share_snapshot_get_all_for_share', mock.Mock(return_value=snapshots)) mock_snapshot_instance_create_call = self.mock_object( db_api, 'share_snapshot_instance_create') expected_snap_instance_create_call_count = 2 if has_snapshots else 0 result = self.api.create_share_replica( self.context, share, availability_zone='FAKE_AZ') self.assertTrue(mock_sched_rpcapi_call.called) self.assertEqual(replica, result) mock_snapshot_get_all_call.assert_called_once_with( self.context, fake_replica['share_id']) self.assertEqual(expected_snap_instance_create_call_count, mock_snapshot_instance_create_call.call_count) def test_delete_last_active_replica(self): fake_replica = fakes.fake_replica( share_id='FAKE_SHARE_ID', replica_state=constants.REPLICA_STATE_ACTIVE) self.mock_object(db_api, 'share_replicas_get_all_by_share', mock.Mock(return_value=[fake_replica])) mock_log = self.mock_object(share_api.LOG, 'info') self.assertRaises( exception.ReplicationException, self.api.delete_share_replica, self.context, fake_replica) self.assertFalse(mock_log.called) @ddt.data(True, False) def test_delete_share_replica_no_host(self, has_snapshots): snapshots = [{'id': 'xyz'}, {'id': 'abc'}, {'id': 'pqr'}] snapshots = snapshots if has_snapshots else [] replica = fakes.fake_replica('FAKE_ID', host='') mock_sched_rpcapi_call = self.mock_object( self.share_rpcapi, 'delete_share_replica') mock_db_replica_delete_call = self.mock_object( db_api, 'share_replica_delete') mock_db_update_call = self.mock_object(db_api, 'share_replica_update') mock_snapshot_get_call = self.mock_object( db_api, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=snapshots)) mock_snapshot_instance_delete_call = self.mock_object( db_api, 'share_snapshot_instance_delete') self.api.delete_share_replica(self.context, replica) self.assertFalse(mock_sched_rpcapi_call.called) mock_db_replica_delete_call.assert_called_once_with( self.context, replica['id']) mock_db_update_call.assert_called_once_with( self.context, replica['id'], {'status': constants.STATUS_DELETING, 'terminated_at': mock.ANY}) mock_snapshot_get_call.assert_called_once_with( self.context, {'share_instance_ids': replica['id']}) self.assertEqual( len(snapshots), 
mock_snapshot_instance_delete_call.call_count) @ddt.data(True, False) def test_delete_share_replica(self, force): replica = fakes.fake_replica('FAKE_ID', host='HOSTA@BackendB#PoolC') mock_sched_rpcapi_call = self.mock_object( self.share_rpcapi, 'delete_share_replica') mock_db_update_call = self.mock_object(db_api, 'share_replica_update') self.api.delete_share_replica(self.context, replica, force=force) mock_sched_rpcapi_call.assert_called_once_with( self.context, replica, force=force) mock_db_update_call.assert_called_once_with( self.context, replica['id'], {'status': constants.STATUS_DELETING, 'terminated_at': mock.ANY}) @ddt.data(constants.STATUS_CREATING, constants.STATUS_DELETING, constants.STATUS_ERROR, constants.STATUS_EXTENDING, constants.STATUS_REPLICATION_CHANGE, constants.STATUS_MANAGING, constants.STATUS_ERROR_DELETING) def test_promote_share_replica_non_available_status(self, status): replica = fakes.fake_replica( status=status, replica_state=constants.REPLICA_STATE_IN_SYNC) mock_rpcapi_promote_share_replica_call = self.mock_object( self.share_rpcapi, 'promote_share_replica') self.assertRaises(exception.ReplicationException, self.api.promote_share_replica, self.context, replica) self.assertFalse(mock_rpcapi_promote_share_replica_call.called) @ddt.data(constants.REPLICA_STATE_OUT_OF_SYNC, constants.STATUS_ERROR) def test_promote_share_replica_out_of_sync_non_admin(self, replica_state): fake_user_context = context.RequestContext( user_id=None, project_id=None, is_admin=False, read_deleted='no', overwrite=False) replica = fakes.fake_replica( status=constants.STATUS_AVAILABLE, replica_state=replica_state) mock_rpcapi_promote_share_replica_call = self.mock_object( self.share_rpcapi, 'promote_share_replica') self.assertRaises(exception.AdminRequired, self.api.promote_share_replica, fake_user_context, replica) self.assertFalse(mock_rpcapi_promote_share_replica_call.called) @ddt.data(constants.REPLICA_STATE_OUT_OF_SYNC, constants.STATUS_ERROR) def test_promote_share_replica_admin_authorized(self, replica_state): replica = fakes.fake_replica( status=constants.STATUS_AVAILABLE, replica_state=replica_state, host='HOSTA@BackendB#PoolC') self.mock_object(db_api, 'share_replica_get', mock.Mock(return_value=replica)) mock_rpcapi_promote_share_replica_call = self.mock_object( self.share_rpcapi, 'promote_share_replica') mock_db_update_call = self.mock_object(db_api, 'share_replica_update') retval = self.api.promote_share_replica( self.context, replica) self.assertEqual(replica, retval) mock_db_update_call.assert_called_once_with( self.context, replica['id'], {'status': constants.STATUS_REPLICATION_CHANGE}) mock_rpcapi_promote_share_replica_call.assert_called_once_with( self.context, replica) def test_promote_share_replica(self): replica = fakes.fake_replica('FAKE_ID', host='HOSTA@BackendB#PoolC') self.mock_object(db_api, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db_api, 'share_replica_update') mock_sched_rpcapi_call = self.mock_object( self.share_rpcapi, 'promote_share_replica') result = self.api.promote_share_replica(self.context, replica) mock_sched_rpcapi_call.assert_called_once_with( self.context, replica) self.assertEqual(replica, result) def test_update_share_replica_no_host(self): replica = fakes.fake_replica('FAKE_ID') replica['host'] = None mock_rpcapi_update_share_replica_call = self.mock_object( self.share_rpcapi, 'update_share_replica') self.assertRaises(exception.InvalidHost, self.api.update_share_replica, self.context, replica) 
self.assertFalse(mock_rpcapi_update_share_replica_call.called) def test_update_share_replica(self): replica = fakes.fake_replica('FAKE_ID', host='HOSTA@BackendB#PoolC') mock_rpcapi_update_share_replica_call = self.mock_object( self.share_rpcapi, 'update_share_replica') retval = self.api.update_share_replica(self.context, replica) self.assertTrue(mock_rpcapi_update_share_replica_call.called) self.assertIsNone(retval) def test_migration_complete(self): instance1 = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_MIGRATING) instance2 = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_MIGRATING_TO) share = db_utils.create_share( id='fake_id', task_state=constants.TASK_STATE_DATA_COPYING_COMPLETED, instances=[instance1, instance2]) self.mock_object(share_rpc.ShareAPI, 'migration_complete') self.api.migration_complete(self.context, share) share_rpc.ShareAPI.migration_complete.assert_called_once_with( self.context, share, instance1['id'], instance2['id']) def test_migration_complete_task_state_invalid(self): share = db_utils.create_share( id='fake_id', task_state=constants.TASK_STATE_DATA_COPYING_IN_PROGRESS) self.assertRaises(exception.InvalidShare, self.api.migration_complete, self.context, share) def test_migration_complete_status_invalid(self): instance1 = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_ERROR) instance2 = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_ERROR) share = db_utils.create_share( id='fake_id', task_state=constants.TASK_STATE_DATA_COPYING_COMPLETED, instances=[instance1, instance2]) self.assertRaises(exception.ShareMigrationFailed, self.api.migration_complete, self.context, share) def test_migration_cancel(self): share = db_utils.create_share( id='fake_id', task_state=constants.TASK_STATE_DATA_COPYING_IN_PROGRESS) self.mock_object(data_rpc.DataAPI, 'data_copy_cancel') self.api.migration_cancel(self.context, share) data_rpc.DataAPI.data_copy_cancel.assert_called_once_with( self.context, share['id']) def test_migration_cancel_driver(self): share = db_utils.create_share( id='fake_id', task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS) self.mock_object(share_rpc.ShareAPI, 'migration_cancel') self.api.migration_cancel(self.context, share) share_rpc.ShareAPI.migration_cancel.assert_called_once_with( self.context, share) def test_migration_cancel_task_state_invalid(self): share = db_utils.create_share( id='fake_id', task_state=constants.TASK_STATE_DATA_COPYING_STARTING) self.assertRaises(exception.InvalidShare, self.api.migration_cancel, self.context, share) def test_migration_get_progress(self): share = db_utils.create_share( id='fake_id', task_state=constants.TASK_STATE_DATA_COPYING_IN_PROGRESS) expected = 'fake_progress' self.mock_object(data_rpc.DataAPI, 'data_copy_get_progress', mock.Mock(return_value=expected)) result = self.api.migration_get_progress(self.context, share) self.assertEqual(expected, result) data_rpc.DataAPI.data_copy_get_progress.assert_called_once_with( self.context, share['id']) def test_migration_get_progress_driver(self): share = db_utils.create_share( id='fake_id', task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS) expected = 'fake_progress' self.mock_object(share_rpc.ShareAPI, 'migration_get_progress', mock.Mock(return_value=expected)) result = self.api.migration_get_progress(self.context, share) self.assertEqual(expected, result) share_rpc.ShareAPI.migration_get_progress.assert_called_once_with( self.context, share) 
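    # NOTE: The two migration progress tests above capture the expected
    # dispatch: when the share's task_state is
    # TASK_STATE_DATA_COPYING_IN_PROGRESS, progress is requested from the
    # data service via data_rpc.DataAPI.data_copy_get_progress (keyed by
    # share id), while TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS routes the
    # request to the share service via
    # share_rpc.ShareAPI.migration_get_progress (passing the share). This
    # summary is inferred from the assertions above, not from the API code.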
    def test_migration_get_progress_task_state_invalid(self):
        share = db_utils.create_share(
            id='fake_id',
            task_state=constants.TASK_STATE_DATA_COPYING_STARTING)

        self.assertRaises(exception.InvalidShare,
                          self.api.migration_get_progress,
                          self.context, share)


class OtherTenantsShareActionsTestCase(test.TestCase):
    def setUp(self):
        super(OtherTenantsShareActionsTestCase, self).setUp()
        self.api = share.API()

    def test_delete_other_tenants_public_share(self):
        share = db_utils.create_share(is_public=True)
        ctx = context.RequestContext(user_id='1111', project_id='2222')
        self.assertRaises(exception.PolicyNotAuthorized, self.api.delete,
                          ctx, share)

    def test_update_other_tenants_public_share(self):
        share = db_utils.create_share(is_public=True)
        ctx = context.RequestContext(user_id='1111', project_id='2222')
        self.assertRaises(exception.PolicyNotAuthorized, self.api.update,
                          ctx, share, {'display_name': 'newname'})

    def test_get_other_tenants_public_share(self):
        share = db_utils.create_share(is_public=True)
        ctx = context.RequestContext(user_id='1111', project_id='2222')
        self.mock_object(db_api, 'share_get',
                         mock.Mock(return_value=share))
        result = self.api.get(ctx, 'fakeid')
        self.assertEqual(share, result)
        db_api.share_get.assert_called_once_with(ctx, 'fakeid')
manila-2.0.0/manila/tests/share/drivers/0000775000567000056710000000000012701407265021302 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/zfssa/0000775000567000056710000000000012701407265022430 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/zfssa/test_zfssashare.py0000664000567000056710000002624412701407107026215 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for Oracle's ZFSSA Manila driver.
""" import mock from oslo_config import cfg from oslo_utils import units from manila import context from manila import exception from manila.share import configuration as conf from manila.share.drivers.zfssa import zfssashare from manila import test from manila.tests import fake_zfssa CONF = cfg.CONF class ZFSSAShareDriverTestCase(test.TestCase): """Tests ZFSSAShareDriver.""" share = { 'id': 'fakeid', 'name': 'fakename', 'size': 1, 'share_proto': 'NFS', 'export_location': '127.0.0.1:/mnt/nfs/volume-00002', } share2 = { 'id': 'fakeid2', 'name': 'fakename2', 'size': 4, 'share_proto': 'NFS', 'export_location': '127.0.0.1:/mnt/nfs/volume-00003', 'space_data': 3006477107 } snapshot = { 'id': 'fakesnapshotid', 'share_name': 'fakename', 'share_id': 'fakeid', 'name': 'fakesnapshotname', 'share_size': 1, 'share_proto': 'NFS', } access = { 'id': 'fakeaccid', 'access_type': 'ip', 'access_to': '10.0.0.2', 'state': 'active', } @mock.patch.object(zfssashare, 'factory_zfssa') def setUp(self, _factory_zfssa): super(ZFSSAShareDriverTestCase, self).setUp() self._create_fake_config() lcfg = self.configuration self.mountpoint = '/export/' + lcfg.zfssa_nas_mountpoint _factory_zfssa.return_value = fake_zfssa.FakeZFSSA() _factory_zfssa.set_host(lcfg.zfssa_host) _factory_zfssa.login(lcfg.zfssa_auth_user) self._context = context.get_admin_context() self._driver = zfssashare.ZFSSAShareDriver(False, configuration=lcfg) self._driver.do_setup(self._context) def _create_fake_config(self): def _safe_get(opt): return getattr(self.configuration, opt) self.configuration = mock.Mock(spec=conf.Configuration) self.configuration.safe_get = mock.Mock(side_effect=_safe_get) self.configuration.zfssa_host = '1.1.1.1' self.configuration.zfssa_data_ip = '1.1.1.1' self.configuration.zfssa_auth_user = 'user' self.configuration.zfssa_auth_password = 'passwd' self.configuration.zfssa_pool = 'pool' self.configuration.zfssa_project = 'project' self.configuration.zfssa_nas_mountpoint = 'project' self.configuration.zfssa_nas_checksum = 'fletcher4' self.configuration.zfssa_nas_logbias = 'latency' self.configuration.zfssa_nas_compression = 'off' self.configuration.zfssa_nas_vscan = 'false' self.configuration.zfssa_nas_rstchown = 'true' self.configuration.zfssa_nas_quota_snap = 'true' self.configuration.zfssa_rest_timeout = 60 self.configuration.network_config_group = 'fake_network_config_group' self.configuration.admin_network_config_group = ( 'fake_admin_network_config_group') self.configuration.driver_handles_share_servers = False def test_create_share(self): self.mock_object(self._driver.zfssa, 'create_share') self.mock_object(self._driver, '_export_location') lcfg = self.configuration arg = { 'host': lcfg.zfssa_data_ip, 'mountpoint': self.mountpoint, 'name': self.share['id'], } location = ("%(host)s:%(mountpoint)s/%(name)s" % arg) self._driver._export_location.return_value = location arg = self._driver.create_arg(self.share['size']) arg.update(self._driver.default_args) arg.update({'name': self.share['id']}) ret = self._driver.create_share(self._context, self.share) self._driver.zfssa.create_share.assert_called_with(lcfg.zfssa_pool, lcfg.zfssa_project, arg) self.assertEqual(location, ret) self.assertEqual(1, self._driver.zfssa.create_share.call_count) self.assertEqual(1, self._driver._export_location.call_count) def test_create_share_from_snapshot(self): self.mock_object(self._driver.zfssa, 'clone_snapshot') self.mock_object(self._driver, '_export_location') lcfg = self.configuration arg = { 'host': lcfg.zfssa_data_ip, 'mountpoint': 
self.mountpoint, 'name': self.share['id'], } location = ("%(host)s:%(mountpoint)s/%(name)s" % arg) self._driver._export_location.return_value = location arg = self._driver.create_arg(self.share['size']) details = { 'share': self.share['id'], 'project': lcfg.zfssa_project, } arg.update(details) ret = self._driver.create_share_from_snapshot(self._context, self.share, self.snapshot) self.assertEqual(location, ret) self.assertEqual(1, self._driver.zfssa.clone_snapshot.call_count) self.assertEqual(1, self._driver._export_location.call_count) self._driver.zfssa.clone_snapshot.assert_called_with( lcfg.zfssa_pool, lcfg.zfssa_project, self.snapshot, self.share, arg) def test_delete_share(self): self.mock_object(self._driver.zfssa, 'delete_share') self._driver.delete_share(self._context, self.share) self.assertEqual(1, self._driver.zfssa.delete_share.call_count) lcfg = self.configuration self._driver.zfssa.delete_share.assert_called_with(lcfg.zfssa_pool, lcfg.zfssa_project, self.share['id']) def test_create_snapshot(self): self.mock_object(self._driver.zfssa, 'create_snapshot') lcfg = self.configuration self._driver.create_snapshot(self._context, self.snapshot) self.assertEqual(1, self._driver.zfssa.create_snapshot.call_count) self._driver.zfssa.create_snapshot.assert_called_with( lcfg.zfssa_pool, lcfg.zfssa_project, self.snapshot['share_id'], self.snapshot['id']) def test_delete_snapshot(self): self.mock_object(self._driver.zfssa, 'delete_snapshot') self._driver.delete_snapshot(self._context, self.snapshot) self.assertEqual(1, self._driver.zfssa.delete_snapshot.call_count) def test_delete_snapshot_negative(self): self.mock_object(self._driver.zfssa, 'has_clones') self._driver.zfssa.has_clones.return_value = True self.assertRaises(exception.ShareSnapshotIsBusy, self._driver.delete_snapshot, self._context, self.snapshot) def test_ensure_share(self): self.mock_object(self._driver.zfssa, 'get_share') lcfg = self.configuration self._driver.ensure_share(self._context, self.share) self.assertEqual(1, self._driver.zfssa.get_share.call_count) self._driver.zfssa.get_share.assert_called_with( lcfg.zfssa_pool, lcfg.zfssa_project, self.share['id']) self._driver.zfssa.get_share.return_value = None self.assertRaises(exception.ManilaException, self._driver.ensure_share, self._context, self.share) def test_allow_access(self): self.mock_object(self._driver.zfssa, 'allow_access_nfs') lcfg = self.configuration self._driver.allow_access(self._context, self.share, self.access) self.assertEqual(1, self._driver.zfssa.allow_access_nfs.call_count) self._driver.zfssa.allow_access_nfs.assert_called_with( lcfg.zfssa_pool, lcfg.zfssa_project, self.share['id'], self.access) def test_deny_access(self): self.mock_object(self._driver.zfssa, 'deny_access_nfs') lcfg = self.configuration self._driver.deny_access(self._context, self.share, self.access) self.assertEqual(1, self._driver.zfssa.deny_access_nfs.call_count) self._driver.zfssa.deny_access_nfs.assert_called_with( lcfg.zfssa_pool, lcfg.zfssa_project, self.share['id'], self.access) def test_extend_share_negative(self): self.mock_object(self._driver.zfssa, 'modify_share') new_size = 3 # Not enough space in project, expect an exception: self.mock_object(self._driver.zfssa, 'get_project_stats') self._driver.zfssa.get_project_stats.return_value = 1 * units.Gi self.assertRaises(exception.ShareExtendingError, self._driver.extend_share, self.share, new_size) def test_extend_share(self): self.mock_object(self._driver.zfssa, 'modify_share') new_size = 3 lcfg = self.configuration 
        self.mock_object(self._driver.zfssa, 'get_project_stats')
        self._driver.zfssa.get_project_stats.return_value = 10 * units.Gi

        arg = self._driver.create_arg(new_size)
        self._driver.extend_share(self.share, new_size)

        self.assertEqual(1, self._driver.zfssa.modify_share.call_count)
        self._driver.zfssa.modify_share.assert_called_with(
            lcfg.zfssa_pool,
            lcfg.zfssa_project,
            self.share['id'],
            arg)

    def test_shrink_share_negative(self):
        self.mock_object(self._driver.zfssa, 'modify_share')
        # Used space is larger than 2GB
        new_size = 2
        self.mock_object(self._driver.zfssa, 'get_share')
        self._driver.zfssa.get_share.return_value = self.share2

        self.assertRaises(exception.ShareShrinkingPossibleDataLoss,
                          self._driver.shrink_share,
                          self.share2,
                          new_size)

    def test_shrink_share(self):
        self.mock_object(self._driver.zfssa, 'modify_share')
        new_size = 3
        lcfg = self.configuration
        self.mock_object(self._driver.zfssa, 'get_share')
        self._driver.zfssa.get_share.return_value = self.share2

        arg = self._driver.create_arg(new_size)
        self._driver.shrink_share(self.share2, new_size)

        self.assertEqual(1, self._driver.zfssa.modify_share.call_count)
        self._driver.zfssa.modify_share.assert_called_with(
            lcfg.zfssa_pool,
            lcfg.zfssa_project,
            self.share2['id'],
            arg)
manila-2.0.0/manila/tests/share/drivers/zfssa/__init__.py0000664000567000056710000000000012701407107024522 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/zfssa/test_zfssarest.py0000664000567000056710000004077312701407112026067 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for Oracle's ZFSSA REST API.
""" import mock from oslo_log import log from manila import exception from manila.share.drivers.zfssa import restclient from manila.share.drivers.zfssa import zfssarest from manila import test from manila.tests import fake_zfssa LOG = log.getLogger(__name__) class ZFSSAApiTestCase(test.TestCase): """Tests ZFSSAApi.""" @mock.patch.object(zfssarest, 'factory_restclient') def setUp(self, _restclient): super(ZFSSAApiTestCase, self).setUp() self.host = 'fakehost' self.user = 'fakeuser' self.url = None self.pool = 'fakepool' self.project = 'fakeproject' self.share = 'fakeshare' self.snap = 'fakesnapshot' _restclient.return_value = fake_zfssa.FakeRestClient() self._zfssa = zfssarest.ZFSSAApi() self._zfssa.set_host('fakehost') def _create_response(self, status): response = fake_zfssa.FakeResponse(status) return response def test_enable_service(self): self.mock_object(self._zfssa.rclient, 'put') self._zfssa.rclient.put.return_value = self._create_response( restclient.Status.ACCEPTED) self._zfssa.enable_service('nfs') self.assertEqual(1, self._zfssa.rclient.put.call_count) self._zfssa.rclient.put.return_value = self._create_response( restclient.Status.OK) self.assertRaises(exception.ShareBackendException, self._zfssa.enable_service, 'nfs') def test_verify_avail_space(self): self.mock_object(self._zfssa, 'verify_project') self.mock_object(self._zfssa, 'get_project_stats') self._zfssa.get_project_stats.return_value = 2000 self._zfssa.verify_avail_space(self.pool, self.project, self.share, 1000) self.assertEqual(1, self._zfssa.verify_project.call_count) self.assertEqual(1, self._zfssa.get_project_stats.call_count) self._zfssa.verify_project.assert_called_with(self.pool, self.project) self._zfssa.get_project_stats.assert_called_with(self.pool, self.project) self._zfssa.get_project_stats.return_value = 900 self.assertRaises(exception.ShareBackendException, self._zfssa.verify_avail_space, self.pool, self.project, self.share, 1000) def test_create_project(self): self.mock_object(self._zfssa, 'verify_pool') self.mock_object(self._zfssa.rclient, 'get') self.mock_object(self._zfssa.rclient, 'post') arg = { 'name': self.project, 'sharesmb': 'off', 'sharenfs': 'off', 'mountpoint': 'fakemnpt', } self._zfssa.rclient.get.return_value = self._create_response( restclient.Status.NOT_FOUND) self._zfssa.rclient.post.return_value = self._create_response( restclient.Status.CREATED) self._zfssa.create_project(self.pool, self.project, arg) self.assertEqual(1, self._zfssa.rclient.get.call_count) self.assertEqual(1, self._zfssa.rclient.post.call_count) self.assertEqual(1, self._zfssa.verify_pool.call_count) self._zfssa.verify_pool.assert_called_with(self.pool) self._zfssa.rclient.post.return_value = self._create_response( restclient.Status.NOT_FOUND) self.assertRaises(exception.ShareBackendException, self._zfssa.create_project, self.pool, self.project, arg) def test_create_share(self): self.mock_object(self._zfssa, 'verify_avail_space') self.mock_object(self._zfssa.rclient, 'get') self.mock_object(self._zfssa.rclient, 'post') self._zfssa.rclient.get.return_value = self._create_response( restclient.Status.NOT_FOUND) self._zfssa.rclient.post.return_value = self._create_response( restclient.Status.CREATED) arg = { "name": self.share, "quota": 1, } self._zfssa.create_share(self.pool, self.project, arg) self.assertEqual(1, self._zfssa.rclient.get.call_count) self.assertEqual(1, self._zfssa.rclient.post.call_count) self.assertEqual(1, self._zfssa.verify_avail_space.call_count) 
self._zfssa.verify_avail_space.assert_called_with(self.pool, self.project, arg, arg['quota']) self._zfssa.rclient.post.return_value = self._create_response( restclient.Status.NOT_FOUND) self.assertRaises(exception.ShareBackendException, self._zfssa.create_share, self.pool, self.project, arg) self._zfssa.rclient.get.return_value = self._create_response( restclient.Status.OK) self.assertRaises(exception.ShareBackendException, self._zfssa.create_share, self.pool, self.project, arg) def test_modify_share(self): self.mock_object(self._zfssa.rclient, 'put') self._zfssa.rclient.put.return_value = self._create_response( restclient.Status.ACCEPTED) arg = {"name": "dummyname"} svc = self._zfssa.share_path % (self.pool, self.project, self.share) self._zfssa.modify_share(self.pool, self.project, self.share, arg) self.assertEqual(1, self._zfssa.rclient.put.call_count) self._zfssa.rclient.put.assert_called_with(svc, arg) self._zfssa.rclient.put.return_value = self._create_response( restclient.Status.BAD_REQUEST) self.assertRaises(exception.ShareBackendException, self._zfssa.modify_share, self.pool, self.project, self.share, arg) def test_delete_share(self): self.mock_object(self._zfssa.rclient, 'delete') self._zfssa.rclient.delete.return_value = self._create_response( restclient.Status.NO_CONTENT) svc = self._zfssa.share_path % (self.pool, self.project, self.share) self._zfssa.delete_share(self.pool, self.project, self.share) self.assertEqual(1, self._zfssa.rclient.delete.call_count) self._zfssa.rclient.delete.assert_called_with(svc) def test_create_snapshot(self): self.mock_object(self._zfssa.rclient, 'post') self._zfssa.rclient.post.return_value = self._create_response( restclient.Status.CREATED) arg = {"name": self.snap} svc = self._zfssa.snapshots_path % (self.pool, self.project, self.share) self._zfssa.create_snapshot(self.pool, self.project, self.share, self.snap) self.assertEqual(1, self._zfssa.rclient.post.call_count) self._zfssa.rclient.post.assert_called_with(svc, arg) self._zfssa.rclient.post.return_value = self._create_response( restclient.Status.BAD_REQUEST) self.assertRaises(exception.ShareBackendException, self._zfssa.create_snapshot, self.pool, self.project, self.share, self.snap) def test_delete_snapshot(self): self.mock_object(self._zfssa.rclient, 'delete') self._zfssa.rclient.delete.return_value = self._create_response( restclient.Status.NO_CONTENT) svc = self._zfssa.snapshot_path % (self.pool, self.project, self.share, self.snap) self._zfssa.delete_snapshot(self.pool, self.project, self.share, self.snap) self.assertEqual(1, self._zfssa.rclient.delete.call_count) self._zfssa.rclient.delete.assert_called_with(svc) self._zfssa.rclient.delete.return_value = self._create_response( restclient.Status.BAD_REQUEST) self.assertRaises(exception.ShareBackendException, self._zfssa.delete_snapshot, self.pool, self.project, self.share, self.snap) def test_clone_snapshot(self): self.mock_object(self._zfssa, 'verify_avail_space') self.mock_object(self._zfssa.rclient, 'put') self._zfssa.rclient.put.return_value = self._create_response( restclient.Status.CREATED) snapshot = { "id": self.snap, "share_id": self.share, } clone = { "id": "cloneid", "size": 1, } arg = { "name": "dummyname", "quota": 1, } self._zfssa.clone_snapshot(self.pool, self.project, snapshot, clone, arg) self.assertEqual(1, self._zfssa.rclient.put.call_count) self.assertEqual(1, self._zfssa.verify_avail_space.call_count) self._zfssa.verify_avail_space.assert_called_with(self.pool, self.project, clone['id'], clone['size']) 
        self._zfssa.rclient.put.return_value = self._create_response(
            restclient.Status.NOT_FOUND)
        self.assertRaises(exception.ShareBackendException,
                          self._zfssa.clone_snapshot,
                          self.pool,
                          self.project,
                          snapshot,
                          clone,
                          arg)

    def _create_entry(self, sharenfs, ip):
        if sharenfs == 'off':
            sharenfs = 'sec=sys'
        entry = (',rw=@%s' % ip)
        if '/' not in ip:
            entry = entry + '/32'
        arg = {'sharenfs': sharenfs + entry}
        return arg

    def test_allow_access_nfs(self):
        self.mock_object(self._zfssa, 'get_share')
        self.mock_object(self._zfssa, 'modify_share')
        details = {"sharenfs": "off"}
        access = {
            "access_type": "nonip",
            "access_to": "foo",
        }

        # invalid access type
        self.assertRaises(exception.InvalidShareAccess,
                          self._zfssa.allow_access_nfs,
                          self.pool,
                          self.project,
                          self.share,
                          access)

        # valid entry
        access.update({"access_type": "ip"})
        arg = self._create_entry("off", access['access_to'])
        self._zfssa.get_share.return_value = details
        self._zfssa.allow_access_nfs(self.pool, self.project, self.share,
                                     access)
        self.assertEqual(1, self._zfssa.get_share.call_count)
        self.assertEqual(1, self._zfssa.modify_share.call_count)
        self._zfssa.get_share.assert_called_with(self.pool, self.project,
                                                 self.share)
        self._zfssa.modify_share.assert_called_with(self.pool, self.project,
                                                    self.share, arg)

        # add another entry
        access.update({"access_to": "10.0.0.1/24"})
        arg = self._create_entry("off", access['access_to'])
        self._zfssa.allow_access_nfs(self.pool, self.project, self.share,
                                     access)
        self.assertEqual(2, self._zfssa.modify_share.call_count)
        self._zfssa.modify_share.assert_called_with(self.pool, self.project,
                                                    self.share, arg)

        # verify modify_share is not called if sharenfs='on'
        details = {"sharenfs": "on"}
        self._zfssa.get_share.return_value = details
        self._zfssa.allow_access_nfs(self.pool, self.project, self.share,
                                     access)
        self.assertEqual(2, self._zfssa.modify_share.call_count)

        # verify modify_share is not called if ip is already in the list
        access.update({"access_to": "10.0.0.1/24"})
        details = self._create_entry("off", access['access_to'])
        self._zfssa.get_share.return_value = details
        self._zfssa.allow_access_nfs(self.pool, self.project, self.share,
                                     access)
        self.assertEqual(2, self._zfssa.modify_share.call_count)

    def test_deny_access_nfs(self):
        self.mock_object(self._zfssa, 'get_share')
        self.mock_object(self._zfssa, 'modify_share')
        data1 = self._create_entry("off", "10.0.0.1")
        access = {
            "access_type": "nonip",
            "access_to": "foo",
        }

        # invalid access_type
        self.assertRaises(exception.InvalidShareAccess,
                          self._zfssa.deny_access_nfs,
                          self.pool,
                          self.project,
                          self.share,
                          access)

        # valid entry
        access.update({"access_type": "ip"})
        self._zfssa.get_share.return_value = data1
        self._zfssa.deny_access_nfs(self.pool, self.project, self.share,
                                    access)
        self.assertEqual(1, self._zfssa.get_share.call_count)
        self.assertEqual(0, self._zfssa.modify_share.call_count)
        self._zfssa.get_share.assert_called_with(self.pool, self.project,
                                                 self.share)

        # another valid entry
        data1 = self._create_entry(data1['sharenfs'], '10.0.0.2/24')
        data2 = self._create_entry(data1['sharenfs'], access['access_to'])
        self._zfssa.get_share.return_value = data2
        self._zfssa.deny_access_nfs(self.pool, self.project, self.share,
                                    access)
        self.assertEqual(2, self._zfssa.get_share.call_count)
        self.assertEqual(1, self._zfssa.modify_share.call_count)
        self._zfssa.get_share.assert_called_with(self.pool, self.project,
                                                 self.share)
        self._zfssa.modify_share.assert_called_with(self.pool, self.project,
                                                    self.share, data1)
manila-2.0.0/manila/tests/share/drivers/hdfs/0000775000567000056710000000000012701407265022226 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/hdfs/test_hdfs_native.py0000664000567000056710000005420212701407107026127 0ustar jenkinsjenkins00000000000000# Copyright 2015 Intel, Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for HDFS native protocol driver module.""" import socket import mock from oslo_concurrency import processutils from oslo_config import cfg import six from manila import context from manila import exception import manila.share.configuration as config import manila.share.drivers.hdfs.hdfs_native as hdfs_native from manila import test from manila.tests import fake_share from manila import utils CONF = cfg.CONF class HDFSNativeShareDriverTestCase(test.TestCase): """Tests HDFSNativeShareDriver.""" def setUp(self): super(HDFSNativeShareDriverTestCase, self).setUp() self._context = context.get_admin_context() self._hdfs_execute = mock.Mock(return_value=('', '')) self.local_ip = '192.168.1.1' CONF.set_default('driver_handles_share_servers', False) CONF.set_default('hdfs_namenode_ip', self.local_ip) CONF.set_default('hdfs_ssh_name', 'fake_sshname') CONF.set_default('hdfs_ssh_pw', 'fake_sshpw') CONF.set_default('hdfs_ssh_private_key', 'fake_sshkey') self.fake_conf = config.Configuration(None) self._driver = hdfs_native.HDFSNativeShareDriver( execute=self._hdfs_execute, configuration=self.fake_conf) self.hdfs_bin = 'hdfs' self._driver._hdfs_bin = 'fake_hdfs_bin' self.share = fake_share.fake_share(share_proto='HDFS') self.snapshot = fake_share.fake_snapshot(share_proto='HDFS') self.access = fake_share.fake_access(access_type='user') self.fakesharepath = 'hdfs://1.2.3.4:5/share-0' self.fakesnapshotpath = '/share-0/.snapshot/snapshot-0' socket.gethostname = mock.Mock(return_value='testserver') socket.gethostbyname_ex = mock.Mock(return_value=( 'localhost', ['localhost.localdomain', 'testserver'], ['127.0.0.1', self.local_ip])) def test_do_setup(self): self._driver.do_setup(self._context) self.assertEqual(self._driver._hdfs_bin, self.hdfs_bin) def test_create_share(self): self._driver._create_share = mock.Mock() self._driver._get_share_path = mock.Mock( return_value=self.fakesharepath) result = self._driver.create_share(self._context, self.share, share_server=None) self._driver._create_share.assert_called_once_with(self.share) self._driver._get_share_path.assert_called_once_with(self.share) self.assertEqual(self.fakesharepath, result) def test_create_share_unsupported_proto(self): self._driver._get_share_path = mock.Mock() self.assertRaises(exception.HDFSException, self._driver.create_share, self._context, fake_share.fake_share(), share_server=None) self.assertFalse(self._driver._get_share_path.called) def test__set_share_size(self): share_dir = '/' + self.share['name'] sizestr = six.text_type(self.share['size']) + 'g' self._driver._hdfs_execute = mock.Mock(return_value=True) self._driver._set_share_size(self.share) 
self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfsadmin', '-setSpaceQuota', sizestr, share_dir) def test__set_share_size_exception(self): share_dir = '/' + self.share['name'] sizestr = six.text_type(self.share['size']) + 'g' self._driver._hdfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.HDFSException, self._driver._set_share_size, self.share) self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfsadmin', '-setSpaceQuota', sizestr, share_dir) def test__set_share_size_with_new_size(self): share_dir = '/' + self.share['name'] new_size = 'fake_size' sizestr = new_size + 'g' self._driver._hdfs_execute = mock.Mock(return_value=True) self._driver._set_share_size(self.share, new_size) self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfsadmin', '-setSpaceQuota', sizestr, share_dir) def test__create_share(self): share_dir = '/' + self.share['name'] self._driver._hdfs_execute = mock.Mock(return_value=True) self._driver._set_share_size = mock.Mock() self._driver._create_share(self.share) self._driver._hdfs_execute.assert_any_call( 'fake_hdfs_bin', 'dfs', '-mkdir', share_dir) self._driver._set_share_size.assert_called_once_with(self.share) self._driver._hdfs_execute.assert_any_call( 'fake_hdfs_bin', 'dfsadmin', '-allowSnapshot', share_dir) def test__create_share_exception(self): share_dir = '/' + self.share['name'] self._driver._hdfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.HDFSException, self._driver._create_share, self.share) self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfs', '-mkdir', share_dir) def test_create_share_from_empty_snapshot(self): return_hdfs_execute = (None, None) self._driver._hdfs_execute = mock.Mock( return_value=return_hdfs_execute) self._driver._create_share = mock.Mock(return_value=True) self._driver._get_share_path = mock.Mock(return_value=self. fakesharepath) self._driver._get_snapshot_path = mock.Mock(return_value=self. fakesnapshotpath) result = self._driver.create_share_from_snapshot(self._context, self.share, self.snapshot, share_server=None) self._driver._create_share.assert_called_once_with(self.share) self._driver._get_snapshot_path.assert_called_once_with( self.snapshot) self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfs', '-ls', self.fakesnapshotpath) self._driver._get_share_path.assert_called_once_with(self.share) self.assertEqual(self.fakesharepath, result) def test_create_share_from_snapshot(self): return_hdfs_execute = ("fake_content", None) self._driver._hdfs_execute = mock.Mock( return_value=return_hdfs_execute) self._driver._create_share = mock.Mock(return_value=True) self._driver._get_share_path = mock.Mock(return_value=self. fakesharepath) self._driver._get_snapshot_path = mock.Mock(return_value=self. 
fakesnapshotpath) result = self._driver.create_share_from_snapshot(self._context, self.share, self.snapshot, share_server=None) self._driver._create_share.assert_called_once_with(self.share) self._driver._get_snapshot_path.assert_called_once_with( self.snapshot) calls = [mock.call('fake_hdfs_bin', 'dfs', '-ls', self.fakesnapshotpath), mock.call('fake_hdfs_bin', 'dfs', '-cp', self.fakesnapshotpath + '/*', '/' + self.share['name'])] self._driver._hdfs_execute.assert_has_calls(calls) self._driver._get_share_path.assert_called_once_with(self.share) self.assertEqual(self.fakesharepath, result) def test_create_share_from_snapshot_exception(self): self._driver._create_share = mock.Mock(return_value=True) self._driver._get_snapshot_path = mock.Mock(return_value=self. fakesnapshotpath) self._driver._get_share_path = mock.Mock(return_value=self. fakesharepath) self._driver._hdfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.HDFSException, self._driver.create_share_from_snapshot, self._context, self.share, self.snapshot, share_server=None) self._driver._create_share.assert_called_once_with(self.share) self._driver._get_snapshot_path.assert_called_once_with(self.snapshot) self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfs', '-ls', self.fakesnapshotpath) self.assertFalse(self._driver._get_share_path.called) def test_create_snapshot(self): self._driver._hdfs_execute = mock.Mock(return_value=True) self._driver.create_snapshot(self._context, self.snapshot, share_server=None) self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfs', '-createSnapshot', '/' + self.snapshot['share_name'], self.snapshot['name']) def test_create_snapshot_exception(self): self._driver._hdfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.HDFSException, self._driver.create_snapshot, self._context, self.snapshot, share_server=None) self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfs', '-createSnapshot', '/' + self.snapshot['share_name'], self.snapshot['name']) def test_delete_share(self): self._driver._hdfs_execute = mock.Mock(return_value=True) self._driver.delete_share(self._context, self.share, share_server=None) self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfs', '-rm', '-r', '/' + self.share['name']) def test_delete_share_exception(self): self._driver._hdfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.HDFSException, self._driver.delete_share, self._context, self.share, share_server=None) self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfs', '-rm', '-r', '/' + self.share['name']) def test_delete_snapshot(self): self._driver._hdfs_execute = mock.Mock(return_value=True) self._driver.delete_snapshot(self._context, self.snapshot, share_server=None) self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfs', '-deleteSnapshot', '/' + self.snapshot['share_name'], self.snapshot['name']) def test_delete_snapshot_exception(self): self._driver._hdfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.HDFSException, self._driver.delete_snapshot, self._context, self.snapshot, share_server=None) self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfs', '-deleteSnapshot', '/' + self.snapshot['share_name'], self.snapshot['name']) def test_allow_access(self): self._driver._hdfs_execute = mock.Mock( return_value=['', '']) 
share_dir = '/' + self.share['name'] user_access = ':'.join([self.access['access_type'], self.access['access_to'], 'rwx']) cmd = ['fake_hdfs_bin', 'dfs', '-setfacl', '-m', '-R', user_access, share_dir] self._driver.allow_access(self._context, self.share, self.access, share_server=None) self._driver._hdfs_execute.assert_called_once_with( *cmd, check_exit_code=True) def test_allow_access_invalid_access_type(self): self.assertRaises(exception.InvalidShareAccess, self._driver.allow_access, self._context, self.share, fake_share.fake_access( access_type='invalid_access_type'), share_server=None) def test_allow_access_invalid_access_level(self): self.assertRaises(exception.InvalidShareAccess, self._driver.allow_access, self._context, self.share, fake_share.fake_access( access_level='invalid_access_level'), share_server=None) def test_allow_access_exception(self): self._driver._hdfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError) share_dir = '/' + self.share['name'] user_access = ':'.join([self.access['access_type'], self.access['access_to'], 'rwx']) cmd = ['fake_hdfs_bin', 'dfs', '-setfacl', '-m', '-R', user_access, share_dir] self.assertRaises(exception.HDFSException, self._driver.allow_access, self._context, self.share, self.access, share_server=None) self._driver._hdfs_execute.assert_called_once_with( *cmd, check_exit_code=True) def test_deny_access(self): self._driver._hdfs_execute = mock.Mock(return_value=['', '']) share_dir = '/' + self.share['name'] access_name = ':'.join([self.access['access_type'], self.access['access_to']]) cmd = ['fake_hdfs_bin', 'dfs', '-setfacl', '-x', '-R', access_name, share_dir] self._driver.deny_access(self._context, self.share, self.access, share_server=None) self._driver._hdfs_execute.assert_called_once_with( *cmd, check_exit_code=True) def test_deny_access_exception(self): self._driver._hdfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError) share_dir = '/' + self.share['name'] access_name = ':'.join([self.access['access_type'], self.access['access_to']]) cmd = ['fake_hdfs_bin', 'dfs', '-setfacl', '-x', '-R', access_name, share_dir] self.assertRaises(exception.HDFSException, self._driver.deny_access, self._context, self.share, self.access, share_server=None) self._driver._hdfs_execute.assert_called_once_with( *cmd, check_exit_code=True) def test_extend_share(self): new_size = "fake_size" self._driver._set_share_size = mock.Mock() self._driver.extend_share(self.share, new_size) self._driver._set_share_size.assert_called_once_with( self.share, new_size) def test__check_hdfs_state_healthy(self): fake_out = "fakeinfo\n...Status: HEALTHY" self._driver._hdfs_execute = mock.Mock(return_value=(fake_out, '')) result = self._driver._check_hdfs_state() self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'fsck', '/') self.assertTrue(result) def test__check_hdfs_state_down(self): fake_out = "fakeinfo\n...Status: DOWN" self._driver._hdfs_execute = mock.Mock(return_value=(fake_out, '')) result = self._driver._check_hdfs_state() self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'fsck', '/') self.assertFalse(result) def test__check_hdfs_state_exception(self): self._driver._hdfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.HDFSException, self._driver._check_hdfs_state) self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'fsck', '/') def test__get_available_capacity(self): fake_out = 'Configured Capacity: 2.4\n' + \ 'Total Capacity: 2\n' + \ 'DFS 
free: 1' self._driver._hdfs_execute = mock.Mock(return_value=(fake_out, '')) total, free = self._driver._get_available_capacity() self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfsadmin', '-report') self.assertEqual(2, total) self.assertEqual(1, free) def test__get_available_capacity_exception(self): self._driver._hdfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.HDFSException, self._driver._get_available_capacity) self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfsadmin', '-report') def test_get_share_stats_refresh_false(self): self._driver._stats = {'fake_key': 'fake_value'} result = self._driver.get_share_stats(False) self.assertEqual(self._driver._stats, result) def test_get_share_stats_refresh_true(self): self._driver._get_available_capacity = mock.Mock( return_value=(11111.0, 12345.0)) result = self._driver.get_share_stats(True) expected_keys = [ 'qos', 'driver_version', 'share_backend_name', 'free_capacity_gb', 'total_capacity_gb', 'driver_handles_share_servers', 'reserved_percentage', 'vendor_name', 'storage_protocol', ] for key in expected_keys: self.assertIn(key, result) self.assertEqual('HDFS', result['storage_protocol']) self._driver._get_available_capacity.assert_called_once_with() def test__hdfs_local_execute(self): cmd = 'testcmd' self.mock_object(utils, 'execute', mock.Mock(return_value=True)) self._driver._hdfs_local_execute(cmd) utils.execute.assert_called_once_with(cmd, run_as_root=False) def test__hdfs_remote_execute(self): self._driver._run_ssh = mock.Mock(return_value=True) cmd = 'testcmd' self._driver._hdfs_remote_execute(cmd, check_exit_code=True) self._driver._run_ssh.assert_called_once_with( self.local_ip, tuple([cmd]), True) def test__run_ssh(self): ssh_output = 'fake_ssh_output' cmd_list = ['fake', 'cmd'] ssh = mock.Mock() ssh.get_transport = mock.Mock() ssh.get_transport().is_active = mock.Mock(return_value=True) ssh_pool = mock.Mock() ssh_pool.create = mock.Mock(return_value=ssh) self.mock_object(utils, 'SSHPool', mock.Mock(return_value=ssh_pool)) self.mock_object(processutils, 'ssh_execute', mock.Mock(return_value=ssh_output)) result = self._driver._run_ssh(self.local_ip, cmd_list) utils.SSHPool.assert_called_once_with( self._driver.configuration.hdfs_namenode_ip, self._driver.configuration.hdfs_ssh_port, self._driver.configuration.ssh_conn_timeout, self._driver.configuration.hdfs_ssh_name, password=self._driver.configuration.hdfs_ssh_pw, privatekey=self._driver.configuration.hdfs_ssh_private_key, min_size=self._driver.configuration.ssh_min_pool_conn, max_size=self._driver.configuration.ssh_max_pool_conn) ssh_pool.create.assert_called_once_with() ssh.get_transport().is_active.assert_called_once_with() processutils.ssh_execute.assert_called_once_with( ssh, 'fake cmd', check_exit_code=False) self.assertEqual(ssh_output, result) def test__run_ssh_exception(self): cmd_list = ['fake', 'cmd'] ssh = mock.Mock() ssh.get_transport = mock.Mock() ssh.get_transport().is_active = mock.Mock(return_value=True) ssh_pool = mock.Mock() ssh_pool.create = mock.Mock(return_value=ssh) self.mock_object(utils, 'SSHPool', mock.Mock(return_value=ssh_pool)) self.mock_object(processutils, 'ssh_execute', mock.Mock(side_effect=Exception)) self.assertRaises(exception.HDFSException, self._driver._run_ssh, self.local_ip, cmd_list) utils.SSHPool.assert_called_once_with( self._driver.configuration.hdfs_namenode_ip, self._driver.configuration.hdfs_ssh_port, self._driver.configuration.ssh_conn_timeout, 
self._driver.configuration.hdfs_ssh_name, password=self._driver.configuration.hdfs_ssh_pw, privatekey=self._driver.configuration.hdfs_ssh_private_key, min_size=self._driver.configuration.ssh_min_pool_conn, max_size=self._driver.configuration.ssh_max_pool_conn) ssh_pool.create.assert_called_once_with() ssh.get_transport().is_active.assert_called_once_with() processutils.ssh_execute.assert_called_once_with( ssh, 'fake cmd', check_exit_code=False) manila-2.0.0/manila/tests/share/drivers/hdfs/__init__.py0000664000567000056710000000000012701407107024320 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/hitachi/0000775000567000056710000000000012701407265022713 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/hitachi/__init__.py0000664000567000056710000000000012701407107025005 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/hitachi/test_hds_hnas.py0000664000567000056710000005566712701407107026131 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt import mock from oslo_config import cfg from manila import exception import manila.share.configuration import manila.share.driver from manila.share.drivers.hitachi import hds_hnas from manila.share.drivers.hitachi import ssh from manila import test CONF = cfg.CONF share = { 'id': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a', 'name': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a', 'size': 50, 'host': 'hnas', 'share_proto': 'NFS', 'share_type_id': 1, 'share_network_id': 'bb329e24-3bdb-491d-acfd-dfe70c09b98d', 'share_server_id': 'cc345a53-491d-acfd-3bdb-dfe70c09b98d', 'export_locations': [{'path': '172.24.44.10:/shares/' 'aa4a7710-f326-41fb-ad18-b4ad587fc87a'}], } share_invalid_host = { 'id': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a', 'name': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a', 'size': 50, 'host': 'invalid', 'share_proto': 'NFS', 'share_type_id': 1, 'share_network_id': 'bb329e24-3bdb-491d-acfd-dfe70c09b98d', 'share_server_id': 'cc345a53-491d-acfd-3bdb-dfe70c09b98d', 'export_locations': [{'path': '172.24.44.10:/shares/' 'aa4a7710-f326-41fb-ad18-b4ad587fc87a'}], } access = { 'id': 'acdc7172b-fe07-46c4-b78f-df3e0324ccd0', 'access_type': 'ip', 'access_to': '172.24.44.200', 'access_level': 'rw', 'state': 'active', } snapshot = { 'id': 'abba6d9b-f29c-4bf7-aac1-618cda7aaf0f', 'share_id': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a', } invalid_share = { 'id': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a', 'name': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a', 'size': 100, 'host': 'hnas', 'share_proto': 'CIFS', } invalid_access_type = { 'id': 'acdc7172b-fe07-46c4-b78f-df3e0324ccd0', 'access_type': 'user', 'access_to': 'manila_user', 'access_level': 'rw', 'state': 'active', } invalid_access_level = { 'id': 'acdc7172b-fe07-46c4-b78f-df3e0324ccd0', 'access_type': 'ip', 'access_to': 'manila_user', 'access_level': '777', 'state': 'active', } @ddt.ddt class HDSHNASTestCase(test.TestCase): def setUp(self): super(HDSHNASTestCase, 
self).setUp() CONF.set_default('driver_handles_share_servers', False) CONF.hds_hnas_evs_id = '2' CONF.hds_hnas_evs_ip = '172.24.44.10' CONF.hds_hnas_ip = '172.24.44.1' CONF.hds_hnas_ip_port = 'hds_hnas_ip_port' CONF.hds_hnas_user = 'hds_hnas_user' CONF.hds_hnas_password = 'hds_hnas_password' CONF.hds_hnas_file_system = 'file_system' CONF.hds_hnas_ssh_private_key = 'private_key' CONF.hds_hnas_cluster_admin_ip0 = None CONF.hds_hnas_stalled_job_timeout = 10 CONF.hds_hnas_driver_helper = ('manila.share.drivers.hitachi.ssh.' 'HNASSSHBackend') self.fake_conf = manila.share.configuration.Configuration(None) self.fake_private_storage = mock.Mock() self.mock_object(self.fake_private_storage, 'get', mock.Mock(return_value=None)) self.mock_object(self.fake_private_storage, 'delete', mock.Mock(return_value=None)) self._driver = hds_hnas.HDSHNASDriver( private_storage=self.fake_private_storage, configuration=self.fake_conf) self._driver.backend_name = "hnas" self.mock_log = self.mock_object(hds_hnas, 'LOG') @ddt.data('hds_hnas_driver_helper', 'hds_hnas_evs_id', 'hds_hnas_evs_ip', 'hds_hnas_ip', 'hds_hnas_user') def test_init_invalid_conf_parameters(self, attr_name): self.mock_object(manila.share.driver.ShareDriver, '__init__') setattr(CONF, attr_name, None) self.assertRaises(exception.InvalidParameterValue, self._driver.__init__) def test_init_invalid_credentials(self): self.mock_object(manila.share.driver.ShareDriver, '__init__') CONF.hds_hnas_password = None CONF.hds_hnas_ssh_private_key = None self.assertRaises(exception.InvalidParameterValue, self._driver.__init__) def test_update_access(self): access1 = { 'access_type': 'ip', 'access_to': '172.24.10.10', 'access_level': 'rw' } access2 = { 'access_type': 'ip', 'access_to': '188.100.20.10', 'access_level': 'ro' } access_list = [access1, access2] self.mock_object(self._driver, '_ensure_share') self.mock_object(ssh.HNASSSHBackend, "update_access_rule", mock.Mock()) self._driver.update_access('context', share, access_list, [], []) ssh.HNASSSHBackend.update_access_rule.assert_called_once_with( share['id'], [access1['access_to'] + '(' + access1['access_level'] + ',norootsquash)', access2['access_to'] + '(' + access2['access_level'] + ')']) self.assertTrue(self.mock_log.debug.called) def test_update_access_ip_exception(self): access1 = { 'access_type': 'ip', 'access_to': '188.100.20.10', 'access_level': 'ro' } access2 = { 'access_type': 'something', 'access_to': '172.24.10.10', 'access_level': 'rw' } access_list = [access1, access2] self.mock_object(self._driver, '_ensure_share') self.assertRaises(exception.InvalidShareAccess, self._driver.update_access, 'context', share, access_list, [], []) def test_update_access_not_found_exception(self): access1 = { 'access_type': 'ip', 'access_to': '188.100.20.10', 'access_level': 'ro' } access2 = { 'access_type': 'something', 'access_to': '172.24.10.10', 'access_level': 'rw' } access_list = [access1, access2] self.mock_object(self._driver, '_ensure_share', mock.Mock( side_effect=exception.HNASItemNotFoundException(msg='fake'))) self.assertRaises(exception.ShareResourceNotFound, self._driver.update_access, 'context', share, access_list, add_rules=[], delete_rules=[]) def test_create_share(self): self.mock_object(hds_hnas.HDSHNASDriver, "_check_fs_mounted", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "vvol_create", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "quota_add", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "nfs_export_add", mock.Mock()) result = self._driver.create_share('context', share) 
self.assertEqual(self._driver.hnas_evs_ip + ":/shares/" + share['id'], result) self.assertTrue(self.mock_log.debug.called) ssh.HNASSSHBackend.vvol_create.assert_called_once_with(share['id']) ssh.HNASSSHBackend.quota_add.assert_called_once_with(share['id'], share['size']) ssh.HNASSSHBackend.nfs_export_add.assert_called_once_with(share['id']) def test_create_share_export_error(self): self.mock_object(hds_hnas.HDSHNASDriver, "_check_fs_mounted", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "vvol_create", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "quota_add", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "nfs_export_add", mock.Mock( side_effect=exception.HNASBackendException('msg'))) self.mock_object(ssh.HNASSSHBackend, "vvol_delete", mock.Mock()) self.assertRaises(exception.HNASBackendException, self._driver.create_share, 'context', share) self.assertTrue(self.mock_log.debug.called) self.assertTrue(self.mock_log.exception.called) ssh.HNASSSHBackend.vvol_create.assert_called_once_with(share['id']) ssh.HNASSSHBackend.quota_add.assert_called_once_with(share['id'], share['size']) ssh.HNASSSHBackend.nfs_export_add.assert_called_once_with(share['id']) ssh.HNASSSHBackend.vvol_delete.assert_called_once_with(share['id']) def test_create_share_invalid_share_protocol(self): self.mock_object(hds_hnas.HDSHNASDriver, "_create_share", mock.Mock(return_value="path")) self.assertRaises(exception.ShareBackendException, self._driver.create_share, 'context', invalid_share) def test_delete_share(self): self.mock_object(hds_hnas.HDSHNASDriver, "_get_hnas_share_id", mock.Mock(return_value=share['id'])) self.mock_object(hds_hnas.HDSHNASDriver, "_check_fs_mounted", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "nfs_export_del", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "vvol_delete", mock.Mock()) self._driver.delete_share('context', share) self.assertTrue(self.mock_log.debug.called) ssh.HNASSSHBackend.nfs_export_del.assert_called_once_with(share['id']) ssh.HNASSSHBackend.vvol_delete.assert_called_once_with(share['id']) def test_create_snapshot(self): self.mock_object(hds_hnas.HDSHNASDriver, "_ensure_share") self.mock_object(hds_hnas.HDSHNASDriver, "_get_hnas_share_id", mock.Mock(return_value=share['id'])) self.mock_object(ssh.HNASSSHBackend, "get_host_list", mock.Mock( return_value=['172.24.44.200(rw)'])) self.mock_object(ssh.HNASSSHBackend, "update_access_rule", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "tree_clone", mock.Mock()) self._driver.create_snapshot('context', snapshot) ssh.HNASSSHBackend.get_host_list.assert_called_once_with(share['id']) ssh.HNASSSHBackend.update_access_rule.assert_any_call( share['id'], ['172.24.44.200(ro)']) ssh.HNASSSHBackend.update_access_rule.assert_any_call( share['id'], ['172.24.44.200(rw)']) ssh.HNASSSHBackend.tree_clone.assert_called_once_with( '/shares/' + share['id'], '/snapshots/' + share['id'] + '/' + snapshot['id']) def test_create_snapshot_first_snapshot(self): self.mock_object(hds_hnas.HDSHNASDriver, "_ensure_share") self.mock_object(hds_hnas.HDSHNASDriver, "_get_hnas_share_id", mock.Mock(return_value=share['id'])) self.mock_object(ssh.HNASSSHBackend, "get_host_list", mock.Mock( return_value=['172.24.44.200(rw)'])) self.mock_object(ssh.HNASSSHBackend, "update_access_rule", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "tree_clone", mock.Mock( side_effect=exception.HNASNothingToCloneException('msg'))) self.mock_object(ssh.HNASSSHBackend, "create_directory", mock.Mock()) self._driver.create_snapshot('context', snapshot) 
self.assertTrue(self.mock_log.warning.called) ssh.HNASSSHBackend.get_host_list.assert_called_once_with(share['id']) ssh.HNASSSHBackend.update_access_rule.assert_any_call( share['id'], ['172.24.44.200(ro)']) ssh.HNASSSHBackend.update_access_rule.assert_any_call( share['id'], ['172.24.44.200(rw)']) ssh.HNASSSHBackend.create_directory.assert_called_once_with( '/snapshots/' + share['id'] + '/' + snapshot['id']) def test_delete_snapshot(self): self.mock_object(hds_hnas.HDSHNASDriver, "_get_hnas_share_id", mock.Mock(return_value=share['id'])) self.mock_object(ssh.HNASSSHBackend, "tree_delete", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "delete_directory", mock.Mock()) self._driver.delete_snapshot('context', snapshot) self.assertTrue(self.mock_log.debug.called) self.assertTrue(self.mock_log.info.called) ssh.HNASSSHBackend.tree_delete.assert_called_once_with( '/snapshots/' + share['id'] + '/' + snapshot['id']) ssh.HNASSSHBackend.delete_directory.assert_called_once_with( '/snapshots/' + share['id']) def test_ensure_share(self): self.mock_object(hds_hnas.HDSHNASDriver, "_get_hnas_share_id", mock.Mock(return_value=share['id'])) self.mock_object(hds_hnas.HDSHNASDriver, "_check_fs_mounted", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "check_vvol", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "check_quota", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "check_export", mock.Mock()) result = self._driver.ensure_share('context', share) self.assertEqual(['172.24.44.10:/shares/' + share['id']], result) ssh.HNASSSHBackend.check_vvol.assert_called_once_with(share['id']) ssh.HNASSSHBackend.check_quota.assert_called_once_with(share['id']) ssh.HNASSSHBackend.check_export.assert_called_once_with(share['id']) def test_shrink_share(self): self.mock_object(hds_hnas.HDSHNASDriver, "_get_hnas_share_id", mock.Mock(return_value=share['id'])) self.mock_object(hds_hnas.HDSHNASDriver, "_ensure_share", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "get_share_usage", mock.Mock( return_value=10)) self.mock_object(ssh.HNASSSHBackend, "modify_quota", mock.Mock()) self._driver.shrink_share(share, 11) ssh.HNASSSHBackend.get_share_usage.assert_called_once_with(share['id']) ssh.HNASSSHBackend.modify_quota.assert_called_once_with(share['id'], 11) def test_shrink_share_new_size_lower_than_usage(self): self.mock_object(hds_hnas.HDSHNASDriver, "_get_hnas_share_id", mock.Mock(return_value=share['id'])) self.mock_object(hds_hnas.HDSHNASDriver, "_ensure_share", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "get_share_usage", mock.Mock( return_value=10)) self.assertRaises(exception.ShareShrinkingPossibleDataLoss, self._driver.shrink_share, share, 9) ssh.HNASSSHBackend.get_share_usage.assert_called_once_with(share['id']) def test_extend_share(self): self.mock_object(hds_hnas.HDSHNASDriver, "_get_hnas_share_id", mock.Mock(return_value=share['id'])) self.mock_object(hds_hnas.HDSHNASDriver, "_ensure_share", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "get_stats", mock.Mock( return_value=(500, 200))) self.mock_object(ssh.HNASSSHBackend, "modify_quota", mock.Mock()) self._driver.extend_share(share, 150) ssh.HNASSSHBackend.get_stats.assert_called_once_with() ssh.HNASSSHBackend.modify_quota.assert_called_once_with(share['id'], 150) def test_extend_share_with_no_available_space_in_fs(self): self.mock_object(hds_hnas.HDSHNASDriver, "_get_hnas_share_id", mock.Mock(return_value=share['id'])) self.mock_object(hds_hnas.HDSHNASDriver, "_ensure_share", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "get_stats", 
mock.Mock( return_value=(500, 200))) self.mock_object(ssh.HNASSSHBackend, "modify_quota", mock.Mock()) self.assertRaises(exception.HNASBackendException, self._driver.extend_share, share, 1000) ssh.HNASSSHBackend.get_stats.assert_called_once_with() def test_manage_existing(self): self.mock_object(hds_hnas.HDSHNASDriver, "_get_hnas_share_id", mock.Mock(return_value=share['id'])) self.mock_object(hds_hnas.HDSHNASDriver, "_ensure_share", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "get_share_quota", mock.Mock( return_value=1)) self._driver.manage_existing(share, 'option') ssh.HNASSSHBackend.get_share_quota.assert_called_once_with(share['id']) def test_manage_existing_no_quota(self): self.mock_object(hds_hnas.HDSHNASDriver, "_get_hnas_share_id", mock.Mock(return_value=share['id'])) self.mock_object(hds_hnas.HDSHNASDriver, "_ensure_share", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "get_share_quota", mock.Mock( return_value=None)) self.assertRaises(exception.ManageInvalidShare, self._driver.manage_existing, share, 'option') ssh.HNASSSHBackend.get_share_quota.assert_called_once_with(share['id']) def test_manage_existing_wrong_share_id(self): self.mock_object(self.fake_private_storage, 'get', mock.Mock(return_value='Wrong_share_id')) self.assertRaises(exception.HNASBackendException, self._driver.manage_existing, share, 'option') def test_manage_existing_wrong_path_format(self): share['export_locations'] = [{'path': ':/'}] self.assertRaises(exception.ShareBackendException, self._driver.manage_existing, share, 'option') def test_manage_existing_wrong_evs_ip(self): share['export_locations'] = [{'path': '172.24.44.189:/shares/' 'aa4a7710-f326-41fb-ad18-'}] self.assertRaises(exception.ShareBackendException, self._driver.manage_existing, share, 'option') def test_manage_existing_invalid_host(self): self.assertRaises(exception.ShareBackendException, self._driver.manage_existing, share_invalid_host, 'option') def test_unmanage(self): self._driver.unmanage(share) self.assertTrue(self.fake_private_storage.delete.called) self.assertTrue(self.mock_log.info.called) def test_get_network_allocations_number(self): result = self._driver.get_network_allocations_number() self.assertEqual(0, result) def test_create_share_from_snapshot(self): self.mock_object(hds_hnas.HDSHNASDriver, "_check_fs_mounted", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "vvol_create", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "quota_add", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "tree_clone", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "nfs_export_add", mock.Mock()) result = self._driver.create_share_from_snapshot('context', share, snapshot) self.assertEqual('172.24.44.10:/shares/' + share['id'], result) ssh.HNASSSHBackend.vvol_create.assert_called_once_with(share['id']) ssh.HNASSSHBackend.quota_add.assert_called_once_with(share['id'], share['size']) ssh.HNASSSHBackend.tree_clone.assert_called_once_with( '/snapshots/' + snapshot['share_id'] + '/' + snapshot['id'], '/shares/' + share['id']) ssh.HNASSSHBackend.nfs_export_add.assert_called_once_with(share['id']) def test_create_share_from_snapshot_empty_snapshot(self): self.mock_object(hds_hnas.HDSHNASDriver, "_check_fs_mounted", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "vvol_create", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "quota_add", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "tree_clone", mock.Mock( side_effect=exception.HNASNothingToCloneException('msg'))) self.mock_object(ssh.HNASSSHBackend, "nfs_export_add", mock.Mock()) 
result = self._driver.create_share_from_snapshot('context', share, snapshot) self.assertEqual('172.24.44.10:/shares/' + share['id'], result) self.assertTrue(self.mock_log.warning.called) ssh.HNASSSHBackend.vvol_create.assert_called_once_with(share['id']) ssh.HNASSSHBackend.quota_add.assert_called_once_with(share['id'], share['size']) ssh.HNASSSHBackend.tree_clone.assert_called_once_with( '/snapshots/' + snapshot['share_id'] + '/' + snapshot['id'], '/shares/' + share['id']) ssh.HNASSSHBackend.nfs_export_add.assert_called_once_with(share['id']) def test__check_fs_mounted(self): self.mock_object(ssh.HNASSSHBackend, 'check_fs_mounted', mock.Mock( return_value=True)) self._driver._check_fs_mounted() ssh.HNASSSHBackend.check_fs_mounted.assert_called_once_with() def test__check_fs_mounted_not_mounted(self): self.mock_object(ssh.HNASSSHBackend, 'check_fs_mounted', mock.Mock( return_value=False)) self.mock_object(ssh.HNASSSHBackend, 'mount', mock.Mock()) self._driver._check_fs_mounted() ssh.HNASSSHBackend.check_fs_mounted.assert_called_once_with() ssh.HNASSSHBackend.mount.assert_called_once_with() self.assertTrue(self.mock_log.debug.called) def test__update_share_stats(self): fake_data = { 'share_backend_name': self._driver.backend_name, 'driver_handles_share_servers': self._driver.driver_handles_share_servers, 'vendor_name': 'HDS', 'driver_version': '2.0.0', 'storage_protocol': 'NFS', 'total_capacity_gb': 1000, 'free_capacity_gb': 200, 'reserved_percentage': hds_hnas.CONF.reserved_share_percentage, 'qos': False, 'thin_provisioning': True, } self.mock_object(ssh.HNASSSHBackend, 'get_stats', mock.Mock( return_value=(1000, 200))) self.mock_object(hds_hnas.HDSHNASDriver, "_check_fs_mounted", mock.Mock()) self.mock_object(manila.share.driver.ShareDriver, '_update_share_stats', mock.Mock()) self._driver._update_share_stats() self.assertTrue(self._driver.hnas.get_stats.called) (manila.share.driver.ShareDriver._update_share_stats. assert_called_once_with(fake_data)) self.assertTrue(self.mock_log.info.called) manila-2.0.0/manila/tests/share/drivers/hitachi/test_ssh.py0000664000567000056710000011203712701407107025120 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import time import ddt import mock from oslo_concurrency import processutils as putils from oslo_config import cfg import paramiko import six from manila import exception from manila.share.drivers.hitachi import ssh from manila import test from manila import utils as mutils CONF = cfg.CONF HNAS_RESULT_empty = "" HNAS_RESULT_limits = """ Filesystem Ensure on span fake_fs: Current capacity 50GiB Thin provision: disabled Filesystem is confined to: 100GiB (Run 'filesystem-confine') Free space on span allows expansion to: 143GiB (Run 'span-expand') Chunk size allows growth to: 1069GiB (This is a conservative estimate) Largest filesystem that can be checked: 262144GiB (This is a hard limit) This server model allows growth to: 262144GiB (Upgrade the server) """ HNAS_RESULT_expdel = """Deleting the export '/dir1' on fs 'fake_fs'... NFS Export Delete: Export successfully deleted""" HNAS_RESULT_vvoldel = """ Warning: Clearing dangling space trackers from empty vivol""" HNAS_RESULT_selectfs = "Current selected file system: fake_fs, number(1)" HNAS_RESULT_fs = """ \ Instance name Dev On span State EVS Cap/GiB Confined Flag ----------------- ---- ------- ------ --- ------- -------- ---- Filesystem 8e6e2c85-fake-long-filesystem-b9b4-e4b09993841e: 8e6e2c8..9993841e 1057 fake_span Mount 2 4 3 fake_fs 1051 fake_span NoEVS - 100 1024 file_system 1055 fake_span Mount 2 4 5 1 """ HNAS_RESULT_u_fs = """ \ Instance name Dev On span State EVS Cap/GiB Confined Flag ----------------- ---- ------- ------ --- ------- -------- ---- file_system 1055 fake_span Umount 2 4 5 file_system2 1050 fake_span2 NoEVS - 10 0 1 fake_fs 1051 fake_span Umount 2 100 1024 """ HNAS_RESULT_one_fs = """ \ Instance name Dev On span State EVS Cap/GiB Confined Flag ----------------- ---- ------- ------ --- ------- -------- ---- fake_fs 1051 fake_span Mount 2 100 1024 1""" HNAS_RESULT_expadd = "NFS Export Add: Export added successfully" HNAS_RESULT_vvol = """vvol_test email : root : /vvol_test tag : 39 usage bytes : 0 B files: 1 last modified: 2015-06-23 22:36:12.830698800+00:00""" HNAS_RESULT_vvol_error = "The virtual volume does not exist." HNAS_RESULT_mount = """ \ Request to mount file system fake_fs submitted successfully. 
File system fake_fs successfully mounted.""" HNAS_RESULT_quota = """Type : Explicit Target : ViVol: vvol_test Usage : 1 GB Limit : 5 GB (Hard) Warning : Unset Critical : Unset Reset : 5% (51.2 MB) File Count : 1 Limit : Unset Warning : Unset Critical : Unset Reset : 5% (0) Generate Events : Disabled Global id : 28a3c9f8-ae05-11d0-9025-836896aada5d Last modified : 2015-06-23 22:37:17.363660800+00:00 """ HNAS_RESULT_quota_tb = """Type : Explicit Target : ViVol: vvol_test Usage : 1 TB Limit : 1 TB (Hard) Warning : Unset Critical : Unset Reset : 5% (51.2 MB) File Count : 1 Limit : Unset Warning : Unset Critical : Unset Reset : 5% (0) Generate Events : Disabled Global id : 28a3c9f8-ae05-11d0-9025-836896aada5d Last modified : 2015-06-23 22:37:17.363660800+00:00 """ HNAS_RESULT_quota_mb = """Type : Explicit Target : ViVol: vvol_test Usage : 20 MB Limit : 500 MB (Hard) Warning : Unset Critical : Unset Reset : 5% (51.2 MB) File Count : 1 Limit : Unset Warning : Unset Critical : Unset Reset : 5% (0) Generate Events : Disabled Global id : 28a3c9f8-ae05-11d0-9025-836896aada5d Last modified : 2015-06-23 22:37:17.363660800+00:00 """ HNAS_RESULT_quota_unset = """Type : Explicit Target : ViVol: vvol_test Usage : 0 B Limit : Unset Warning : Unset Critical : Unset Reset : 5% (51.2 MB) File Count : 1 Limit : Unset Warning : Unset Critical : Unset Reset : 5% (0) Generate Events : Disabled Global id : 28a3c9f8-ae05-11d0-9025-836896aada5d Last modified : 2015-06-23 22:37:17.363660800+00:00 """ HNAS_RESULT_quota_err = """No quotas matching specified filter criteria. """ HNAS_RESULT_export = """Export name: vvol_test Export path: /vvol_test File system label: file_system File system size: 3.969 GB File system free space: 1.848 GB File system state: formatted = Yes mounted = Yes failed = No thin provisioned = No Access snapshots: No Display snapshots: No Read Caching: Disabled Disaster recovery setting: Recovered = No Transfer setting = Use file system default \n Export configuration:\n 127.0.0.2 """ HNAS_RESULT_wrong_export = """Export name: wrong_name Export path: /vvol_test File system label: file_system File system size: 3.969 GB File system free space: 1.848 GB File system state: formatted = Yes mounted = Yes failed = No thin provisioned = No Access snapshots: No Display snapshots: No Read Caching: Disabled Disaster recovery setting: Recovered = No Transfer setting = Use file system default Export configuration: 127.0.0.1""" HNAS_RESULT_exp_no_fs = """ Export name: no_fs Export path: /export_without_fs File system info: *** not available *** Access snapshots: Yes Display snapshots: Yes Read Caching: Disabled Disaster recovery setting: Recovered = No Transfer setting = Use file system default Export configuration: """ HNAS_RESULT_export_ip = """ Export name: vvol_test Export path: /vvol_test File system label: fake_fs File system size: 3.969 GB File system free space: 1.848 GB File system state: formatted = Yes mounted = Yes failed = No thin provisioned = No Access snapshots: No Display snapshots: No Read Caching: Disabled Disaster recovery setting: Recovered = No Transfer setting = Use file system default Export configuration: 127.0.0.1(rw) """ HNAS_RESULT_export_ip2 = """ Export name: vvol_test Export path: /vvol_test File system label: fake_fs File system size: 3.969 GB File system free space: 1.848 GB File system state: formatted = Yes mounted = Yes failed = No thin provisioned = No Access snapshots: No Display snapshots: No Read Caching: Disabled Disaster recovery setting: Recovered = No Transfer setting 
= Use file system default Export configuration: 127.0.0.1(ro) """ HNAS_RESULT_expmod = """Modifying the export '/fake_export' on fs 'fake_fs'... NFS Export Modify: changing configuration options to: 127.0.0.2 NFS Export Modify: Export modified successfully""" HNAS_RESULT_expnotmod = "Export not modified." HNAS_RESULT_fslimits = """ Filesystem fake_fs on span fake_span: Current capacity 100GiB Thin provision: disabled Free space on span allows expansion to: 10GiB (Run 'span-expand') Filesystem is confined to: 1024GiB (Run 'filesystem-confine') Chunk size allows growth to: 1024GiB (This is a conservative \ estimate) Largest filesystem that can be checked: 10000GiB (This is a hard limit) This server model allows growth to: 10000GiB (Upgrade the server) """ HNAS_RESULT_fslimits_tb = """ \ Filesystem fake_fs on span fake_span: Current capacity 1500GiB Thin provision: disabled Free space on span allows expansion to: 1000GiB (Run 'span-expand') Filesystem is confined to: 10240GiB (Run 'filesystem-confine') Chunk size allows growth to: 10240GiB (This is a conservative \ estimate) Largest filesystem that can be checked: 10000GiB (This is a hard limit) This server model allows growth to: 10000GiB (Upgrade the server) """ HNAS_RESULT_job = """tree-operation-job-submit: Request submitted successfully. tree-operation-job-submit: Job id = d933100a-b5f6-11d0-91d9-836896aada5d""" HNAS_RESULT_vvol_list = """vol1 email : root : /shares/vol1 tag : 10 usage bytes : 0 B files: 1 last modified: 2015-07-27 22:25:02.746426000+00:00 vol2 email : root : /shares/vol2 tag : 13 usage bytes : 0 B files: 1 last modified: 2015-07-28 01:30:21.125671700+00:00 vol3 email : root : /shares/vol3 tag : 14 usage bytes : 5 GB (5368709120 B) files: 2 last modified: 2015-07-28 20:23:05.672404600+00:00""" HNAS_RESULT_tree_job_status_fail = """JOB ID : d933100a-b5f6-11d0-91d9-836896aada5d Job request Physical node : 1 EVS : 1 Volume number : 1 File system id : 2ea361c20ed0f80d0000000000000000 File system name : fs1 Source path : "/foo" Creation time : 2013-09-05 23:16:48-07:00 Destination path : "/clone/bar" Ensure destination path exists : true Job state : Job failed Job info Started : 2013-09-05 23:16:48-07:00 Ended : 2013-09-05 23:17:02-07:00 Status : Success Error details : Directories processed : 220 Files processed : 910 Data bytes processed : 34.5 MB (36174754 B) Source directories missing : 0 Source files missing : 0 Source files skipped : 801 Skipping details : 104 symlinks, 452 hard links, 47 block special devices, 25 character devices""" HNAS_RESULT_job = """tree-operation-job-submit: Request submitted successfully. 
tree-operation-job-submit: Job id = d933100a-b5f6-11d0-91d9-836896aada5d """ HNAS_RESULT_job_completed = """JOB ID : ab4211b8-aac8-11ce-91af-39e0822ea368 Job request Physical node : 1 EVS : 1 Volume number : 1 File system id : 2ea361c20ed0f80d0000000000000000 File system name : fs1 Source path : "/foo" Creation time : 2013-09-05 23:16:48-07:00 Destination path : "/clone/bar" Ensure destination path exists : true Job state : Job was completed Job info Started : 2013-09-05 23:16:48-07:00 Ended : 2013-09-05 23:17:02-07:00 Status : Success Error details : Directories processed : 220 Files processed : 910 Data bytes processed : 34.5 MB (36174754 B) Source directories missing : 0 Source files missing : 0 Source files skipped : 801 Skipping details : 104 symlinks, 452 hard links, 47 \ block special devices, 25 character devices """ HNAS_RESULT_job_running = """JOB ID : ab4211b8-aac8-11ce-91af-39e0822ea368 Job request Physical node : 1 EVS : 1 Volume number : 1 File system id : 2ea361c20ed0f80d0000000000000000 File system name : fs1 Source path : "/foo" Creation time : 2013-09-05 23:16:48-07:00 Destination path : "/clone/bar" Ensure destination path exists : true Job state : Job is running Job info Started : 2013-09-05 23:16:48-07:00 Ended : 2013-09-05 23:17:02-07:00 Status : Success Error details : Directories processed : 220 Files processed : 910 Data bytes processed : 34.5 MB (36174754 B) Source directories missing : 0 Source files missing : 0 Source files skipped : 801 Skipping details : 104 symlinks, 452 hard links, 47 \ block special devices, 25 character devices """ HNAS_RESULT_df = """ ID Label EVS Size Used Snapshots Deduped \ Avail Thin ThinSize ThinAvail FS Type ---- ------------- --- -------- -------------- --------- ------- \ ------------- ---- -------- --------- ------------------- 1051 FS-ManilaDev1 3 70.00 GB 10.00 GB (75%) 0 B (0%) NA \ 18.3 GB (25%) No 4 KB,WFS-2,128 DSBs """ HNAS_RESULT_df_tb = """ ID Label EVS Size Used Snapshots Deduped \ Avail Thin ThinSize ThinAvail FS Type ---- ------------- --- -------- -------------- --------- ------- \ ------------- ---- -------- --------- ------------------- 1051 FS-ManilaDev1 3.00 7.00 TB 2 TB (75%) 0 B (0%) NA \ 18.3 GB (25%) No 4 KB,WFS-2,128 DSBs """ HNAS_RESULT_mounted_filesystem = """ file_system 1055 fake_span Mount 2 4 5 1 """ HNAS_RESULT_unmounted_filesystem = """ file_system 1055 fake_span Umount 2 4 5 1 """ @ddt.ddt class HNASSSHTestCase(test.TestCase): def setUp(self): super(HNASSSHTestCase, self).setUp() self.ip = '192.168.1.1' self.port = 22 self.user = 'hnas_user' self.password = 'hnas_password' self.default_commands = ['ssc', '127.0.0.1'] self.fs_name = 'file_system' self.evs_ip = '172.24.44.1' self.evs_id = 2 self.ssh_private_key = 'private_key' self.cluster_admin_ip0 = 'fake' self.job_timeout = 30 self.mock_log = self.mock_object(ssh, 'LOG') self._driver_ssh = ssh.HNASSSHBackend(self.ip, self.user, self.password, self.ssh_private_key, self.cluster_admin_ip0, self.evs_id, self.evs_ip, self.fs_name, self.job_timeout) self.vvol = { 'id': 'vvol_test', 'share_proto': 'nfs', 'size': 4, 'host': '127.0.0.1', } self.snapshot = { 'id': 'snapshot_test', 'share_proto': 'nfs', 'size': 4, 'share_id': 'vvol_test', 'host': 'ubuntu@hds2#HDS2', } def test_get_stats(self): fake_list_command = ['df', '-a', '-f', 'file_system'] self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock(return_value=(HNAS_RESULT_df_tb, ""))) total, free = self._driver_ssh.get_stats() ssh.HNASSSHBackend._execute.assert_called_with(fake_list_command) 
self.assertEqual(7168.0, total) self.assertEqual(5120.0, free) def test_nfs_export_add(self): fake_nfs_command = ['nfs-export', 'add', '-S', 'disable', '-c', '127.0.0.1', '/shares/vvol_test', self.fs_name, '/shares/vvol_test'] self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock()) self._driver_ssh.nfs_export_add('vvol_test') self._driver_ssh._execute.assert_called_with(fake_nfs_command) def test_nfs_export_del(self): fake_nfs_command = ['nfs-export', 'del', '/shares/vvol_test'] self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock()) self._driver_ssh.nfs_export_del('vvol_test') self._driver_ssh._execute.assert_called_with(fake_nfs_command) def test_nfs_export_del_inexistent_export(self): self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock( side_effect=[putils.ProcessExecutionError( stderr='does not exist')])) self._driver_ssh.nfs_export_del('vvol_test') self.assertTrue(self.mock_log.warning.called) def test_nfs_export_del_error(self): self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock( side_effect=[putils.ProcessExecutionError(stderr='')])) self.assertRaises(exception.HNASBackendException, self._driver_ssh.nfs_export_del, 'vvol_test') self.assertTrue(self.mock_log.exception.called) def test_get_host_list(self): self.mock_object(ssh.HNASSSHBackend, "_get_share_export", mock.Mock( return_value=[ssh.Export(HNAS_RESULT_export)])) host_list = self._driver_ssh.get_host_list('fake_id') self.assertEqual(['127.0.0.2'], host_list) def test_update_access_rule_empty_host_list(self): fake_export_command = ['nfs-export', 'mod', '-c', '127.0.0.1', '/shares/fake_id'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock()) self._driver_ssh.update_access_rule("fake_id", []) self._driver_ssh._execute.assert_called_with(fake_export_command) def test_update_access_rule(self): fake_export_command = ['nfs-export', 'mod', '-c', u'"127.0.0.1,127.0.0.2"', '/shares/fake_id'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock()) self._driver_ssh.update_access_rule("fake_id", ['127.0.0.1', '127.0.0.2']) self._driver_ssh._execute.assert_called_with(fake_export_command) def test_tree_clone_nothing_to_clone(self): fake_tree_clone_command = ['tree-clone-job-submit', '-e', '-f', self.fs_name, '/src', '/dst'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=[putils.ProcessExecutionError( stderr='Cannot find any clonable files in the source directory' )])) self.assertRaises(exception.HNASNothingToCloneException, self._driver_ssh.tree_clone, "/src", "/dst") self._driver_ssh._execute.assert_called_with(fake_tree_clone_command) def test_tree_clone_error_cloning(self): fake_tree_clone_command = ['tree-clone-job-submit', '-e', '-f', self.fs_name, '/src', '/dst'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=[putils.ProcessExecutionError(stderr='')])) self.assertRaises(exception.HNASBackendException, self._driver_ssh.tree_clone, "/src", "/dst") self._driver_ssh._execute.assert_called_with(fake_tree_clone_command) self.assertTrue(self.mock_log.exception.called) def test_tree_clone(self): fake_tree_clone_command = ['tree-clone-job-submit', '-e', '-f', self.fs_name, '/src', '/dst'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=[(HNAS_RESULT_job, ''), (HNAS_RESULT_job_completed, '')])) self._driver_ssh.tree_clone("/src", "/dst") self._driver_ssh._execute.assert_any_call(fake_tree_clone_command) self.assertTrue(self.mock_log.debug.called) def test_tree_clone_job_failed(self): fake_tree_clone_command = 
['tree-clone-job-submit', '-e', '-f', self.fs_name, '/src', '/dst'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=[(HNAS_RESULT_job, ''), (HNAS_RESULT_tree_job_status_fail, '')])) self.assertRaises(exception.HNASBackendException, self._driver_ssh.tree_clone, "/src", "/dst") self._driver_ssh._execute.assert_any_call(fake_tree_clone_command) self.assertTrue(self.mock_log.error.called) def test_tree_clone_job_timeout(self): fake_tree_clone_command = ['tree-clone-job-submit', '-e', '-f', self.fs_name, '/src', '/dst'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=[(HNAS_RESULT_job, ''), (HNAS_RESULT_job_running, ''), (HNAS_RESULT_job_running, ''), (HNAS_RESULT_job_running, ''), (HNAS_RESULT_empty, '')])) self.mock_object(time, "time", mock.Mock(side_effect=[0, 0, 200, 200])) self.mock_object(time, "sleep", mock.Mock()) self.assertRaises(exception.HNASBackendException, self._driver_ssh.tree_clone, "/src", "/dst") self._driver_ssh._execute.assert_any_call(fake_tree_clone_command) self.assertTrue(self.mock_log.error.called) def test_tree_delete_path_does_not_exist(self): fake_tree_delete_command = ['tree-delete-job-submit', '--confirm', '-f', self.fs_name, '/path'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=[putils.ProcessExecutionError( stderr='Source path: Cannot access')] )) self._driver_ssh.tree_delete("/path") self.assertTrue(self.mock_log.warning.called) self._driver_ssh._execute.assert_called_with(fake_tree_delete_command) def test_tree_delete_error(self): fake_tree_delete_command = ['tree-delete-job-submit', '--confirm', '-f', self.fs_name, '/path'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=[putils.ProcessExecutionError( stderr='')] )) self.assertRaises(putils.ProcessExecutionError, self._driver_ssh.tree_delete, "/path") self.assertTrue(self.mock_log.exception.called) self._driver_ssh._execute.assert_called_with(fake_tree_delete_command) def test_create_directory(self): locked_selectfs_args = ['create', '/path'] self.mock_object(ssh.HNASSSHBackend, "_locked_selectfs", mock.Mock()) self._driver_ssh.create_directory("/path") self._driver_ssh._locked_selectfs.assert_called_with( *locked_selectfs_args) def test_delete_directory(self): locked_selectfs_args = ['delete', '/path'] self.mock_object(ssh.HNASSSHBackend, "_locked_selectfs", mock.Mock()) self._driver_ssh.delete_directory("/path") self._driver_ssh._locked_selectfs.assert_called_with( *locked_selectfs_args) def test_check_fs_mounted_true(self): self.mock_object(ssh.HNASSSHBackend, "_get_filesystem_list", mock.Mock(return_value=[ssh.FileSystem( HNAS_RESULT_mounted_filesystem)])) self.assertTrue(self._driver_ssh.check_fs_mounted()) def test_check_fs_mounted_false(self): self.mock_object(ssh.HNASSSHBackend, "_get_filesystem_list", mock.Mock(return_value=[ssh.FileSystem( HNAS_RESULT_unmounted_filesystem)])) self.assertFalse(self._driver_ssh.check_fs_mounted()) def test_check_fs_mounted_eror(self): self.mock_object(ssh.HNASSSHBackend, "_get_filesystem_list", mock.Mock(return_value=[])) self.assertRaises(exception.HNASItemNotFoundException, self._driver_ssh.check_fs_mounted) def test_mount_already_mounted(self): fake_mount_command = ['mount', self.fs_name] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=putils.ProcessExecutionError(stderr=''))) self.assertRaises(putils.ProcessExecutionError, self._driver_ssh.mount) self._driver_ssh._execute.assert_called_with(fake_mount_command) def test_vvol_create(self): 
fake_vvol_create_command = ['virtual-volume', 'add', '--ensure', self.fs_name, 'vvol', '/shares/vvol'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock()) self._driver_ssh.vvol_create("vvol") self._driver_ssh._execute.assert_called_with(fake_vvol_create_command) def test_vvol_delete_vvol_does_not_exist(self): fake_vvol_delete_command = ['tree-delete-job-submit', '--confirm', '-f', self.fs_name, '/shares/vvol'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=[putils.ProcessExecutionError( stderr='Source path: Cannot access')] )) self._driver_ssh.vvol_delete("vvol") self.assertTrue(self.mock_log.debug.called) self._driver_ssh._execute.assert_called_with(fake_vvol_delete_command) def test_vvol_delete_error(self): fake_vvol_delete_command = ['tree-delete-job-submit', '--confirm', '-f', self.fs_name, '/shares/vvol'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=[putils.ProcessExecutionError( stderr='')] )) self.assertRaises(putils.ProcessExecutionError, self._driver_ssh.vvol_delete, "vvol") self.assertTrue(self.mock_log.exception.called) self._driver_ssh._execute.assert_called_with(fake_vvol_delete_command) def test_quota_add(self): fake_add_quota_command = ['quota', 'add', '--usage-limit', '1G', '--usage-hard-limit', 'yes', self.fs_name, 'vvol'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock()) self._driver_ssh.quota_add('vvol', 1) self._driver_ssh._execute.assert_called_with(fake_add_quota_command) def test_modify_quota(self): fake_modify_quota_command = ['quota', 'mod', '--usage-limit', '1G', self.fs_name, 'vvol'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock()) self._driver_ssh.modify_quota('vvol', 1) self._driver_ssh._execute.assert_called_with(fake_modify_quota_command) def test_check_vvol(self): fake_check_vvol_command = ['virtual-volume', 'list', '--verbose', self.fs_name, 'vvol'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=putils.ProcessExecutionError(stderr=''))) self.assertRaises(exception.HNASItemNotFoundException, self._driver_ssh.check_vvol, 'vvol') self._driver_ssh._execute.assert_called_with(fake_check_vvol_command) def test_check_quota(self): fake_check_quota_command = ['quota', 'list', '--verbose', self.fs_name, 'vvol'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( return_value=('No quotas matching specified filter criteria', ''))) self.assertRaises(exception.HNASItemNotFoundException, self._driver_ssh.check_quota, 'vvol') self._driver_ssh._execute.assert_called_with(fake_check_quota_command) def test_check_export(self): self.mock_object(ssh.HNASSSHBackend, "_get_share_export", mock.Mock( return_value=[ssh.Export(HNAS_RESULT_export)])) self._driver_ssh.check_export("vvol_test") def test_check_export_error(self): self.mock_object(ssh.HNASSSHBackend, "_get_share_export", mock.Mock( return_value=[ssh.Export(HNAS_RESULT_wrong_export)])) self.assertRaises(exception.HNASItemNotFoundException, self._driver_ssh.check_export, "vvol_test") def test_get_share_quota(self): self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( return_value=(HNAS_RESULT_quota, ''))) result = self._driver_ssh.get_share_quota("vvol_test") self.assertEqual(5, result) @ddt.data(HNAS_RESULT_quota_unset, HNAS_RESULT_quota_err) def test_get_share_quota_errors(self, hnas_output): self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( return_value=(hnas_output, ''))) result = self._driver_ssh.get_share_quota("vvol_test") self.assertIsNone(result) def test_get_share_quota_tb(self): 
self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( return_value=(HNAS_RESULT_quota_tb, ''))) result = self._driver_ssh.get_share_quota("vvol_test") self.assertEqual(1024, result) def test_get_share_quota_mb(self): self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( return_value=(HNAS_RESULT_quota_mb, ''))) self.assertRaises(exception.HNASBackendException, self._driver_ssh.get_share_quota, "vvol_test") def test_get_share_usage(self): self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( return_value=(HNAS_RESULT_quota, ''))) self.assertEqual(1, self._driver_ssh.get_share_usage("vvol_test")) def test_get_share_usage_error(self): self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( return_value=(HNAS_RESULT_quota_err, ''))) self.assertRaises(exception.HNASItemNotFoundException, self._driver_ssh.get_share_usage, "vvol_test") def test_get_share_usage_mb(self): self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( return_value=(HNAS_RESULT_quota_mb, ''))) self.assertEqual(0.01953125, self._driver_ssh.get_share_usage( "vvol_test")) def test_get_share_usage_tb(self): self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( return_value=(HNAS_RESULT_quota_tb, ''))) self.assertEqual(1024, self._driver_ssh.get_share_usage("vvol_test")) def test__get_share_export(self): self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock(return_value=[HNAS_RESULT_export_ip, ''])) export_list = self._driver_ssh._get_share_export('fake_id') self.assertEqual('vvol_test', export_list[0].export_name) self.assertEqual('/vvol_test', export_list[0].export_path) self.assertEqual('fake_fs', export_list[0].file_system_label) self.assertEqual('Yes', export_list[0].mounted) def test__get_share_export_exception_not_found(self): self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=putils.ProcessExecutionError( stderr="NFS Export List: Export 'id' does not exist.") )) self.assertRaises(exception.HNASItemNotFoundException, self._driver_ssh._get_share_export, 'fake_id') def test__get_share_export_exception_error(self): self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=putils.ProcessExecutionError(stderr="Some error.") )) self.assertRaises(putils.ProcessExecutionError, self._driver_ssh._get_share_export, 'fake_id') def test__get_filesystem_list(self): self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock(return_value=[HNAS_RESULT_fs, ''])) out = self._driver_ssh._get_filesystem_list() self.assertEqual('8e6e2c85-fake-long-filesystem-b9b4-e4b09993841e', out[0].name) self.assertEqual('fake_span', out[0].on_span) self.assertEqual('Mount', out[0].state) self.assertEqual(2, out[0].evs) self.assertTrue(self.mock_log.debug.called) def test__execute(self): key = self.ssh_private_key commands = ['tree-clone-job-submit', '-e', '/src', '/dst'] concat_command = ('ssc --smuauth fake console-context --evs 2 ' 'tree-clone-job-submit -e /src /dst') self.mock_object(paramiko.SSHClient, 'connect') self.mock_object(putils, 'ssh_execute', mock.Mock(return_value=[HNAS_RESULT_job, ''])) output, err = self._driver_ssh._execute(commands) putils.ssh_execute.assert_called_once_with(mock.ANY, concat_command, check_exit_code=True) paramiko.SSHClient.connect.assert_called_with(self.ip, username=self.user, key_filename=key, look_for_keys=False, timeout=None, password=self.password, port=self.port) self.assertIn('Request submitted successfully.', output) def test__execute_ssh_exception(self): commands = ['tree-clone-job-submit', '-e', '/src', '/dst'] concat_command = ('ssc 
--smuauth fake console-context --evs 2 ' 'tree-clone-job-submit -e /src /dst') msg = 'Failed to establish SSC connection' self.mock_object(time, "sleep", mock.Mock()) self.mock_object(paramiko.SSHClient, 'connect') self.mock_object(putils, 'ssh_execute', mock.Mock(side_effect=[ putils.ProcessExecutionError(stderr=msg), putils.ProcessExecutionError(stderr='Invalid!')])) self.mock_object(mutils.SSHPool, "item", mock.Mock(return_value=paramiko.SSHClient())) self.mock_object(paramiko.SSHClient, "set_missing_host_key_policy") self.assertRaises(putils.ProcessExecutionError, self._driver_ssh._execute, commands) putils.ssh_execute.assert_called_with(mock.ANY, concat_command, check_exit_code=True) self.assertTrue(self.mock_log.debug.called) self.assertTrue(self.mock_log.error.called) def test__locked_selectfs_create_operation(self): exec_command = ['selectfs', self.fs_name, '\n', 'ssc', '127.0.0.1', 'console-context', '--evs', six.text_type(self.evs_id), 'mkdir', '-p', '/path'] self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock()) self._driver_ssh._locked_selectfs('create', '/path') self._driver_ssh._execute.assert_called_with(exec_command) def test__locked_selectfs_delete_operation_successfull(self): exec_command = ['selectfs', self.fs_name, '\n', 'ssc', '127.0.0.1', 'console-context', '--evs', six.text_type(self.evs_id), 'rmdir', '/path'] self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock()) self._driver_ssh._locked_selectfs('delete', '/path') self._driver_ssh._execute.assert_called_with(exec_command) def test__locked_selectfs_deleting_not_empty_directory(self): msg = 'This path has more snapshot. Currenty DirectoryNotEmpty' self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock( side_effect=[putils.ProcessExecutionError(stderr=msg)])) self._driver_ssh._locked_selectfs('delete', '/path') self.assertTrue(self.mock_log.debug.called) def test__locked_selectfs_delete_exception(self): msg = 'rmdir: cannot remove \'/path\'' self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock( side_effect=[putils.ProcessExecutionError(stderr=msg)])) self.assertRaises(putils.ProcessExecutionError, self._driver_ssh._locked_selectfs, 'delete', 'path') self.assertTrue(self.mock_log.exception.called) manila-2.0.0/manila/tests/share/drivers/quobyte/0000775000567000056710000000000012701407265022772 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/quobyte/test_quobyte.py0000664000567000056710000006454612701407107026105 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Quobyte, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
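# The HNAS SSH tests above drive retry behaviour by giving mock.Mock a
# side_effect *list*, so each successive call to the patched _execute /
# ssh_execute either raises or returns the next item in the sequence.
# Below is a minimal, self-contained sketch of that mocking pattern using
# only the mock library (unittest.mock on Python 3). The run_with_retry
# helper is hypothetical and exists only for this illustration; it is not
# part of the manila HNAS driver.
import mock

def run_with_retry(func, attempts=2):
    """Call func, retrying once if it raises RuntimeError."""
    for attempt in range(attempts):
        try:
            return func()
        except RuntimeError:
            if attempt == attempts - 1:
                raise

fake_execute = mock.Mock(
    side_effect=[RuntimeError('SSC connection lost'),
                 ('Request submitted successfully.', '')])
out, err = run_with_retry(fake_execute)
# The first call raised, the second returned the (stdout, stderr) tuple.
assert out == 'Request submitted successfully.'
assert fake_execute.call_count == 2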
import mock from oslo_config import cfg import six from manila import context from manila import exception from manila.share import configuration as config from manila.share import driver from manila.share.drivers.quobyte import jsonrpc from manila.share.drivers.quobyte import quobyte from manila import test from manila.tests import fake_share CONF = cfg.CONF def fake_rpc_handler(name, *args): if name == 'resolveVolumeName': return None elif name == 'createVolume': return {'volume_uuid': 'voluuid'} elif name == 'exportVolume': return {'nfs_server_ip': 'fake_location', 'nfs_export_path': '/fake_share'} elif name == 'getConfiguration': return { "tenant_configuration": [{ "domain_name": "fake_domain_name", "volume_access": [ {"volume_uuid": "fake_id_1", "restrict_to_network": "10.0.0.1", "read_only": False}, {"volume_uuid": "fake_id_1", "restrict_to_network": "10.0.0.2", "read_only": False}, {"volume_uuid": "fake_id_2", "restrict_to_network": "10.0.0.3", "read_only": False} ]}, {"domain_name": "fake_domain_name_2", "volume_access": [ {"volume_uuid": "fake_id_3", "restrict_to_network": "10.0.0.4", "read_only": False}, {"volume_uuid": "fake_id_3", "restrict_to_network": "10.0.0.5", "read_only": True}, {"volume_uuid": "fake_id_4", "restrict_to_network": "10.0.0.6", "read_only": False} ]} ] } else: return "Unknown fake rpc handler call" def create_fake_access(access_adr, access_id='fake_access_id', access_type='ip', access_level='rw'): return { 'access_id': access_id, 'access_type': access_type, 'access_to': access_adr, 'access_level': access_level } class QuobyteShareDriverTestCase(test.TestCase): """Tests QuobyteShareDriver.""" def setUp(self): super(QuobyteShareDriverTestCase, self).setUp() self._context = context.get_admin_context() CONF.set_default('driver_handles_share_servers', False) self.fake_conf = config.Configuration(None) self._driver = quobyte.QuobyteShareDriver(configuration=self.fake_conf) self._driver.rpc = mock.Mock() self.share = fake_share.fake_share(share_proto='NFS') self.access = fake_share.fake_access() @mock.patch('manila.share.drivers.quobyte.jsonrpc.JsonRpc', mock.Mock()) def test_do_setup_success(self): self._driver.rpc.call = mock.Mock(return_value=None) self._driver.do_setup(self._context) self._driver.rpc.call.assert_called_with('getInformation', {}) @mock.patch('manila.share.drivers.quobyte.jsonrpc.JsonRpc.__init__', mock.Mock(return_value=None)) @mock.patch.object(jsonrpc.JsonRpc, 'call', side_effect=exception.QBRpcException) def test_do_setup_failure(self, mock_call): self.assertRaises(exception.QBException, self._driver.do_setup, self._context) def test_create_share_new_volume(self): self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler) result = self._driver.create_share(self._context, self.share) self.assertEqual(self.share['export_location'], result) self._driver.rpc.call.assert_has_calls([ mock.call('createVolume', dict( name=self.share['name'], tenant_domain=self.share['project_id'], root_user_id=self.fake_conf.quobyte_default_volume_user, root_group_id=self.fake_conf.quobyte_default_volume_group, configuration_name=self.fake_conf.quobyte_volume_configuration )), mock.call('exportVolume', dict(protocol='NFS', volume_uuid='voluuid'))]) def test_create_share_existing_volume(self): self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler) self._driver.create_share(self._context, self.share) self._driver.rpc.call.assert_called_with( 'exportVolume', dict(protocol='NFS', volume_uuid='voluuid')) def test_create_share_wrong_protocol(self): share = 
{'share_proto': 'WRONG_PROTOCOL'} self.assertRaises(exception.QBException, self._driver.create_share, context=None, share=share) def test_delete_share_existing_volume(self): def rpc_handler(name, *args): if name == 'resolveVolumeName': return {'volume_uuid': 'voluuid'} elif name == 'exportVolume': return {} self._driver.configuration.quobyte_delete_shares = True self._driver.rpc.call = mock.Mock(wraps=rpc_handler) self._driver.delete_share(self._context, self.share) self._driver.rpc.call.assert_has_calls([ mock.call('resolveVolumeName', {'volume_name': 'fakename', 'tenant_domain': 'fake_project_uuid'}), mock.call('deleteVolume', {'volume_uuid': 'voluuid'}), mock.call('exportVolume', {'volume_uuid': 'voluuid', 'remove_export': True})]) def test_delete_share_existing_volume_disabled(self): def rpc_handler(name, *args): if name == 'resolveVolumeName': return {'volume_uuid': 'voluuid'} elif name == 'exportVolume': return {} CONF.set_default('quobyte_delete_shares', False) self._driver.rpc.call = mock.Mock(wraps=rpc_handler) self._driver.delete_share(self._context, self.share) self._driver.rpc.call.assert_called_with( 'exportVolume', {'volume_uuid': 'voluuid', 'remove_export': True}) @mock.patch.object(quobyte.LOG, 'warning') def test_delete_share_nonexisting_volume(self, mock_warning): def rpc_handler(name, *args): if name == 'resolveVolumeName': return None self._driver.rpc.call = mock.Mock(wraps=rpc_handler) self._driver.delete_share(self._context, self.share) mock_warning.assert_called_with( 'No volume found for share fake_project_uuid/fakename') def test_allow_access(self): def rpc_handler(name, *args): if name == 'resolveVolumeName': return {'volume_uuid': 'voluuid'} elif name == 'exportVolume': return {'nfs_server_ip': '10.10.1.1', 'nfs_export_path': '/voluuid'} self._driver.rpc.call = mock.Mock(wraps=rpc_handler) self._driver._allow_access(self._context, self.share, self.access) self._driver.rpc.call.assert_called_with( 'exportVolume', {'volume_uuid': 'voluuid', 'read_only': False, 'add_allow_ip': '10.0.0.1'}) def test_allow_ro_access(self): def rpc_handler(name, *args): if name == 'resolveVolumeName': return {'volume_uuid': 'voluuid'} elif name == 'exportVolume': return {'nfs_server_ip': '10.10.1.1', 'nfs_export_path': '/voluuid'} self._driver.rpc.call = mock.Mock(wraps=rpc_handler) ro_access = fake_share.fake_access(access_level='ro') self._driver._allow_access(self._context, self.share, ro_access) self._driver.rpc.call.assert_called_with( 'exportVolume', {'volume_uuid': 'voluuid', 'read_only': True, 'add_allow_ip': '10.0.0.1'}) def test_allow_access_nonip(self): self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler) self.access = fake_share.fake_access(**{"access_type": "non_existant_access_type"}) self.assertRaises(exception.InvalidShareAccess, self._driver._allow_access, self._context, self.share, self.access) def test_deny_access(self): def rpc_handler(name, *args): if name == 'resolveVolumeName': return {'volume_uuid': 'voluuid'} elif name == 'exportVolume': return {'nfs_server_ip': '10.10.1.1', 'nfs_export_path': '/voluuid'} self._driver.rpc.call = mock.Mock(wraps=rpc_handler) self._driver._deny_access(self._context, self.share, self.access) self._driver.rpc.call.assert_called_with( 'exportVolume', {'volume_uuid': 'voluuid', 'remove_allow_ip': '10.0.0.1'}) @mock.patch.object(quobyte.LOG, 'debug') def test_deny_access_nonip(self, mock_debug): self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler) self.access = fake_share.fake_access( access_type="non_existant_access_type") 
self._driver._deny_access(self._context, self.share, self.access) mock_debug.assert_called_with( 'Quobyte driver only supports ip access control. ' 'Ignoring deny access call for %s , %s', 'fakename', 'fake_project_uuid') def test_resolve_volume_name(self): self._driver.rpc.call = mock.Mock( return_value={'volume_uuid': 'fake_uuid'}) self._driver._resolve_volume_name('fake_vol_name', 'fake_domain_name') self._driver.rpc.call.assert_called_with( 'resolveVolumeName', {'volume_name': 'fake_vol_name', 'tenant_domain': 'fake_domain_name'}) def test_resolve_volume_name_NOENT(self): self._driver.rpc.call = mock.Mock( return_value=None) self.assertIsNone( self._driver._resolve_volume_name('fake_vol_name', 'fake_domain_name')) def test_resolve_volume_name_other_error(self): self._driver.rpc.call = mock.Mock( side_effect=exception.QBRpcException( result='fubar', qbcode=666)) self.assertRaises(exception.QBRpcException, self._driver._resolve_volume_name, volume_name='fake_vol_name', tenant_domain='fake_domain_name') @mock.patch.object(driver.ShareDriver, '_update_share_stats') def test_update_share_stats(self, mock_uss): self._driver._get_capacities = mock.Mock(return_value=[42, 23]) self._driver._update_share_stats() mock_uss.assert_called_once_with( dict(storage_protocol='NFS', vendor_name='Quobyte', share_backend_name=self._driver.backend_name, driver_version=self._driver.DRIVER_VERSION, total_capacity_gb=42, free_capacity_gb=23, reserved_percentage=0)) def test_get_capacities_gb(self): capval = 42115548133 useval = 19695128917 self._driver.rpc.call = mock.Mock( return_value={'total_logical_capacity': six.text_type(capval), 'total_logical_usage': six.text_type(useval)}) self.assertEqual((39.223160718, 20.880642548), self._driver._get_capacities()) @mock.patch.object(quobyte.QuobyteShareDriver, "_resolve_volume_name", return_value="fake_uuid") def test_ensure_share(self, mock_qb_resolve_volname): self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler) result = self._driver.ensure_share(self._context, self.share, None) self.assertEqual(self.share["export_location"], result) (mock_qb_resolve_volname. assert_called_once_with(self.share['name'], self.share['project_id'])) self._driver.rpc.call.assert_has_calls([ mock.call('exportVolume', dict( volume_uuid="fake_uuid", protocol='NFS' ))]) @mock.patch.object(quobyte.QuobyteShareDriver, "_resolve_volume_name", return_value=None) def test_ensure_deleted_share(self, mock_qb_resolve_volname): self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler) self.assertRaises(exception.ShareResourceNotFound, self._driver.ensure_share, self._context, self.share, None) (mock_qb_resolve_volname. 
assert_called_once_with(self.share['name'], self.share['project_id'])) @mock.patch.object(quobyte.QuobyteShareDriver, "_resize_share") def test_extend_share(self, mock_qsd_resize_share): self._driver.extend_share(ext_share=self.share, ext_size=2, share_server=None) mock_qsd_resize_share.assert_called_once_with(share=self.share, new_size=2) def test_resize_share(self): self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler) self._driver._resize_share(share=self.share, new_size=7) self._driver.rpc.call.assert_has_calls([ mock.call('setQuota', {"consumer": {"type": 3, "identifier": self.share["name"]}, "limits": {"type": 5, "value": 7}})]) @mock.patch.object(quobyte.QuobyteShareDriver, "_resolve_volume_name", return_value="fake_id_3") def test_fetch_existing_access(self, mock_qb_resolve_volname): self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler) old_access_1 = create_fake_access(access_id="old_1", access_adr="10.0.0.4") old_access_2 = create_fake_access(access_id="old_2", access_adr="10.0.0.5") exist_list = self._driver._fetch_existing_access(context=self._context, share=self.share) # assert expected result here self.assertEqual([old_access_1['access_to'], old_access_2['access_to']], [e.get('access_to') for e in exist_list]) (mock_qb_resolve_volname. assert_called_once_with(self.share['name'], self.share['project_id'])) @mock.patch.object(quobyte.QuobyteShareDriver, "_resize_share") def test_shrink_share(self, mock_qsd_resize_share): self._driver.shrink_share(shrink_share=self.share, shrink_size=3, share_server=None) mock_qsd_resize_share.assert_called_once_with(share=self.share, new_size=3) def test_subtract_access_lists(self): access_1 = create_fake_access(access_id="new_1", access_adr="10.0.0.5", access_type="rw",) access_2 = create_fake_access(access_id="old_1", access_adr="10.0.0.1", access_type="rw") access_3 = create_fake_access(access_id="old_2", access_adr="10.0.0.3", access_type="ro") access_4 = create_fake_access(access_id="new_2", access_adr="10.0.0.6", access_type="rw") access_5 = create_fake_access(access_id="old_3", access_adr="10.0.0.4", access_type="rw") min_list = [access_1, access_2, access_3, access_4] sub_list = [access_5, access_3, access_2] self.assertEqual([access_1, access_4], self._driver._subtract_access_lists(min_list, sub_list)) def test_subtract_access_lists_level(self): access_1 = create_fake_access(access_id="new_1", access_adr="10.0.0.5", access_level="rw") access_2 = create_fake_access(access_id="old_1", access_adr="10.0.0.1", access_level="rw") access_3 = create_fake_access(access_id="old_2", access_adr="10.0.0.3", access_level="rw") access_4 = create_fake_access(access_id="new_2", access_adr="10.0.0.6", access_level="rw") access_5 = create_fake_access(access_id="old_2_ro", access_adr="10.0.0.3", access_level="ro") min_list = [access_1, access_2, access_3, access_4] sub_list = [access_5, access_2] self.assertEqual([access_1, access_3, access_4], self._driver._subtract_access_lists(min_list, sub_list)) def test_subtract_access_lists_type(self): access_1 = create_fake_access(access_id="new_1", access_adr="10.0.0.5", access_type="ip") access_2 = create_fake_access(access_id="old_1", access_adr="10.0.0.1", access_type="ip") access_3 = create_fake_access(access_id="old_2", access_adr="10.0.0.3", access_type="ip") access_4 = create_fake_access(access_id="new_2", access_adr="10.0.0.6", access_type="ip") access_5 = create_fake_access(access_id="old_2_ro", access_adr="10.0.0.3", access_type="other") min_list = [access_1, access_2, access_3, access_4] 
sub_list = [access_5, access_2] self.assertEqual([access_1, access_3, access_4], self._driver._subtract_access_lists(min_list, sub_list)) @mock.patch.object(quobyte.QuobyteShareDriver, "_allow_access") @mock.patch.object(quobyte.QuobyteShareDriver, "_deny_access") def test_update_access_add_delete(self, qb_deny_mock, qb_allow_mock): access_1 = create_fake_access(access_id="new_1", access_adr="10.0.0.5", access_level="rw") access_2 = create_fake_access(access_id="old_1", access_adr="10.0.0.1", access_level="rw") access_3 = create_fake_access(access_id="old_2", access_adr="10.0.0.3", access_level="rw") self._driver.update_access(self._context, self.share, access_rules=None, add_rules=[access_1], delete_rules=[access_2, access_3]) qb_allow_mock.assert_called_once_with(self._context, self.share, access_1) deny_calls = [mock.call(self._context, self.share, access_2), mock.call(self._context, self.share, access_3)] qb_deny_mock.assert_has_calls(deny_calls) @mock.patch.object(quobyte.LOG, "warning") def test_update_access_no_rules(self, qb_log_mock): self._driver.update_access(context=None, share=None, access_rules=[], add_rules=[], delete_rules=[]) qb_log_mock.assert_has_calls([mock.ANY]) @mock.patch.object(quobyte.QuobyteShareDriver, "_subtract_access_lists") @mock.patch.object(quobyte.QuobyteShareDriver, "_fetch_existing_access") @mock.patch.object(quobyte.QuobyteShareDriver, "_allow_access") def test_update_access_recovery_additionals(self, qb_allow_mock, qb_exist_mock, qb_subtr_mock): new_access_1 = create_fake_access(access_id="new_1", access_adr="10.0.0.2") old_access = create_fake_access(access_id="fake_access_id", access_adr="10.0.0.1") new_access_2 = create_fake_access(access_id="new_2", access_adr="10.0.0.3") add_access_rules = [new_access_1, old_access, new_access_2] qb_exist_mock.return_value = [old_access] qb_subtr_mock.side_effect = [[new_access_1, new_access_2], []] self._driver.update_access(self._context, self.share, access_rules=add_access_rules, add_rules=[], delete_rules=[]) assert_calls = [mock.call(self._context, self.share, new_access_1), mock.call(self._context, self.share, new_access_2)] qb_allow_mock.assert_has_calls(assert_calls, any_order=True) qb_exist_mock.assert_called_once_with(self._context, self.share) @mock.patch.object(quobyte.QuobyteShareDriver, "_subtract_access_lists") @mock.patch.object(quobyte.QuobyteShareDriver, "_fetch_existing_access") @mock.patch.object(quobyte.QuobyteShareDriver, "_deny_access") def test_update_access_recovery_superfluous(self, qb_deny_mock, qb_exist_mock, qb_subtr_mock): old_access_1 = create_fake_access(access_id="old_1", access_adr="10.0.0.1") missing_access_1 = create_fake_access(access_id="mis_1", access_adr="10.0.0.2") old_access_2 = create_fake_access(access_id="old_2", access_adr="10.0.0.3") qb_exist_mock.side_effect = [[old_access_1, old_access_2]] qb_subtr_mock.side_effect = [[], [missing_access_1]] old_access_rules = [old_access_1, old_access_2] self._driver.update_access(self._context, self.share, access_rules=old_access_rules, add_rules=[], delete_rules=[]) qb_deny_mock.assert_called_once_with(self._context, self.share, (missing_access_1)) qb_exist_mock.assert_called_once_with(self._context, self.share) @mock.patch.object(quobyte.QuobyteShareDriver, "_subtract_access_lists") @mock.patch.object(quobyte.QuobyteShareDriver, "_fetch_existing_access") @mock.patch.object(quobyte.QuobyteShareDriver, "_deny_access") @mock.patch.object(quobyte.QuobyteShareDriver, "_allow_access") def 
test_update_access_recovery_add_superfluous(self, qb_allow_mock, qb_deny_mock, qb_exist_mock, qb_subtr_mock): new_access_1 = create_fake_access(access_id="new_1", access_adr="10.0.0.5") old_access_1 = create_fake_access(access_id="old_1", access_adr="10.0.0.1") old_access_2 = create_fake_access(access_id="old_2", access_adr="10.0.0.3") old_access_3 = create_fake_access(access_id="old_3", access_adr="10.0.0.4") miss_access_1 = create_fake_access(access_id="old_3", access_adr="10.0.0.4") new_access_2 = create_fake_access(access_id="new_2", access_adr="10.0.0.3", access_level="ro") new_access_rules = [new_access_1, old_access_1, old_access_2, old_access_3, new_access_2] qb_exist_mock.return_value = [old_access_1, old_access_2, old_access_3, miss_access_1] qb_subtr_mock.side_effect = [[new_access_1, new_access_2], [miss_access_1, old_access_2]] self._driver.update_access(self._context, self.share, new_access_rules, add_rules=[], delete_rules=[]) a_calls = [mock.call(self._context, self.share, new_access_1), mock.call(self._context, self.share, new_access_2)] qb_allow_mock.assert_has_calls(a_calls) b_calls = [mock.call(self._context, self.share, miss_access_1), mock.call(self._context, self.share, old_access_2)] qb_deny_mock.assert_has_calls(b_calls) qb_exist_mock.assert_called_once_with(self._context, self.share) manila-2.0.0/manila/tests/share/drivers/quobyte/__init__.py0000664000567000056710000000000012701407107025064 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/quobyte/test_jsonrpc.py0000664000567000056710000003342412701407107026062 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Quobyte, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import socket import ssl import tempfile import time import mock from oslo_serialization import jsonutils import six from six.moves import http_client from manila import exception from manila.share.drivers.quobyte import jsonrpc from manila import test class FakeResponse(object): def __init__(self, status, body): self.status = status self.reason = "HTTP reason" self._body = body def read(self): return self._body class QuobyteBasicAuthCredentialsTestCase(test.TestCase): def test_get_authorization_header(self): creds = jsonrpc.BasicAuthCredentials('fakeuser', 'fakepwd') self.assertEqual('BASIC ZmFrZXVzZXI6ZmFrZXB3ZA==', creds.get_authorization_header()) class QuobyteHttpsConnectionWithCaVerificationTestCase(test.TestCase): @mock.patch.object(socket, "create_connection", return_value="fake_socket") @mock.patch.object(ssl, "wrap_socket") def test_https_with_ca_connect(self, mock_ssl, mock_cc): key_file = tempfile.TemporaryFile() cert_file = tempfile.gettempdir() ca_file = tempfile.gettempdir() mycon = (jsonrpc. 
HTTPSConnectionWithCaVerification(host="localhost", key_file=key_file, cert_file=cert_file, ca_file=ca_file, port=1234, timeout=999)) mycon.connect() mock_cc.assert_called_once_with(("localhost", 1234), 999) mock_ssl.assert_called_once_with("fake_socket", keyfile=key_file, certfile=cert_file, ca_certs=ca_file, cert_reqs=mock.ANY) @mock.patch.object(http_client.HTTPConnection, "_tunnel") @mock.patch.object(socket, "create_connection", return_value="fake_socket") @mock.patch.object(ssl, "wrap_socket") def test_https_with_ca_connect_tunnel(self, mock_ssl, mock_cc, mock_tunnel): key_file = tempfile.TemporaryFile() cert_file = tempfile.gettempdir() ca_file = tempfile.gettempdir() mycon = (jsonrpc. HTTPSConnectionWithCaVerification(host="localhost", key_file=key_file, cert_file=cert_file, ca_file=ca_file, port=1234, timeout=999)) mycon._tunnel_host = "fake_tunnel_host" mycon.connect() mock_tunnel.assert_called_once_with() mock_cc.assert_called_once_with(("localhost", 1234), 999) mock_ssl.assert_called_once_with("fake_socket", keyfile=key_file, certfile=cert_file, ca_certs=ca_file, cert_reqs=mock.ANY) class QuobyteJsonRpcTestCase(test.TestCase): def setUp(self): super(QuobyteJsonRpcTestCase, self).setUp() self.rpc = jsonrpc.JsonRpc(url="http://test", user_credentials=("me", "team")) self.mock_object(self.rpc, '_connection') self.mock_object(time, 'sleep') def test_request_generation_and_basic_auth(self): self.mock_object( self.rpc._connection, 'getresponse', mock.Mock(return_value=FakeResponse(200, '{"result":"yes"}'))) self.rpc.call('method', {'param': 'value'}) self.rpc._connection.request.assert_called_once_with( 'POST', 'http://test/', jsonutils.dumps({'jsonrpc': '2.0', 'method': 'method', 'params': {'retry': 'INFINITELY', 'param': 'value'}, 'id': '1'}), dict(Authorization=jsonrpc.BasicAuthCredentials("me", "team") .get_authorization_header())) @mock.patch.object(jsonrpc.HTTPSConnectionWithCaVerification, '__init__', return_value=None) def test_jsonrpc_init_with_ca(self, mock_init): foofile = tempfile.TemporaryFile() self.rpc = jsonrpc.JsonRpc("https://foo.bar/", ('fakeuser', 'fakepwd'), foofile) mock_init.assert_called_once_with("foo.bar", ca_file=foofile.name) @mock.patch.object(jsonrpc.LOG, "warning") def test_jsonrpc_init_without_ca(self, mock_warning): self.rpc = jsonrpc.JsonRpc("https://foo.bar/", ('fakeuser', 'fakepwd'), None) mock_warning.assert_called_once_with( "Will not verify the server certificate of the API service" " because the CA certificate is not available.") @mock.patch.object(http_client.HTTPConnection, '__init__', return_value=None) def test_jsonrpc_init_no_ssl(self, mock_init): self.rpc = jsonrpc.JsonRpc("http://foo.bar/", ('fakeuser', 'fakepwd')) mock_init.assert_called_once_with("foo.bar") def test_successful_call(self): self.mock_object( self.rpc._connection, 'getresponse', mock.Mock(return_value=FakeResponse( 200, '{"result":"Sweet gorilla of Manila"}'))) result = self.rpc.call('method', {'param': 'value'}) self.rpc._connection.connect.assert_called_once_with() self.assertEqual("Sweet gorilla of Manila", result) @mock.patch('six.moves.http_client.HTTPSConnection') def test_jsonrpc_call_ssl_disable(self, mock_connection): mock_connection.return_value = self.rpc._connection self.mock_object( self.rpc._connection, 'request', mock.Mock(side_effect=ssl.SSLError)) self.mock_object( self.rpc._connection, 'getresponse', mock.Mock(return_value=FakeResponse( 403, '{"error":{"code":28,"message":"text"}}'))) self.mock_object(jsonrpc.LOG, 'warning') 
self.assertRaises(exception.QBException, self.rpc.call, 'method', {'param': 'value'}) self.assertTrue(self.rpc._disabled_cert_verification) jsonrpc.LOG.warning.assert_called_once_with( "Could not verify server certificate of " "API service against CA.") def test_jsonrpc_call_ssl_error(self): """This test succeeds if a specific exception is thrown. Throwing a different exception or none at all is a failure in this specific test case. """ self.mock_object( self.rpc._connection, 'request', mock.Mock(side_effect=ssl.SSLError)) self.rpc._disabled_cert_verification = True try: self.rpc.call('method', {'param': 'value'}) except exception.QBException as me: self.rpc._connection.connect.assert_called_once_with() (self.assertTrue(six.text_type(me).startswith ('Client SSL subsystem returned error:'))) except Exception as e: self.fail('Unexpected exception thrown: %s' % e) else: self.fail('Expected exception not thrown') def test_jsonrpc_call_bad_status_line(self): self.mock_object( self.rpc._connection, 'getresponse', mock.Mock(side_effect=http_client.BadStatusLine("fake_line"))) self.assertRaises(exception.QBException, self.rpc.call, 'method', {'param': 'value'}) def test_jsonrpc_call_http_exception(self): self.mock_object( self.rpc._connection, 'getresponse', mock.Mock(side_effect=http_client.HTTPException)) self.mock_object(jsonrpc.LOG, 'warning') self.assertRaises(http_client.HTTPException, self.rpc.call, 'method', {'param': 'value'}) self.rpc._connection.connect.assert_called_once_with() jsonrpc.LOG.warning.assert_has_calls([]) def test_jsonrpc_call_socket_error(self): self.mock_object( self.rpc._connection, 'getresponse', mock.Mock(side_effect=socket.error(23, "Test"))) self.mock_object(jsonrpc.LOG, 'warning') self.assertRaises(exception.QBException, self.rpc.call, 'method', {'param': 'value'}) self.rpc._connection.connect.assert_called_once_with() jsonrpc.LOG.warning.assert_has_calls([]) def test_jsonrpc_call_http_exception_retry(self): self.mock_object( self.rpc._connection, 'getresponse', mock.Mock(side_effect=http_client.HTTPException)) self.mock_object(jsonrpc.LOG, 'warning') self.rpc._fail_fast = False self.assertRaises(exception.QBException, self.rpc.call, 'method', {'param': 'value'}) self.rpc._connection.connect.assert_called_once_with() jsonrpc.LOG.warning.assert_called_with( "Encountered error, retrying: %s", "") def test_jsonrpc_call_no_connect(self): orig_retries = jsonrpc.CONNECTION_RETRIES jsonrpc.CONNECTION_RETRIES = 0 try: self.rpc.call('method', {'param': 'value'}) except exception.QBException as me: self.rpc._connection.connect.assert_called_once_with() self.assertEqual("Unable to connect to backend after 0 retries", six.text_type(me)) else: self.fail('Expected exception not thrown') finally: jsonrpc.CONNECTION_RETRIES = orig_retries def test_http_error_401(self): self.mock_object( self.rpc._connection, 'getresponse', mock.Mock(return_value=FakeResponse(401, ''))) self.assertRaises(exception.QBException, self.rpc.call, 'method', {'param': 'value'}) self.rpc._connection.connect.assert_called_once_with() def test_http_error_other(self): self.mock_object( self.rpc._connection, 'getresponse', mock.Mock(return_value=FakeResponse(300, ''))) self.assertRaises(exception.QBException, self.rpc.call, 'method', {'param': 'value'}) self.rpc._connection.connect.assert_called_once_with() self.assertTrue(self.rpc._connection.getresponse.called) def test_application_error(self): self.mock_object( self.rpc._connection, 'getresponse', mock.Mock(return_value=FakeResponse( 200, 
'{"error":{"code":28,"message":"text"}}'))) self.assertRaises(exception.QBRpcException, self.rpc.call, 'method', {'param': 'value'}) self.rpc._connection.connect.assert_called_once_with() self.assertTrue(self.rpc._connection.getresponse.called) def test_broken_application_error(self): self.mock_object( self.rpc._connection, 'getresponse', mock.Mock(return_value=FakeResponse( 200, '{"error":{"code":28,"message":"text"}}'))) self.assertRaises(exception.QBRpcException, self.rpc.call, 'method', {'param': 'value'}) self.rpc._connection.connect.assert_called_once_with() self.assertTrue(self.rpc._connection.getresponse.called) def test_checked_for_application_error(self): resultdict = {"result": "Sweet gorilla of Manila"} self.assertEqual("Sweet gorilla of Manila", (self.rpc. _checked_for_application_error(result=resultdict)) ) def test_checked_for_application_error_no_entry(self): resultdict = {"result": "Sweet gorilla of Manila", "error": {"message": "No Gorilla", "code": jsonrpc.ERROR_ENOENT}} self.assertIsNone( self.rpc._checked_for_application_error(result=resultdict)) def test_checked_for_application_error_exception(self): self.assertRaises(exception.QBRpcException, self.rpc._checked_for_application_error, {"result": "Sweet gorilla of Manila", "error": {"message": "No Gorilla", "code": 666 } } ) manila-2.0.0/manila/tests/share/drivers/test_lvm.py0000664000567000056710000005221212701407107023506 0ustar jenkinsjenkins00000000000000# Copyright 2012 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Unit tests for the LVM driver module.""" import os import ddt import mock from oslo_config import cfg from manila.common import constants as const from manila import context from manila import exception from manila.share import configuration from manila.share.drivers import lvm from manila import test from manila.tests.db import fakes as db_fakes from manila.tests import fake_utils from manila.tests.share.drivers import test_generic CONF = cfg.CONF def fake_share(**kwargs): share = { 'id': 'fakeid', 'name': 'fakename', 'size': 1, 'share_proto': 'NFS', 'export_location': '127.0.0.1:/mnt/nfs/volume-00002', } share.update(kwargs) return db_fakes.FakeModel(share) def fake_snapshot(**kwargs): snapshot = { 'id': 'fakesnapshotid', 'share_name': 'fakename', 'share_id': 'fakeid', 'name': 'fakesnapshotname', 'share_proto': 'NFS', 'export_location': '127.0.0.1:/mnt/nfs/volume-00002', 'share': {'size': 1}, } snapshot.update(kwargs) return db_fakes.FakeModel(snapshot) def fake_access(**kwargs): access = { 'id': 'fakeaccid', 'access_type': 'ip', 'access_to': '10.0.0.2', 'access_level': 'rw', 'state': 'active', } access.update(kwargs) return db_fakes.FakeModel(access) @ddt.ddt class LVMShareDriverTestCase(test.TestCase): """Tests LVMShareDriver.""" def setUp(self): super(LVMShareDriverTestCase, self).setUp() fake_utils.stub_out_utils_execute(self) self._context = context.get_admin_context() CONF.set_default('lvm_share_volume_group', 'fakevg') CONF.set_default('lvm_share_export_ip', '10.0.0.1') CONF.set_default('driver_handles_share_servers', False) CONF.set_default('reserved_share_percentage', 50) self._helper_cifs = mock.Mock() self._helper_nfs = mock.Mock() self.fake_conf = configuration.Configuration(None) self._db = mock.Mock() self._os = lvm.os = mock.Mock() self._os.path.join = os.path.join self._driver = lvm.LVMShareDriver(self._db, configuration=self.fake_conf) self._driver._helpers = { 'CIFS': self._helper_cifs, 'NFS': self._helper_nfs, } self.share = fake_share() self.access = fake_access() self.snapshot = fake_snapshot() self.server = { 'public_address': self.fake_conf.lvm_share_export_ip, 'instance_id': 'LVM', 'lock_name': 'manila_lvm', } # Used only to test compatibility with share manager self.share_server = "fake_share_server" def tearDown(self): super(LVMShareDriverTestCase, self).tearDown() fake_utils.fake_execute_set_repliers([]) fake_utils.fake_execute_clear_log() def test_do_setup(self): CONF.set_default('lvm_share_helpers', ['NFS=fakenfs']) lvm.importutils = mock.Mock() lvm.importutils.import_class.return_value = self._helper_nfs self._driver.do_setup(self._context) lvm.importutils.import_class.assert_has_calls([ mock.call('fakenfs') ]) def test_check_for_setup_error(self): def exec_runner(*ignore_args, **ignore_kwargs): return '\n fake1\n fakevg\n fake2\n', '' expected_exec = ['vgs --noheadings -o name'] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) self._driver.check_for_setup_error() self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) def test_check_for_setup_error_no_vg(self): def exec_runner(*ignore_args, **ignore_kwargs): return '\n fake0\n fake1\n fake2\n', '' fake_utils.fake_execute_set_repliers([('vgs --noheadings -o name', exec_runner)]) self.assertRaises(exception.InvalidParameterValue, self._driver.check_for_setup_error) def test_check_for_setup_error_no_export_ip(self): def exec_runner(*ignore_args, **ignore_kwargs): return '\n fake1\n fakevg\n fake2\n', '' fake_utils.fake_execute_set_repliers([('vgs --noheadings -o name', 
exec_runner)]) CONF.set_default('lvm_share_export_ip', None) self.assertRaises(exception.InvalidParameterValue, self._driver.check_for_setup_error) def test_local_path_normal(self): share = fake_share(name='fake_sharename') CONF.set_default('lvm_share_volume_group', 'fake_vg') ret = self._driver._get_local_path(share) self.assertEqual('/dev/mapper/fake_vg-fake_sharename', ret) def test_local_path_escapes(self): share = fake_share(name='fake-sharename') CONF.set_default('lvm_share_volume_group', 'fake-vg') ret = self._driver._get_local_path(share) self.assertEqual('/dev/mapper/fake--vg-fake--sharename', ret) def test_create_share(self): self._helper_nfs.create_export.return_value = 'fakelocation' self._driver._mount_device = mock.Mock() ret = self._driver.create_share(self._context, self.share, self.share_server) CONF.set_default('lvm_share_mirrors', 0) self._driver._mount_device.assert_called_with( self.share, '/dev/mapper/fakevg-fakename') expected_exec = [ 'lvcreate -L 1G -n fakename fakevg', 'mkfs.ext4 /dev/mapper/fakevg-fakename', ] self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) self.assertEqual('fakelocation', ret) def test_create_share_from_snapshot(self): CONF.set_default('lvm_share_mirrors', 0) self._driver._mount_device = mock.Mock() snapshot_instance = { 'snapshot_id': 'fakesnapshotid', 'name': 'fakename' } mount_share = '/dev/mapper/fakevg-fakename' mount_snapshot = '/dev/mapper/fakevg-fakename' self._helper_nfs.create_export.return_value = 'fakelocation' self._driver.create_share_from_snapshot(self._context, self.share, snapshot_instance, self.share_server) self._driver._mount_device.assert_called_with(self.share, mount_snapshot) expected_exec = [ 'lvcreate -L 1G -n fakename fakevg', 'mkfs.ext4 /dev/mapper/fakevg-fakename', ("dd count=0 if=%s of=%s iflag=direct oflag=direct" % (mount_snapshot, mount_share)), ("dd if=%s of=%s count=1024 bs=1M iflag=direct oflag=direct" % (mount_snapshot, mount_share)), ] self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) def test_create_share_mirrors(self): share = fake_share(size='2048') CONF.set_default('lvm_share_mirrors', 2) self._helper_nfs.create_export.return_value = 'fakelocation' self._driver._mount_device = mock.Mock() ret = self._driver.create_share(self._context, share, self.share_server) self._driver._mount_device.assert_called_with( share, '/dev/mapper/fakevg-fakename') expected_exec = [ 'lvcreate -L 2048G -n fakename fakevg -m 2 --nosync -R 2', 'mkfs.ext4 /dev/mapper/fakevg-fakename', ] self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) self.assertEqual('fakelocation', ret) def test_deallocate_container(self): expected_exec = ['lvremove -f fakevg/fakename'] self._driver._deallocate_container(self.share['name']) self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) def test_deallocate_container_error(self): def _fake_exec(*args, **kwargs): raise exception.ProcessExecutionError(stderr="error") self.mock_object(self._driver, '_try_execute', _fake_exec) self.assertRaises(exception.ProcessExecutionError, self._driver._deallocate_container, self.share['name']) def test_deallocate_container_not_found_error(self): def _fake_exec(*args, **kwargs): raise exception.ProcessExecutionError(stderr="not found") self.mock_object(self._driver, '_try_execute', _fake_exec) self._driver._deallocate_container(self.share['name']) @mock.patch.object(lvm.LVMShareDriver, '_update_share_stats', mock.Mock()) def test_get_share_stats(self): with mock.patch.object(self._driver, '_stats', 
mock.Mock) as stats: self.assertEqual(stats, self._driver.get_share_stats()) self.assertFalse(self._driver._update_share_stats.called) @mock.patch.object(lvm.LVMShareDriver, '_update_share_stats', mock.Mock()) def test_get_share_stats_refresh(self): with mock.patch.object(self._driver, '_stats', mock.Mock) as stats: self.assertEqual(stats, self._driver.get_share_stats(refresh=True)) self._driver._update_share_stats.assert_called_once_with() def test_remove_export(self): mount_path = self._get_mount_path(self.share) self._os.path.exists.return_value = True self._driver._remove_export(self._context, self.share) expected_exec = [ "umount -f %s" % (mount_path,), ] self._os.path.exists.assert_called_with(mount_path) self._os.rmdir.assert_called_with(mount_path) self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) def test_remove_export_is_busy_error(self): def exec_runner(*ignore_args, **ignore_kwargs): raise exception.ProcessExecutionError(stderr='device is busy') self._os.path.exists.return_value = True mount_path = self._get_mount_path(self.share) expected_exec = [ "umount -f %s" % (mount_path), ] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) self.assertRaises(exception.ShareBusyException, self._driver._remove_export, self._context, self.share) self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) def test_remove_export_error(self): def exec_runner(*ignore_args, **ignore_kwargs): raise exception.ProcessExecutionError(stderr='fake error') mount_path = self._get_mount_path(self.share) expected_exec = [ "umount -f %s" % (mount_path), ] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) self._os.path.exists.return_value = True self._driver._remove_export(self._context, self.share) self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) def test_remove_export_rmdir_error(self): mount_path = self._get_mount_path(self.share) self._os.path.exists.return_value = True self.mock_object(self._os, 'rmdir', mock.Mock(side_effect=OSError)) self._driver._remove_export(self._context, self.share) expected_exec = [ "umount -f %s" % (mount_path,), ] self._os.path.exists.assert_called_with(mount_path) self._os.rmdir.assert_called_with(mount_path) self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) def test_create_snapshot(self): self._driver.create_snapshot(self._context, self.snapshot, self.share_server) expected_exec = [ ("lvcreate -L 1G --name fakesnapshotname --snapshot " "%s/fakename" % (CONF.lvm_share_volume_group,)), ] self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) def test_ensure_share(self): device_name = '/dev/mapper/fakevg-fakename' with mock.patch.object(self._driver, '_mount_device', mock.Mock(return_value='fake_location')): self._driver.ensure_share(self._context, self.share, self.share_server) self._driver._mount_device.assert_called_with(self.share, device_name) self._helper_nfs.create_export.assert_called_once_with( self.server, self.share['name'], recreate=True) def test_delete_share(self): mount_path = self._get_mount_path(self.share) self._helper_nfs.remove_export(mount_path, self.share['name']) self._driver._delete_share(self._context, self.share) def test_delete_snapshot(self): expected_exec = ['lvremove -f fakevg/fakesnapshotname'] self._driver.delete_snapshot(self._context, self.snapshot, self.share_server) self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) def test_delete_share_invalid_share(self): self._driver._get_helper = mock.Mock( 
side_effect=exception.InvalidShare(reason='fake')) self._driver.delete_share(self._context, self.share, self.share_server) def test_delete_share_process_execution_error(self): self.mock_object( self._helper_nfs, 'remove_export', mock.Mock(side_effect=exception.ProcessExecutionError)) self._driver._delete_share(self._context, self.share) self._helper_nfs.remove_export.assert_called_once_with( self.server, self.share['name']) @ddt.data(const.ACCESS_LEVEL_RW, const.ACCESS_LEVEL_RO) def test_update_access(self, access_level): access_rules = [test_generic.get_fake_access_rule( '1.1.1.1', access_level), ] add_rules = [test_generic.get_fake_access_rule( '2.2.2.2', access_level), ] delete_rules = [test_generic.get_fake_access_rule( '3.3.3.3', access_level), ] self._driver.update_access(self._context, self.share, access_rules, add_rules=add_rules, delete_rules=delete_rules, share_server=self.server) (self._driver._helpers[self.share['share_proto']]. update_access.assert_called_once_with( self.server, self.share['name'], access_rules, add_rules=add_rules, delete_rules=delete_rules)) def test_mount_device(self): mount_path = self._get_mount_path(self.share) ret = self._driver._mount_device(self.share, 'fakedevice') expected_exec = [ "mkdir -p %s" % (mount_path,), "mount fakedevice %s" % (mount_path,), "chmod 777 %s" % (mount_path,), ] self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) self.assertEqual(mount_path, ret) def test_mount_device_already(self): def exec_runner(*args, **kwargs): if 'mount' in args and '-l' not in args: raise exception.ProcessExecutionError() else: return 'fakedevice', '' self.mock_object(self._driver, '_execute', exec_runner) mount_path = self._get_mount_path(self.share) ret = self._driver._mount_device(self.share, 'fakedevice') self.assertEqual(mount_path, ret) def test_mount_device_error(self): def exec_runner(*args, **kwargs): if 'mount' in args and '-l' not in args: raise exception.ProcessExecutionError() else: return 'fake', '' self.mock_object(self._driver, '_execute', exec_runner) self.assertRaises(exception.ProcessExecutionError, self._driver._mount_device, self.share, 'fakedevice') def test_get_helper(self): share_cifs = fake_share(share_proto='CIFS') share_nfs = fake_share(share_proto='NFS') share_fake = fake_share(share_proto='FAKE') self.assertEqual(self._driver._get_helper(share_cifs), self._helper_cifs) self.assertEqual(self._driver._get_helper(share_nfs), self._helper_nfs) self.assertRaises(exception.InvalidShare, self._driver._get_helper, share_fake) def _get_mount_path(self, share): return os.path.join(CONF.lvm_share_export_root, share['name']) def test_unmount_device(self): mount_path = self._get_mount_path(self.share) self.mock_object(self._driver, '_execute') self._driver._unmount_device(self.share) self._driver._execute.assert_any_call('umount', mount_path, run_as_root=True) self._driver._execute.assert_any_call('rmdir', mount_path, run_as_root=True) def test_extend_share(self): local_path = self._driver._get_local_path(self.share) self.mock_object(self._driver, '_extend_container') self.mock_object(self._driver, '_execute') self._driver.extend_share(self.share, 3) self._driver._extend_container.assert_called_once_with(self.share, local_path, 3) self._driver._execute.assert_called_once_with('resize2fs', local_path, run_as_root=True) def test_ssh_exec_as_root(self): command = ['fake_command'] self.mock_object(self._driver, '_execute') self._driver._ssh_exec_as_root('fake_server', command) 
self._driver._execute.assert_called_once_with('fake_command', check_exit_code=True) def test_ssh_exec_as_root_with_sudo(self): command = ['sudo', 'fake_command'] self.mock_object(self._driver, '_execute') self._driver._ssh_exec_as_root('fake_server', command) self._driver._execute.assert_called_once_with( 'fake_command', run_as_root=True, check_exit_code=True) def test_extend_container(self): self.mock_object(self._driver, '_try_execute') self._driver._extend_container(self.share, 'device_name', 3) self._driver._try_execute.assert_called_once_with( 'lvextend', '-L', '3G', '-n', 'device_name', run_as_root=True) def test_get_share_server_pools(self): expected_result = [{ 'pool_name': 'lvm-single-pool', 'total_capacity_gb': 33, 'free_capacity_gb': 22, 'reserved_percentage': 0, }, ] self.mock_object( self._driver, '_execute', mock.Mock(return_value=("VSize 33g VFree 22g", None))) self.assertEqual(expected_result, self._driver.get_share_server_pools()) self._driver._execute.assert_called_once_with( 'vgs', 'fakevg', '--rows', '--units', 'g', run_as_root=True) def test_copy_volume_error(self): def _fake_exec(*args, **kwargs): if 'count=0' in args: raise exception.ProcessExecutionError() self.mock_object(self._driver, '_execute', mock.Mock(side_effect=_fake_exec)) self._driver._copy_volume('src', 'dest', 1) self._driver._execute.assert_any_call('dd', 'count=0', 'if=src', 'of=dest', 'iflag=direct', 'oflag=direct', run_as_root=True) self._driver._execute.assert_any_call('dd', 'if=src', 'of=dest', 'count=1024', 'bs=1M', run_as_root=True) def test_update_share_stats(self): self.mock_object(self._driver, 'get_share_server_pools', mock.Mock(return_value='test-pool')) self._driver._update_share_stats() self.assertEqual('LVM', self._driver._stats['share_backend_name']) self.assertEqual('NFS_CIFS', self._driver._stats['storage_protocol']) self.assertEqual(50, self._driver._stats['reserved_percentage']) self.assertIsNone(self._driver._stats['consistency_group_support']) self.assertEqual(True, self._driver._stats['snapshot_support']) self.assertEqual('LVMShareDriver', self._driver._stats['driver_name']) self.assertEqual('test-pool', self._driver._stats['pools']) manila-2.0.0/manila/tests/share/drivers/__init__.py0000664000567000056710000000000012701407107023374 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/hpe/0000775000567000056710000000000012701407265022056 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/hpe/test_hpe_3par_constants.py0000664000567000056710000000661712701407107027271 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
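# The LVM driver tests above compare against literal command lines such as
# 'lvcreate -L 1G -n fakename fakevg' and, with two mirrors configured,
# 'lvcreate -L 2048G -n fakename fakevg -m 2 --nosync -R 2'. The helper below
# is a hypothetical reconstruction of how such a command string could be
# assembled from the share size, share name, volume group and mirror count;
# it only mirrors the strings the tests expect and is not the driver's
# actual code.
def build_lvcreate_cmd(size_gb, name, volume_group, mirrors=0, region_size=2):
    cmd = 'lvcreate -L %sG -n %s %s' % (size_gb, name, volume_group)
    if mirrors:
        # --nosync skips the initial mirror sync, -R sets the mirror region size
        cmd += ' -m %s --nosync -R %s' % (mirrors, region_size)
    return cmd

assert (build_lvcreate_cmd(1, 'fakename', 'fakevg') ==
        'lvcreate -L 1G -n fakename fakevg')
assert (build_lvcreate_cmd('2048', 'fakename', 'fakevg', mirrors=2) ==
        'lvcreate -L 2048G -n fakename fakevg -m 2 --nosync -R 2')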
CIFS = 'CIFS' SMB_LOWER = 'smb' NFS = 'NFS' NFS_LOWER = 'nfs' IP = 'ip' USER = 'user' USERNAME = 'USERNAME_0' PASSWORD = 'PASSWORD_0' READ_WRITE = 'rw' READ_ONLY = 'ro' SAN_LOGIN = 'testlogin4san' SAN_PASSWORD = 'testpassword4san' API_URL = 'https://1.2.3.4:8080/api/v1' TIMEOUT = 60 PORT = 22 SHARE_TYPE_ID = 123456789 CIDR_PREFIX = '24' # Constants to use with Mock and expect in results EXPECTED_IP_10203040 = '10.20.30.40' EXPECTED_IP_1234 = '1.2.3.4' EXPECTED_IP_127 = '127.0.0.1' EXPECTED_IP_127_2 = '127.0.0.2' EXPECTED_ACCESS_LEVEL = 'foo_access' EXPECTED_SUBNET = '255.255.255.0' # based on CIDR_PREFIX above EXPECTED_VLAN_TYPE = 'vlan' EXPECTED_VLAN_TAG = '101' EXPECTED_SERVER_ID = '1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e' EXPECTED_PROJECT_ID = 'osf-nfs-project-id' EXPECTED_SHARE_ID = 'osf-share-id' EXPECTED_SHARE_NAME = 'share-name' EXPECTED_HOST = 'hostname@backend#pool' EXPECTED_SHARE_PATH = '/anyfpg/anyvfs/anyfstore' EXPECTED_SIZE_1 = 1 EXPECTED_SIZE_2 = 2 EXPECTED_SNAP_NAME = 'osf-snap-name' EXPECTED_SNAP_ID = 'osf-snap-id' EXPECTED_STATS = {'test': 'stats'} EXPECTED_FPG = 'FPG_1' EXPECTED_FSTORE = EXPECTED_PROJECT_ID EXPECTED_VFS = 'test_vfs' EXPECTED_HPE_DEBUG = True EXPECTED_COMMENT = "OpenStack Manila - foo-comment" EXPECTED_EXTRA_SPECS = {} EXPECTED_LOCATION = ':'.join((EXPECTED_IP_1234, EXPECTED_SHARE_PATH)) EXPECTED_SUPER_SHARE = 'OPENSTACK_SUPER_SHARE' EXPECTED_SUPER_SHARE_COMMENT = ('OpenStack super share used to delete nested ' 'shares.') EXPECTED_CIFS_DOMAIN = 'LOCAL_CLUSTER' EXPECTED_MOUNT_PATH = '/mnt/' GET_FSQUOTA = {'message': None, 'total': 1, 'members': [{'hardBlock': '1024', 'softBlock': '1024'}]} EXPECTED_FSIP = { 'fspool': EXPECTED_FPG, 'vfs': EXPECTED_VFS, 'address': EXPECTED_IP_1234, 'prefixLen': EXPECTED_SUBNET, 'vlanTag': EXPECTED_VLAN_TAG, } OTHER_FSIP = { 'fspool': EXPECTED_FPG, 'vfs': EXPECTED_VFS, 'address': '9.9.9.9', 'prefixLen': EXPECTED_SUBNET, 'vlanTag': EXPECTED_VLAN_TAG, } NFS_SHARE_INFO = { 'project_id': EXPECTED_PROJECT_ID, 'id': EXPECTED_SHARE_ID, 'share_proto': NFS, 'export_location': EXPECTED_LOCATION, 'size': 1234, } ACCESS_INFO = { 'access_type': IP, 'access_to': EXPECTED_IP_1234, 'access_level': READ_WRITE, } SNAPSHOT_INFO = { 'name': EXPECTED_SNAP_NAME, 'id': EXPECTED_SNAP_ID, 'share': { 'project_id': EXPECTED_PROJECT_ID, 'id': EXPECTED_SHARE_ID, 'share_proto': NFS, 'export_location': EXPECTED_LOCATION, }, } SNAPSHOT_INSTANCE = { 'name': EXPECTED_SNAP_NAME, 'id': EXPECTED_SNAP_ID, 'share_id': EXPECTED_SHARE_ID, 'share_proto': NFS, } class FakeException(Exception): pass FAKE_EXCEPTION = FakeException("Fake exception for testing.") manila-2.0.0/manila/tests/share/drivers/hpe/__init__.py0000664000567000056710000000000012701407107024150 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/hpe/test_hpe_3par_mediator.py0000664000567000056710000033106012701407107027052 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
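# In the constants module above, EXPECTED_SUBNET ('255.255.255.0') is noted as
# being derived from CIDR_PREFIX ('24'), and EXPECTED_LOCATION is the colon
# join of EXPECTED_IP_1234 and EXPECTED_SHARE_PATH. The snippet below shows
# both relationships using the standard ipaddress module (a backport is
# needed on Python 2); it is illustrative only and not part of the test
# modules.
import ipaddress

prefix = 24
netmask = str(ipaddress.ip_network(u'0.0.0.0/%d' % prefix).netmask)
assert netmask == '255.255.255.0'

location = ':'.join(('1.2.3.4', '/anyfpg/anyvfs/anyfstore'))
assert location == '1.2.3.4:/anyfpg/anyvfs/anyfstore'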
import sys

import ddt
import mock

if 'hpe3parclient' not in sys.modules:
    sys.modules['hpe3parclient'] = mock.Mock()

from manila import exception
from manila.share.drivers.hpe import hpe_3par_mediator as hpe3parmediator
from manila import test
from manila.tests.share.drivers.hpe import test_hpe_3par_constants as constants
from manila import utils
from oslo_utils import units
import six

CLIENT_VERSION_MIN_OK = hpe3parmediator.MIN_CLIENT_VERSION
TEST_WSAPI_VERSION_STR = '30201292'


@ddt.ddt
class HPE3ParMediatorTestCase(test.TestCase):

    def setUp(self):
        super(HPE3ParMediatorTestCase, self).setUp()

        # This is the fake client to use.
        self.mock_client = mock.Mock()

        # Take over the hpe3parclient module and stub the constructor.
        hpe3parclient = sys.modules['hpe3parclient']
        hpe3parclient.version_tuple = CLIENT_VERSION_MIN_OK

        # Need a fake constructor to return the fake client.
        # This is also used for constructor error tests.
        self.mock_object(hpe3parclient.file_client,
                         'HPE3ParFilePersonaClient')
        self.mock_client_constructor = (
            hpe3parclient.file_client.HPE3ParFilePersonaClient
        )
        self.mock_client = self.mock_client_constructor()

        # Set the mediator to use in tests.
        self.mediator = hpe3parmediator.HPE3ParMediator(
            hpe3par_username=constants.USERNAME,
            hpe3par_password=constants.PASSWORD,
            hpe3par_api_url=constants.API_URL,
            hpe3par_debug=constants.EXPECTED_HPE_DEBUG,
            hpe3par_san_ip=constants.EXPECTED_IP_1234,
            hpe3par_san_login=constants.SAN_LOGIN,
            hpe3par_san_password=constants.SAN_PASSWORD,
            hpe3par_san_ssh_port=constants.PORT,
            hpe3par_share_ip_address=constants.EXPECTED_IP_10203040,
            hpe3par_cifs_admin_access_username=constants.USERNAME,
            hpe3par_cifs_admin_access_password=constants.PASSWORD,
            hpe3par_cifs_admin_access_domain=constants.EXPECTED_CIFS_DOMAIN,
            hpe3par_share_mount_path=constants.EXPECTED_MOUNT_PATH,
            ssh_conn_timeout=constants.TIMEOUT)

    def test_mediator_no_client(self):
        """Test missing hpe3parclient error."""
        mock_log = self.mock_object(hpe3parmediator, 'LOG')
        self.mock_object(hpe3parmediator.HPE3ParMediator, 'no_client', None)

        self.assertRaises(exception.HPE3ParInvalidClient,
                          self.mediator.do_setup)

        mock_log.error.assert_called_once_with(mock.ANY)

    def test_mediator_setup_client_init_error(self):
        """Any client init exceptions should result in a ManilaException."""
        self.mock_client_constructor.side_effect = (
            Exception('Any exception. E.g., bad version or some other '
                      'non-Manila Exception.'))
        self.assertRaises(exception.ManilaException, self.mediator.do_setup)

    def test_mediator_setup_client_ssh_error(self):
        # This could be anything the client comes up with, but the
        # mediator should turn it into a ManilaException.
        non_manila_exception = Exception('non-manila-except')
        self.mock_client.setSSHOptions.side_effect = non_manila_exception

        self.assertRaises(exception.ManilaException, self.mediator.do_setup)

        self.mock_client.assert_has_calls(
            [mock.call.setSSHOptions(constants.EXPECTED_IP_1234,
                                     constants.SAN_LOGIN,
                                     constants.SAN_PASSWORD,
                                     port=constants.PORT,
                                     conn_timeout=constants.TIMEOUT)])

    def test_mediator_vfs_exception(self):
        """Backend exception during get_vfs_name."""
        self.init_mediator()

        self.mock_client.getvfs.side_effect = Exception('non-manila-except')
        self.assertRaises(exception.ManilaException,
                          self.mediator.get_vfs_name,
                          fpg=constants.EXPECTED_FPG)

        expected_calls = [
            mock.call.getvfs(fpg=constants.EXPECTED_FPG, vfs=None),
        ]
        self.mock_client.assert_has_calls(expected_calls)

    def test_mediator_vfs_not_found(self):
        """VFS not found."""
        self.init_mediator()

        self.mock_client.getvfs.return_value = {'total': 0}
        self.assertRaises(exception.ManilaException,
                          self.mediator.get_vfs_name,
                          fpg=constants.EXPECTED_FPG)

        expected_calls = [
            mock.call.getvfs(fpg=constants.EXPECTED_FPG, vfs=None),
        ]
        self.mock_client.assert_has_calls(expected_calls)

    def init_mediator(self):
        """Basic mediator setup for re-use with tests that need one."""
        self.mock_client.getWsApiVersion.return_value = {
            'build': TEST_WSAPI_VERSION_STR,
        }
        self.mock_client.getvfs.return_value = {
            'total': 1,
            'members': [{'vfsname': constants.EXPECTED_VFS}]
        }
        self.mock_client.getfshare.return_value = {
            'total': 1,
            'members': [
                {'fstoreName': constants.EXPECTED_FSTORE,
                 'shareName': constants.EXPECTED_SHARE_ID,
                 'shareDir': constants.EXPECTED_SHARE_PATH,
                 'share_proto': constants.NFS,
                 'sharePath': constants.EXPECTED_SHARE_PATH,
                 'comment': constants.EXPECTED_COMMENT,
                 }]
        }
        self.mock_client.setfshare.return_value = []
        self.mock_client.setfsquota.return_value = []
        self.mock_client.getfsquota.return_value = constants.GET_FSQUOTA
        self.mediator.do_setup()

    def test_mediator_setup_success(self):
        """Do a mediator setup without errors."""
        self.init_mediator()

        self.assertIsNotNone(self.mediator._client)

        expected_calls = [
            mock.call.setSSHOptions(constants.EXPECTED_IP_1234,
                                    constants.SAN_LOGIN,
                                    constants.SAN_PASSWORD,
                                    port=constants.PORT,
                                    conn_timeout=constants.TIMEOUT),
            mock.call.getWsApiVersion(),
            mock.call.debug_rest(constants.EXPECTED_HPE_DEBUG)
        ]
        self.mock_client.assert_has_calls(expected_calls)

    def test_mediator_client_login_error(self):
        """Test exception during login."""
        self.init_mediator()

        self.mock_client.login.side_effect = constants.FAKE_EXCEPTION
        self.assertRaises(exception.ShareBackendException,
                          self.mediator._wsapi_login)

        expected_calls = [mock.call.login(constants.USERNAME,
                                          constants.PASSWORD)]
        self.mock_client.assert_has_calls(expected_calls)

    def test_mediator_client_logout_error(self):
        """Test exception during logout."""
        self.init_mediator()

        mock_log = self.mock_object(hpe3parmediator, 'LOG')
        fake_exception = constants.FAKE_EXCEPTION
        self.mock_client.http.unauthenticate.side_effect = fake_exception

        self.mediator._wsapi_logout()

        # Warning is logged (no exception thrown).
self.assertTrue(mock_log.warning.called) expected_calls = [mock.call.http.unauthenticate()] self.mock_client.assert_has_calls(expected_calls) def test_mediator_client_version_unsupported(self): """Try a client with version less than minimum.""" self.hpe3parclient = sys.modules['hpe3parclient'] self.hpe3parclient.version_tuple = (CLIENT_VERSION_MIN_OK[0], CLIENT_VERSION_MIN_OK[1], CLIENT_VERSION_MIN_OK[2] - 1) mock_log = self.mock_object(hpe3parmediator, 'LOG') self.assertRaises(exception.HPE3ParInvalidClient, self.init_mediator) mock_log.error.assert_called_once_with(mock.ANY) def test_mediator_client_version_supported(self): """Try a client with a version greater than the minimum.""" # The setup success already tests the min version. Try version > min. self.hpe3parclient = sys.modules['hpe3parclient'] self.hpe3parclient.version_tuple = (CLIENT_VERSION_MIN_OK[0], CLIENT_VERSION_MIN_OK[1], CLIENT_VERSION_MIN_OK[2] + 1) self.init_mediator() expected_calls = [ mock.call.setSSHOptions(constants.EXPECTED_IP_1234, constants.SAN_LOGIN, constants.SAN_PASSWORD, port=constants.PORT, conn_timeout=constants.TIMEOUT), mock.call.getWsApiVersion(), mock.call.debug_rest(constants.EXPECTED_HPE_DEBUG) ] self.mock_client.assert_has_calls(expected_calls) def test_mediator_client_version_exception(self): """Test the getWsApiVersion exception handling.""" self.mock_client.getWsApiVersion.side_effect = constants.FAKE_EXCEPTION self.assertRaises(exception.ShareBackendException, self.init_mediator) def test_mediator_client_version_bad_return_value(self): """Test the getWsApiVersion exception handling with bad value.""" # Expecting a dict with 'build' in it. This would fail badly. self.mock_client.getWsApiVersion.return_value = 'bogus' self.assertRaises(exception.ShareBackendException, self.mediator.do_setup) def get_expected_calls_for_create_share(self, client_version, expected_fpg, expected_vfsname, expected_protocol, extra_specs, expected_project_id, expected_share_id): expected_sharedir = expected_share_id createfshare_kwargs = dict(comment=mock.ANY, fpg=expected_fpg, sharedir=expected_sharedir, fstore=expected_project_id) if expected_protocol == constants.NFS_LOWER: createfshare_kwargs['clientip'] = '127.0.0.1' # Options from extra-specs. opt_string = extra_specs.get('hpe3par:nfs_options', []) opt_list = opt_string.split(',') # Options that the mediator adds. 
nfs_options = ['rw', 'no_root_squash', 'insecure'] nfs_options += opt_list expected_options = ','.join(nfs_options) createfshare_kwargs['options'] = OptionMatcher( self.assertListEqual, expected_options) expected_calls = [ mock.call.createfstore(expected_vfsname, expected_project_id, comment=mock.ANY, fpg=expected_fpg), mock.call.getfsquota(fpg=expected_fpg, vfs=expected_vfsname, fstore=expected_project_id), mock.call.setfsquota(expected_vfsname, fpg=expected_fpg, hcapacity='2048', scapacity='2048', fstore=expected_project_id), mock.call.createfshare(expected_protocol, expected_vfsname, expected_share_id, **createfshare_kwargs), mock.call.getfshare(expected_protocol, expected_share_id, fpg=expected_fpg, vfs=expected_vfsname, fstore=expected_project_id)] else: smb_opts = (hpe3parmediator.ACCESS_BASED_ENUM, hpe3parmediator.CONTINUOUS_AVAIL, hpe3parmediator.CACHE) for smb_opt in smb_opts: opt_value = extra_specs.get('hpe3par:smb_%s' % smb_opt) if opt_value: opt_key = hpe3parmediator.SMB_EXTRA_SPECS_MAP[smb_opt] createfshare_kwargs[opt_key] = opt_value expected_calls = [ mock.call.createfstore(expected_vfsname, expected_project_id, comment=mock.ANY, fpg=expected_fpg), mock.call.getfsquota(fpg=expected_fpg, vfs=expected_vfsname, fstore=expected_project_id), mock.call.setfsquota(expected_vfsname, fpg=expected_fpg, hcapacity='2048', scapacity='2048', fstore=expected_project_id), mock.call.createfshare(expected_protocol, expected_vfsname, expected_share_id, **createfshare_kwargs), mock.call.getfshare(expected_protocol, expected_share_id, fpg=expected_fpg, vfs=expected_vfsname, fstore=expected_project_id)] return expected_calls @staticmethod def _build_smb_extra_specs(**kwargs): extra_specs = {'driver_handles_share_servers': False} for k, v in kwargs.items(): extra_specs['hpe3par:smb_%s' % k] = v return extra_specs @ddt.data(((4, 0, 0), None, None, None), ((4, 0, 0), 'true', None, None), ((4, 0, 0), None, 'false', None), ((4, 0, 0), None, 'false', None), ((4, 0, 0), None, None, 'optimized'), ((4, 0, 0), 'true', 'false', 'optimized')) @ddt.unpack def test_mediator_create_cifs_share(self, client_version, abe, ca, cache): self.hpe3parclient = sys.modules['hpe3parclient'] self.hpe3parclient.version_tuple = client_version self.init_mediator() self.mock_client.getfshare.return_value = { 'message': None, 'total': 1, 'members': [{'shareName': constants.EXPECTED_SHARE_NAME}] } extra_specs = self._build_smb_extra_specs(access_based_enum=abe, continuous_avail=ca, cache=cache) location = self.mediator.create_share(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.CIFS, extra_specs, constants.EXPECTED_FPG, constants.EXPECTED_VFS, size=constants.EXPECTED_SIZE_1) self.assertEqual(constants.EXPECTED_SHARE_NAME, location) expected_calls = self.get_expected_calls_for_create_share( client_version, constants.EXPECTED_FPG, constants.EXPECTED_VFS, constants.SMB_LOWER, extra_specs, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID) self.mock_client.assert_has_calls(expected_calls) @ddt.data('ro', 'rw', 'no_root_squash', 'root_squash', 'secure', 'insecure', 'hide,insecure,no_wdelay,ro,bogus,root_squash,test') def test_mediator_create_nfs_share_bad_options(self, nfs_options): self.init_mediator() extra_specs = {'hpe3par:nfs_options': nfs_options} self.assertRaises(exception.InvalidInput, self.mediator.create_share, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS.lower(), extra_specs, constants.EXPECTED_FPG, constants.EXPECTED_VFS, size=constants.EXPECTED_SIZE_1) 
self.assertFalse(self.mock_client.createfshare.called) @ddt.data('sync', 'no_wdelay,sec=sys,hide,sync') def test_mediator_create_nfs_share(self, nfs_options): self.init_mediator() self.mock_client.getfshare.return_value = { 'message': None, 'total': 1, 'members': [{'sharePath': constants.EXPECTED_SHARE_PATH}] } extra_specs = {'hpe3par:nfs_options': nfs_options} location = self.mediator.create_share(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS.lower(), extra_specs, constants.EXPECTED_FPG, constants.EXPECTED_VFS, size=constants.EXPECTED_SIZE_1) self.assertEqual(constants.EXPECTED_SHARE_PATH, location) expected_calls = self.get_expected_calls_for_create_share( hpe3parmediator.MIN_CLIENT_VERSION, constants.EXPECTED_FPG, constants.EXPECTED_VFS, constants.NFS.lower(), extra_specs, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID) self.mock_client.assert_has_calls(expected_calls) def test_mediator_create_nfs_share_get_exception(self): self.init_mediator() self.mock_client.getfshare.side_effect = constants.FAKE_EXCEPTION self.assertRaises(exception.ShareBackendException, self.mediator.create_share, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS.lower(), constants.EXPECTED_EXTRA_SPECS, constants.EXPECTED_FPG, constants.EXPECTED_VFS, size=constants.EXPECTED_SIZE_1) @ddt.data(0, 2) def test_mediator_create_nfs_share_get_fail(self, count): self.init_mediator() self.mock_client.getfshare.return_value = {'total': count} self.assertRaises(exception.ShareBackendException, self.mediator.create_share, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS.lower(), constants.EXPECTED_EXTRA_SPECS, constants.EXPECTED_FPG, constants.EXPECTED_VFS, size=constants.EXPECTED_SIZE_1) @ddt.data(True, False) def test_mediator_create_cifs_share_from_snapshot(self, require_cifs_ip): self.init_mediator() self.mediator.hpe3par_require_cifs_ip = require_cifs_ip self.mock_client.getfsnap.return_value = { 'message': None, 'total': 1, 'members': [{'snapName': constants.EXPECTED_SNAP_ID, 'fstoreName': constants.EXPECTED_FSTORE}] } location = self.mediator.create_share_from_snapshot( constants.EXPECTED_SHARE_ID, constants.CIFS, constants.EXPECTED_EXTRA_SPECS, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.EXPECTED_SNAP_ID, constants.EXPECTED_FPG, constants.EXPECTED_VFS) self.assertEqual(constants.EXPECTED_SHARE_ID, location) expected_kwargs = { 'comment': mock.ANY, 'fpg': constants.EXPECTED_FPG, 'fstore': constants.EXPECTED_FSTORE, 'sharedir': '.snapshot/%s/%s' % (constants.EXPECTED_SNAP_ID, constants.EXPECTED_SHARE_ID), } if require_cifs_ip: expected_kwargs['allowip'] = constants.EXPECTED_IP_127 expected_calls = [ mock.call.getfsnap('*_%s' % constants.EXPECTED_SNAP_ID, vfs=constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG, pat=True, fstore=constants.EXPECTED_FSTORE), mock.call.createfshare(constants.SMB_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SHARE_ID, **expected_kwargs), mock.call.getfshare(constants.SMB_LOWER, constants.EXPECTED_SHARE_ID, fpg=constants.EXPECTED_FPG, vfs=constants.EXPECTED_VFS, fstore=constants.EXPECTED_FSTORE)] self.mock_client.assert_has_calls(expected_calls) def test_mediator_create_nfs_share_from_snapshot(self): self.init_mediator() self.mock_client.getfsnap.return_value = { 'message': None, 'total': 1, 'members': [{'snapName': constants.EXPECTED_SNAP_ID, 'fstoreName': constants.EXPECTED_FSTORE}] } location = self.mediator.create_share_from_snapshot( constants.EXPECTED_SHARE_ID, 
constants.NFS, constants.EXPECTED_EXTRA_SPECS, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.EXPECTED_SNAP_ID, constants.EXPECTED_FPG, constants.EXPECTED_VFS) self.assertEqual(constants.EXPECTED_SHARE_PATH, location) expected_calls = [ mock.call.getfsnap('*_%s' % constants.EXPECTED_SNAP_ID, vfs=constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG, pat=True, fstore=constants.EXPECTED_FSTORE), mock.call.createfshare(constants.NFS_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SHARE_ID, comment=mock.ANY, fpg=constants.EXPECTED_FPG, sharedir='.snapshot/%s/%s' % (constants.EXPECTED_SNAP_ID, constants.EXPECTED_SHARE_ID), fstore=constants.EXPECTED_FSTORE, clientip=constants.EXPECTED_IP_127_2, options='ro,no_root_squash,insecure'), mock.call.getfshare(constants.NFS_LOWER, constants.EXPECTED_SHARE_ID, fpg=constants.EXPECTED_FPG, vfs=constants.EXPECTED_VFS, fstore=constants.EXPECTED_FSTORE)] self.mock_client.assert_has_calls(expected_calls) def test_mediator_create_share_from_snap_not_found(self): self.init_mediator() self.mock_client.getfsnap.return_value = { 'message': None, 'total': 0, 'members': [] } self.assertRaises(exception.ShareBackendException, self.mediator.create_share_from_snapshot, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_EXTRA_SPECS, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.EXPECTED_SNAP_ID, constants.EXPECTED_FPG, constants.EXPECTED_VFS) def test_mediator_delete_nfs_share(self): self.init_mediator() share_id = 'foo' osf_share_id = '-'.join(('osf', share_id)) osf_ro_share_id = '-ro-'.join(('osf', share_id)) fstore = osf_share_id self.mock_object(self.mediator, '_find_fstore', mock.Mock(return_value=fstore)) self.mock_object(self.mediator, '_create_mount_directory', mock.Mock(return_value={})) self.mock_object(self.mediator, '_mount_super_share', mock.Mock(return_value={})) self.mock_object(self.mediator, '_delete_share_directory', mock.Mock(return_value={})) self.mock_object(self.mediator, '_unmount_super_share', mock.Mock(return_value={})) self.mediator.delete_share(constants.EXPECTED_PROJECT_ID, share_id, constants.NFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.removefshare(constants.NFS_LOWER, constants.EXPECTED_VFS, osf_share_id, fpg=constants.EXPECTED_FPG, fstore=fstore), mock.call.removefshare(constants.NFS_LOWER, constants.EXPECTED_VFS, osf_ro_share_id, fpg=constants.EXPECTED_FPG, fstore=fstore), mock.call.createfshare(constants.NFS_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SUPER_SHARE, clientip=None, comment=( constants.EXPECTED_SUPER_SHARE_COMMENT), fpg=constants.EXPECTED_FPG, fstore=fstore, options='rw,no_root_squash,insecure', sharedir=''), mock.call.removefstore(constants.EXPECTED_VFS, fstore, fpg=constants.EXPECTED_FPG), ] self.mock_client.assert_has_calls(expected_calls) expected_mount_path = constants.EXPECTED_MOUNT_PATH + osf_share_id self.mediator._create_mount_directory.assert_called_with( expected_mount_path) self.mediator._mount_super_share.assert_called_with( constants.NFS_LOWER, expected_mount_path, constants.EXPECTED_FPG, constants.EXPECTED_VFS, fstore) self.mediator._delete_share_directory.assert_called_with( expected_mount_path) self.mediator._unmount_super_share.assert_called_with( expected_mount_path) def test_mediator_delete_share_not_found(self): self.init_mediator() self.mock_object(self.mediator, '_find_fstore', mock.Mock(return_value=None)) self.mock_object(self.mediator, '_create_mount_directory', mock.Mock(return_value={})) 
self.mock_object(self.mediator, '_mount_super_share', mock.Mock(return_value={})) self.mock_object(self.mediator, '_delete_share_directory', mock.Mock(return_value={})) self.mock_object(self.mediator, '_unmount_super_share', mock.Mock(return_value={})) self.mediator.delete_share(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.CIFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS) self.assertFalse(self.mock_client.removefshare.called) expected_mount_path = constants.EXPECTED_MOUNT_PATH + ( constants.EXPECTED_SHARE_ID) self.mediator._create_mount_directory.assert_called_with( expected_mount_path) self.mediator._mount_super_share.assert_called_with( constants.SMB_LOWER, expected_mount_path, constants.EXPECTED_FPG, constants.EXPECTED_VFS, None) self.mediator._delete_share_directory.assert_called_with( expected_mount_path) self.mediator._unmount_super_share.assert_called_with( expected_mount_path) def test_mediator_delete_nfs_share_only_readonly(self): self.init_mediator() fstores = (None, constants.EXPECTED_FSTORE) self.mock_object(self.mediator, '_find_fstore', mock.Mock(side_effect=fstores)) self.mock_object(self.mediator, '_create_mount_directory', mock.Mock(return_value={})) self.mock_object(self.mediator, '_mount_super_share', mock.Mock(return_value={})) self.mock_object(self.mediator, '_delete_share_directory', mock.Mock(return_value={})) self.mock_object(self.mediator, '_unmount_super_share', mock.Mock(return_value={})) self.mediator.delete_share(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS) self.mock_client.removefshare.assert_called_once_with( constants.NFS_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SHARE_ID, fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_FSTORE ) expected_mount_path = constants.EXPECTED_MOUNT_PATH + ( constants.EXPECTED_SHARE_ID) self.mediator._create_mount_directory.assert_called_with( expected_mount_path) self.mediator._mount_super_share.assert_called_with( constants.NFS_LOWER, expected_mount_path, constants.EXPECTED_FPG, constants.EXPECTED_VFS, constants.EXPECTED_PROJECT_ID) self.mediator._delete_share_directory.assert_called_with( expected_mount_path) self.mediator._unmount_super_share.assert_called_with( expected_mount_path) def test_mediator_delete_share_exception(self): self.init_mediator() self.mock_client.removefshare.side_effect = Exception( 'removeshare fail.') self.assertRaises(exception.ShareBackendException, self.mediator.delete_share, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.CIFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.removefshare(constants.SMB_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SHARE_ID, fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_FSTORE), ] self.mock_client.assert_has_calls(expected_calls) def test_mediator_delete_fstore_exception(self): self.init_mediator() self.mock_object(self.mediator, '_find_fstore', mock.Mock(return_value=constants.EXPECTED_SHARE_ID)) self.mock_object(self.mediator, '_create_mount_directory', mock.Mock(return_value={})) self.mock_object(self.mediator, '_mount_super_share', mock.Mock(return_value={})) self.mock_object(self.mediator, '_delete_share_directory', mock.Mock(return_value={})) self.mock_object(self.mediator, '_unmount_super_share', mock.Mock(return_value={})) self.mock_client.removefstore.side_effect = Exception( 'removefstore fail.') self.assertRaises(exception.ShareBackendException, self.mediator.delete_share, 
constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.CIFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.removefshare(constants.SMB_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SHARE_ID, fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_SHARE_ID), mock.call.createfshare(constants.SMB_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SUPER_SHARE, allowip=None, comment=( constants.EXPECTED_SUPER_SHARE_COMMENT), fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_SHARE_ID, sharedir=''), mock.call.setfshare(constants.SMB_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SUPER_SHARE, comment=( constants.EXPECTED_SUPER_SHARE_COMMENT), allowperm=( '+' + constants.USERNAME + ':fullcontrol'), fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_SHARE_ID), mock.call.removefstore(constants.EXPECTED_VFS, constants.EXPECTED_SHARE_ID, fpg=constants.EXPECTED_FPG), ] self.mock_client.assert_has_calls(expected_calls) expected_mount_path = constants.EXPECTED_MOUNT_PATH + ( constants.EXPECTED_SHARE_ID) self.mediator._create_mount_directory.assert_called_with( expected_mount_path) self.mediator._mount_super_share.assert_called_with( constants.SMB_LOWER, expected_mount_path, constants.EXPECTED_FPG, constants.EXPECTED_VFS, constants.EXPECTED_SHARE_ID) self.mediator._delete_share_directory.assert_called_with( expected_mount_path) self.mediator._unmount_super_share.assert_called_with( expected_mount_path) def test_mediator_delete_cifs_share(self): self.init_mediator() self.mock_object(self.mediator, '_find_fstore', mock.Mock(return_value=constants.EXPECTED_SHARE_ID)) self.mock_object(self.mediator, '_create_mount_directory', mock.Mock(return_value={})) self.mock_object(self.mediator, '_mount_super_share', mock.Mock(return_value={})) self.mock_object(self.mediator, '_delete_share_directory', mock.Mock(return_value={})) self.mock_object(self.mediator, '_unmount_super_share', mock.Mock(return_value={})) self.mediator.delete_share(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.CIFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.removefshare(constants.SMB_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SHARE_ID, fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_SHARE_ID), mock.call.createfshare(constants.SMB_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SUPER_SHARE, allowip=None, comment=( constants.EXPECTED_SUPER_SHARE_COMMENT), fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_SHARE_ID, sharedir=''), mock.call.setfshare(constants.SMB_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SUPER_SHARE, comment=( constants.EXPECTED_SUPER_SHARE_COMMENT), allowperm=( '+' + constants.USERNAME + ':fullcontrol'), fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_SHARE_ID), mock.call.removefstore(constants.EXPECTED_VFS, constants.EXPECTED_SHARE_ID, fpg=constants.EXPECTED_FPG), ] self.mock_client.assert_has_calls(expected_calls) expected_mount_path = constants.EXPECTED_MOUNT_PATH + ( constants.EXPECTED_SHARE_ID) self.mediator._create_mount_directory.assert_called_with( expected_mount_path) self.mediator._mount_super_share.assert_called_with( constants.SMB_LOWER, expected_mount_path, constants.EXPECTED_FPG, constants.EXPECTED_VFS, constants.EXPECTED_SHARE_ID) self.mediator._delete_share_directory.assert_called_with( expected_mount_path) self.mediator._unmount_super_share.assert_called_with( expected_mount_path) def test_mediator_create_snapshot(self): self.init_mediator() 
self.mediator.create_snapshot(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_NAME, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.createfsnap(constants.EXPECTED_VFS, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SNAP_NAME, fpg=constants.EXPECTED_FPG) ] self.mock_client.assert_has_calls(expected_calls) def test_mediator_create_snapshot_not_allowed(self): self.init_mediator() self.mock_client.getfshare.return_value['members'][0]['shareDir'] = ( None) self.mock_client.getfshare.return_value['members'][0]['sharePath'] = ( 'foo/.snapshot/foo') self.assertRaises(exception.ShareBackendException, self.mediator.create_snapshot, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_NAME, constants.EXPECTED_FPG, constants.EXPECTED_VFS) def test_mediator_create_snapshot_share_not_found(self): self.init_mediator() mock_find_fshare = self.mock_object(self.mediator, '_find_fshare', mock.Mock(return_value=None)) self.assertRaises(exception.ShareBackendException, self.mediator.create_snapshot, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_NAME, constants.EXPECTED_FPG, constants.EXPECTED_VFS) mock_find_fshare.assert_called_once_with(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS) def test_mediator_create_snapshot_backend_exception(self): self.init_mediator() # createfsnap exception self.mock_client.createfsnap.side_effect = Exception( 'createfsnap fail.') self.assertRaises(exception.ShareBackendException, self.mediator.create_snapshot, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_NAME, constants.EXPECTED_FPG, constants.EXPECTED_VFS) def test_mediator_delete_snapshot(self): self.init_mediator() expected_name_from_array = 'name-from-array' self.mock_client.getfsnap.return_value = { 'total': 1, 'members': [ { 'snapName': expected_name_from_array, 'fstoreName': constants.EXPECTED_PROJECT_ID, } ], 'message': None } self.mock_client.getfshare.side_effect = [ # some typical independent NFS share (path) and SMB share (dir) { 'total': 1, 'members': [{'sharePath': '/anyfpg/anyvfs/anyfstore'}] }, { 'total': 1, 'members': [{'shareDir': []}], } ] self.mediator.delete_snapshot(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_NAME, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.getfsnap('*_%s' % constants.EXPECTED_SNAP_NAME, vfs=constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG, pat=True, fstore=constants.EXPECTED_PROJECT_ID), mock.call.getfshare(constants.NFS_LOWER, fpg=constants.EXPECTED_FPG, vfs=constants.EXPECTED_VFS, fstore=constants.EXPECTED_PROJECT_ID), mock.call.getfshare(constants.SMB_LOWER, fpg=constants.EXPECTED_FPG, vfs=constants.EXPECTED_VFS, fstore=constants.EXPECTED_PROJECT_ID), mock.call.removefsnap(constants.EXPECTED_VFS, constants.EXPECTED_PROJECT_ID, fpg=constants.EXPECTED_FPG, snapname=expected_name_from_array), mock.call.startfsnapclean(constants.EXPECTED_FPG, reclaimStrategy='maxspeed') ] self.mock_client.assert_has_calls(expected_calls) def test_mediator_delete_snapshot_not_found(self): self.init_mediator() self.mock_client.getfsnap.return_value = { 'total': 0, 'members': [], } self.mediator.delete_snapshot(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_NAME, 
constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.getfsnap('*_%s' % constants.EXPECTED_SNAP_NAME, vfs=constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG, pat=True, fstore=constants.EXPECTED_SHARE_ID), ] # Code coverage for early exit when nothing to delete. self.mock_client.assert_has_calls(expected_calls) self.assertFalse(self.mock_client.getfshare.called) self.assertFalse(self.mock_client.removefsnap.called) self.assertFalse(self.mock_client.startfsnapclean.called) def test_mediator_delete_snapshot_shared_nfs(self): self.init_mediator() # Mock a share under this snapshot for NFS snapshot_dir = '.snapshot/DT_%s' % constants.EXPECTED_SNAP_NAME snapshot_path = '%s/%s' % (constants.EXPECTED_SHARE_PATH, snapshot_dir) self.mock_client.getfsnap.return_value = { 'total': 1, 'members': [{'snapName': constants.EXPECTED_SNAP_NAME}] } self.mock_client.getfshare.side_effect = [ # some typical independent NFS share (path) and SMB share (dir) { 'total': 1, 'members': [{'sharePath': snapshot_path}], }, { 'total': 0, 'members': [], } ] self.assertRaises(exception.Invalid, self.mediator.delete_snapshot, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_NAME, constants.EXPECTED_FPG, constants.EXPECTED_VFS) def test_mediator_delete_snapshot_shared_smb(self): self.init_mediator() # Mock a share under this snapshot for SMB snapshot_dir = '.snapshot/DT_%s' % constants.EXPECTED_SNAP_NAME self.mock_client.getfsnap.return_value = { 'total': 1, 'members': [{'snapName': constants.EXPECTED_SNAP_NAME}] } self.mock_client.getfshare.side_effect = [ # some typical independent NFS share (path) and SMB share (dir) { 'total': 1, 'members': [{'sharePath': constants.EXPECTED_SHARE_PATH}], }, { 'total': 1, 'members': [{'shareDir': snapshot_dir}], } ] self.assertRaises(exception.Invalid, self.mediator.delete_snapshot, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_NAME, constants.EXPECTED_FPG, constants.EXPECTED_VFS) def _assert_delete_snapshot_raises(self): self.assertRaises(exception.ShareBackendException, self.mediator.delete_snapshot, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_NAME, constants.EXPECTED_FPG, constants.EXPECTED_VFS) def test_mediator_delete_snapshot_backend_exceptions(self): self.init_mediator() # getfsnap exception self.mock_client.getfsnap.side_effect = Exception('getfsnap fail.') self._assert_delete_snapshot_raises() # getfsnap OK self.mock_client.getfsnap.side_effect = None self.mock_client.getfsnap.return_value = { 'total': 1, 'members': [{'snapName': constants.EXPECTED_SNAP_NAME, 'fstoreName': constants.EXPECTED_FSTORE}] } # getfshare exception self.mock_client.getfshare.side_effect = Exception('getfshare fail.') self._assert_delete_snapshot_raises() # getfshare OK def mock_fshare(*args, **kwargs): if args[0] == constants.NFS_LOWER: return { 'total': 1, 'members': [{'sharePath': '/anyfpg/anyvfs/anyfstore', 'fstoreName': constants.EXPECTED_FSTORE}] } else: return { 'total': 1, 'members': [{'shareDir': [], 'fstoreName': constants.EXPECTED_FSTORE}] } self.mock_client.getfshare.side_effect = mock_fshare # removefsnap exception self.mock_client.removefsnap.side_effect = Exception( 'removefsnap fail.') self._assert_delete_snapshot_raises() # removefsnap OK self.mock_client.removefsnap.side_effect = None self.mock_client.removefsnap.return_value = [] # startfsnapclean exception (logged, not raised) 
self.mock_client.startfsnapclean.side_effect = Exception( 'startfsnapclean fail.') mock_log = self.mock_object(hpe3parmediator, 'LOG') self.mediator.delete_snapshot(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_NAME, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.getfsnap('*_%s' % constants.EXPECTED_SNAP_NAME, vfs=constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG, pat=True, fstore=constants.EXPECTED_FSTORE), mock.call.getfshare(constants.NFS_LOWER, fpg=constants.EXPECTED_FPG, vfs=constants.EXPECTED_VFS, fstore=constants.EXPECTED_FSTORE), mock.call.getfshare(constants.SMB_LOWER, fpg=constants.EXPECTED_FPG, vfs=constants.EXPECTED_VFS, fstore=constants.EXPECTED_FSTORE), mock.call.removefsnap(constants.EXPECTED_VFS, constants.EXPECTED_FSTORE, fpg=constants.EXPECTED_FPG, snapname=constants.EXPECTED_SNAP_NAME), mock.call.startfsnapclean(constants.EXPECTED_FPG, reclaimStrategy='maxspeed'), ] self.mock_client.assert_has_calls(expected_calls) self.assertTrue(mock_log.debug.called) self.assertTrue(mock_log.exception.called) @ddt.data(six.text_type('volname.1'), ['volname.2', 'volname.3']) def test_mediator_get_fpg_status(self, volume_name_or_list): """Mediator converts client stats to capacity result.""" expected_capacity = constants.EXPECTED_SIZE_2 expected_free = constants.EXPECTED_SIZE_1 self.init_mediator() self.mock_client.getfpg.return_value = { 'total': 1, 'members': [ { 'capacityKiB': str(expected_capacity * units.Mi), 'availCapacityKiB': str(expected_free * units.Mi), 'vvs': volume_name_or_list, } ], 'message': None, } self.mock_client.getfsquota.return_value = { 'total': 3, 'members': [ {'hardBlock': 1 * units.Ki}, {'hardBlock': 2 * units.Ki}, {'hardBlock': 3 * units.Ki}, ], 'message': None, } self.mock_client.getVolume.return_value = { 'provisioningType': hpe3parmediator.DEDUPE} expected_result = { 'free_capacity_gb': expected_free, 'hpe3par_flash_cache': False, 'hp3par_flash_cache': False, 'dedupe': True, 'thin_provisioning': True, 'total_capacity_gb': expected_capacity, 'provisioned_capacity_gb': 6, } result = self.mediator.get_fpg_status(constants.EXPECTED_FPG) self.assertEqual(expected_result, result) expected_calls = [ mock.call.getfpg(constants.EXPECTED_FPG) ] self.mock_client.assert_has_calls(expected_calls) def test_mediator_get_fpg_status_exception(self): """Exception during get_fpg_status call to getfpg.""" self.init_mediator() self.mock_client.getfpg.side_effect = constants.FAKE_EXCEPTION self.assertRaises(exception.ShareBackendException, self.mediator.get_fpg_status, constants.EXPECTED_FPG) expected_calls = [mock.call.getfpg(constants.EXPECTED_FPG)] self.mock_client.assert_has_calls(expected_calls) def test_mediator_get_fpg_status_error(self): """Unexpected result from getfpg during get_fpg_status.""" self.init_mediator() self.mock_client.getfpg.return_value = {'total': 0} self.assertRaises(exception.ShareBackendException, self.mediator.get_fpg_status, constants.EXPECTED_FPG) expected_calls = [mock.call.getfpg(constants.EXPECTED_FPG)] self.mock_client.assert_has_calls(expected_calls) def test_mediator_get_fpg_status_bad_prov_type(self): """Test get_fpg_status handling of unexpected provisioning type.""" self.init_mediator() self.mock_client.getfpg.return_value = { 'total': 1, 'members': [ { 'capacityKiB': '1', 'availCapacityKiB': '1', 'vvs': 'foo', } ], 'message': None, } self.mock_client.getVolume.return_value = { 'provisioningType': 'BOGUS'} self.assertRaises(exception.ShareBackendException, 
self.mediator.get_fpg_status, constants.EXPECTED_FPG) expected_calls = [mock.call.getfpg(constants.EXPECTED_FPG)] self.mock_client.assert_has_calls(expected_calls) def test_mediator_get_provisioned_error(self): """Test error during get provisioned GB.""" self.init_mediator() error_return = {'message': 'Some error happened.'} self.mock_client.getfsquota.return_value = error_return self.assertRaises(exception.ShareBackendException, self.mediator.get_provisioned_gb, constants.EXPECTED_FPG) expected_calls = [mock.call.getfsquota(fpg=constants.EXPECTED_FPG)] self.mock_client.assert_has_calls(expected_calls) def test_mediator_get_provisioned_exception(self): """Test exception during get provisioned GB.""" self.init_mediator() self.mock_client.getfsquota.side_effect = constants.FAKE_EXCEPTION self.assertRaises(exception.ShareBackendException, self.mediator.get_provisioned_gb, constants.EXPECTED_FPG) expected_calls = [mock.call.getfsquota(fpg=constants.EXPECTED_FPG)] self.mock_client.assert_has_calls(expected_calls) def test_mediator_allow_ip_ro_access_cifs_error(self): self.init_mediator() self.assertRaises(exception.InvalidShareAccess, self.mediator.allow_access, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.CIFS, constants.EXPECTED_EXTRA_SPECS, constants.IP, constants.EXPECTED_IP_10203040, constants.READ_ONLY, constants.EXPECTED_FPG, constants.EXPECTED_VFS) @ddt.data(constants.CIFS, constants.NFS) def test_mediator_allow_rw_snapshot_error(self, proto): self.init_mediator() getfshare_result = { 'shareName': 'foo_ro_name', 'fstoreName': 'foo_fstore', 'comment': 'foo_comment', } path = 'foo/.snapshot/foo' if proto == constants.NFS: getfshare_result['sharePath'] = path else: getfshare_result['shareDir'] = path self.mock_client.getfshare.return_value = { 'total': 1, 'members': [getfshare_result], 'message': None, } self.assertRaises(exception.InvalidShareAccess, self.mediator.allow_access, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, proto, constants.EXPECTED_EXTRA_SPECS, constants.IP, constants.EXPECTED_IP_10203040, constants.READ_WRITE, constants.EXPECTED_FPG, constants.EXPECTED_VFS) @ddt.data((constants.READ_WRITE, True), (constants.READ_WRITE, False), (constants.READ_ONLY, True), (constants.READ_ONLY, False)) @ddt.unpack def test_mediator_allow_user_access_cifs(self, access_level, use_other): """"Allow user access to cifs share.""" self.init_mediator() if use_other: # Don't find share until second attempt. 
findings = (None, self.mock_client.getfshare.return_value['members'][0]) mock_find_fshare = self.mock_object( self.mediator, '_find_fshare', mock.Mock(side_effect=findings)) if access_level == constants.READ_ONLY: expected_allowperm = '+%s:read' % constants.USERNAME else: expected_allowperm = '+%s:fullcontrol' % constants.USERNAME self.mediator.allow_access(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.CIFS, constants.EXPECTED_EXTRA_SPECS, constants.USER, constants.USERNAME, access_level, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.setfshare(constants.SMB_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SHARE_ID, allowperm=expected_allowperm, comment=constants.EXPECTED_COMMENT, fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_FSTORE) ] self.mock_client.assert_has_calls(expected_calls) if use_other: readonly = access_level == constants.READ_ONLY expected_find_calls = [ mock.call(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.SMB_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, readonly=readonly), mock.call(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.SMB_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, readonly=not readonly), ] mock_find_fshare.assert_has_calls(expected_find_calls) @ddt.data(constants.CIFS, constants.NFS) def test_mediator_deny_rw_snapshot_error(self, proto): self.init_mediator() getfshare_result = { 'shareName': 'foo_ro_name', 'fstoreName': 'foo_fstore', 'comment': 'foo_comment', } path = 'foo/.snapshot/foo' if proto == constants.NFS: getfshare_result['sharePath'] = path else: getfshare_result['shareDir'] = path self.mock_client.getfshare.return_value = { 'total': 1, 'members': [getfshare_result], 'message': None, } mock_log = self.mock_object(hpe3parmediator, 'LOG') self.mediator.deny_access(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, proto, constants.IP, constants.EXPECTED_IP_10203040, constants.READ_WRITE, constants.EXPECTED_FPG, constants.EXPECTED_VFS) self.assertFalse(self.mock_client.setfshare.called) self.assertTrue(mock_log.error.called) def test_mediator_deny_user_access_cifs(self): """"Deny user access to cifs share.""" self.init_mediator() expected_denyperm = '-%s:fullcontrol' % constants.USERNAME self.mediator.deny_access(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.CIFS, constants.USER, constants.USERNAME, constants.READ_WRITE, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.setfshare(constants.SMB_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SHARE_ID, allowperm=expected_denyperm, comment=constants.EXPECTED_COMMENT, fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_FSTORE) ] self.mock_client.assert_has_calls(expected_calls) def test_mediator_allow_ip_access_cifs(self): """"Allow ip access to cifs share.""" self.init_mediator() expected_allowip = '+%s' % constants.EXPECTED_IP_1234 self.mediator.allow_access(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.CIFS, constants.EXPECTED_EXTRA_SPECS, constants.IP, constants.EXPECTED_IP_1234, constants.READ_WRITE, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.setfshare(constants.SMB_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SHARE_ID, allowip=expected_allowip, comment=constants.EXPECTED_COMMENT, fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_FSTORE) ] self.mock_client.assert_has_calls(expected_calls) def test_mediator_deny_ip_access_cifs(self): """"Deny ip 
access to cifs share.""" self.init_mediator() expected_denyip = '-%s' % constants.EXPECTED_IP_1234 self.mediator.deny_access(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.CIFS, constants.IP, constants.EXPECTED_IP_1234, constants.READ_WRITE, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.setfshare(constants.SMB_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SHARE_ID, allowip=expected_denyip, comment=constants.EXPECTED_COMMENT, fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_FSTORE) ] self.mock_client.assert_has_calls(expected_calls) def test_mediator_allow_ip_access_nfs(self): """"Allow ip access to nfs share.""" self.init_mediator() already_exists = (hpe3parmediator.IP_ALREADY_EXISTS % constants.EXPECTED_IP_1234) self.mock_client.setfshare.side_effect = ([], [already_exists]) expected_clientip = '+%s' % constants.EXPECTED_IP_1234 for _ in range(2): # Test 2nd allow w/ already exists message. self.mediator.allow_access(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_EXTRA_SPECS, constants.IP, constants.EXPECTED_IP_1234, constants.READ_WRITE, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = 2 * [ mock.call.setfshare(constants.NFS.lower(), constants.EXPECTED_VFS, constants.EXPECTED_SHARE_ID, clientip=expected_clientip, fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_FSTORE, comment=constants.EXPECTED_COMMENT), ] self.mock_client.assert_has_calls(expected_calls, any_order=True) def test_mediator_deny_ip_access_nfs(self): """"Deny ip access to nfs share.""" self.init_mediator() expected_clientip = '-%s' % constants.EXPECTED_IP_1234 self.mediator.deny_access(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.IP, constants.EXPECTED_IP_1234, constants.READ_WRITE, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.setfshare(constants.NFS.lower(), constants.EXPECTED_VFS, constants.EXPECTED_SHARE_ID, clientip=expected_clientip, fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_FSTORE, comment=constants.EXPECTED_COMMENT) ] self.mock_client.assert_has_calls(expected_calls) def test_mediator_deny_ip_ro_access_nfs_legacy(self): self.init_mediator() # Fail to find share with new naming. Succeed finding legacy naming. 
legacy = { 'shareName': 'foo_name', 'fstoreName': 'foo_fstore', 'comment': 'foo_comment', 'sharePath': 'foo/.snapshot/foo', } fshares = (None, legacy) mock_find_fshare = self.mock_object(self.mediator, '_find_fshare', mock.Mock(side_effect=fshares)) expected_clientip = '-%s' % constants.EXPECTED_IP_1234 self.mediator.deny_access(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.IP, constants.EXPECTED_IP_1234, constants.READ_ONLY, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.setfshare(constants.NFS.lower(), constants.EXPECTED_VFS, legacy['shareName'], clientip=expected_clientip, fpg=constants.EXPECTED_FPG, fstore=legacy['fstoreName'], comment=legacy['comment']) ] self.mock_client.assert_has_calls(expected_calls) expected_find_fshare_calls = [ mock.call(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, readonly=True), mock.call(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, readonly=False), ] mock_find_fshare.assert_has_calls(expected_find_fshare_calls) def test_mediator_allow_user_access_nfs(self): """"Allow user access to nfs share is not supported.""" self.init_mediator() self.assertRaises(exception.HPE3ParInvalid, self.mediator.allow_access, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_EXTRA_SPECS, constants.USER, constants.USERNAME, constants.READ_WRITE, constants.EXPECTED_FPG, constants.EXPECTED_VFS) def test_mediator_allow_access_bad_proto(self): """"Allow user access to unsupported protocol.""" self.init_mediator() self.assertRaises(exception.InvalidInput, self.mediator.allow_access, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, 'unsupported_other_protocol', constants.EXPECTED_EXTRA_SPECS, constants.USER, constants.USERNAME, constants.READ_WRITE, constants.EXPECTED_FPG, constants.EXPECTED_VFS) def test_mediator_allow_access_bad_type(self): """"Allow user access to unsupported access type.""" self.init_mediator() self.assertRaises(exception.InvalidInput, self.mediator.allow_access, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.CIFS, constants.EXPECTED_EXTRA_SPECS, 'unsupported_other_type', constants.USERNAME, constants.READ_WRITE, constants.EXPECTED_FPG, constants.EXPECTED_VFS) def test_mediator_allow_access_missing_nfs_share(self): self.init_mediator() mock_find_fshare = self.mock_object(self.mediator, '_find_fshare', mock.Mock(return_value=None)) self.assertRaises(exception.HPE3ParInvalid, self.mediator.allow_access, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_EXTRA_SPECS, constants.IP, constants.EXPECTED_IP_1234, constants.READ_WRITE, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, readonly=False), mock.call(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, readonly=True), ] mock_find_fshare.assert_has_calls(expected_calls) def test_mediator_allow_nfs_ro_access(self): self.init_mediator() getfshare_result = { 'shareName': 'foo_ro_name', 'fstoreName': 'foo_fstore', 'shareDir': 'foo_dir', 'comment': 'foo_comment', } findings = (None, getfshare_result) mock_find_fshare = 
self.mock_object(self.mediator, '_find_fshare', mock.Mock(side_effect=findings)) self.mock_client.getfshare.return_value = { 'total': 1, 'members': [getfshare_result], 'message': None, } share_id = 'foo' self.mediator.allow_access(constants.EXPECTED_PROJECT_ID, share_id, constants.NFS, constants.EXPECTED_EXTRA_SPECS, constants.IP, constants.EXPECTED_IP_1234, constants.READ_ONLY, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call(constants.EXPECTED_PROJECT_ID, share_id, constants.NFS_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, readonly=True), mock.call(constants.EXPECTED_PROJECT_ID, share_id, constants.NFS_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, readonly=False), ] mock_find_fshare.assert_has_calls(expected_calls) ro_share = 'osf-ro-%s' % share_id expected_calls = [ mock.call.createfshare(constants.NFS_LOWER, constants.EXPECTED_VFS, ro_share, clientip=constants.EXPECTED_IP_127_2, comment=getfshare_result['comment'], fpg=constants.EXPECTED_FPG, fstore=getfshare_result['fstoreName'], options='ro,no_root_squash,insecure', sharedir=getfshare_result['shareDir']), mock.call.getfshare(constants.NFS_LOWER, ro_share, fstore=getfshare_result['fstoreName'], fpg=constants.EXPECTED_FPG, vfs=constants.EXPECTED_VFS), mock.call.setfshare(constants.NFS_LOWER, constants.EXPECTED_VFS, getfshare_result['shareName'], clientip='+%s' % constants.EXPECTED_IP_1234, comment=getfshare_result['comment'], fpg=constants.EXPECTED_FPG, fstore=getfshare_result['fstoreName']), ] self.mock_client.assert_has_calls(expected_calls) def test_mediator_deny_access_missing_nfs_share(self): self.init_mediator() mock_find_fshare = self.mock_object(self.mediator, '_find_fshare', mock.Mock(return_value=None)) self.mediator.deny_access( constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.IP, constants.READ_WRITE, constants.EXPECTED_IP_1234, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, readonly=False), ] mock_find_fshare.assert_has_calls(expected_calls) @ddt.data((hpe3parmediator.ALLOW, 'ip', True, ['IP address foo already exists']), (hpe3parmediator.ALLOW, 'ip', False, ['Another share already exists for this path and client']), (hpe3parmediator.ALLOW, 'user', True, ['"allow" permission already exists for "foo"']), (hpe3parmediator.DENY, 'ip', True, ['foo does not exist, cannot be removed']), (hpe3parmediator.DENY, 'user', True, ['foo:fullcontrol" does not exist, cannot delete it.']), (hpe3parmediator.DENY, 'user', False, ['SMB share osf-foo does not exist']), (hpe3parmediator.ALLOW, 'ip', True, ['\r']), (hpe3parmediator.ALLOW, 'user', True, ['\r']), (hpe3parmediator.DENY, 'ip', True, ['\r']), (hpe3parmediator.DENY, 'user', True, ['\r']), (hpe3parmediator.ALLOW, 'ip', True, []), (hpe3parmediator.ALLOW, 'user', True, []), (hpe3parmediator.DENY, 'ip', True, []), (hpe3parmediator.DENY, 'user', True, [])) @ddt.unpack def test_ignore_benign_access_results(self, access, access_type, expect_false, results): returned = self.mediator.ignore_benign_access_results( access, access_type, 'foo', results) if expect_false: self.assertFalse(returned) else: self.assertEqual(results, returned) @ddt.data((2, 1, True), (2, 1, False), (1, 2, True), (1, 2, False), (1024, 2048, True), (1024, 2048, False), (2048, 1024, True), (2048, 1024, False), (99999999, 1, True), (99999999, 1, False), (1, 99999999, True), (1, 
99999999, False), ) @ddt.unpack def test_mediator_resize_share(self, new_size, old_size, fstore_per_share): self.init_mediator() fstore = 'foo_fstore' mock_find_fstore = self.mock_object(self.mediator, '_find_fstore', mock.Mock(return_value=fstore)) fstore_init_size = int( constants.GET_FSQUOTA['members'][0]['hardBlock']) self.mediator.hpe3par_fstore_per_share = fstore_per_share if fstore_per_share: expected_capacity = new_size * units.Ki else: expected_capacity = ( (new_size - old_size) * units.Ki + fstore_init_size) self.mediator.resize_share( constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, new_size, old_size, constants.EXPECTED_FPG, constants.EXPECTED_VFS) mock_find_fstore.assert_called_with(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS, allow_cross_protocol=False) self.mock_client.setfsquota.assert_called_with( constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG, fstore=fstore, scapacity=six.text_type(expected_capacity), hcapacity=six.text_type(expected_capacity)) @ddt.data(['This is a fake setfsquota returned error'], Exception('boom')) def test_mediator_resize_share_setfsquota_side_effects(self, side_effect): self.init_mediator() fstore_init_size = int( constants.GET_FSQUOTA['members'][0]['hardBlock']) fstore = 'foo_fstore' new_size = 2 old_size = 1 expected_capacity = (new_size - old_size) * units.Ki + fstore_init_size mock_find_fstore = self.mock_object(self.mediator, '_find_fstore', mock.Mock(return_value=fstore)) self.mock_client.setfsquota.side_effect = side_effect self.assertRaises(exception.ShareBackendException, self.mediator.resize_share, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, new_size, old_size, constants.EXPECTED_FPG, constants.EXPECTED_VFS) mock_find_fstore.assert_called_with(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS, allow_cross_protocol=False) self.mock_client.setfsquota.assert_called_with( constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG, fstore=fstore, scapacity=six.text_type(expected_capacity), hcapacity=six.text_type(expected_capacity)) def test_mediator_resize_share_not_found(self): self.init_mediator() mock_find_fshare = self.mock_object(self.mediator, '_find_fshare', mock.Mock(return_value=None)) self.assertRaises(exception.InvalidShare, self.mediator.resize_share, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, 999, 99, constants.EXPECTED_FPG, constants.EXPECTED_VFS) mock_find_fshare.assert_called_with(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS, allow_cross_protocol=False) @ddt.data((('nfs', 'NFS', 'nFs'), 'smb'), (('smb', 'SMB', 'SmB', 'CIFS', 'cifs', 'CiFs'), 'nfs')) @ddt.unpack def test_other_protocol(self, protocols, expected_other): for protocol in protocols: self.assertEqual(expected_other, hpe3parmediator.HPE3ParMediator().other_protocol( protocol)) @ddt.data('', 'bogus') def test_other_protocol_exception(self, protocol): self.assertRaises(exception.InvalidInput, hpe3parmediator.HPE3ParMediator().other_protocol, protocol) @ddt.data(('osf-uid', None, None, 'osf-uid'), ('uid', None, True, 'osf-ro-uid'), ('uid', None, False, 'osf-uid'), ('uid', 'smb', True, 'osf-smb-ro-uid'), ('uid', 'smb', False, 'osf-smb-uid'), ('uid', 'nfs', True, 'osf-nfs-ro-uid'), ('uid', 'nfs', False, 'osf-nfs-uid')) @ddt.unpack def test_ensure_prefix(self, uid, 
protocol, readonly, expected): self.assertEqual(expected, hpe3parmediator.HPE3ParMediator().ensure_prefix( uid, protocol=protocol, readonly=readonly)) def test_find_fstore_search(self): self.init_mediator() mock_find_fshare = self.mock_object(self.mediator, '_find_fshare', mock.Mock(return_value=None)) result = self.mediator._find_fstore(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS) mock_find_fshare.assert_called_once_with(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS, allow_cross_protocol=False) self.assertIsNone(result) def test_find_fstore_search_xproto(self): self.init_mediator() mock_find_fshare = self.mock_object(self.mediator, '_find_fshare_with_proto', mock.Mock(return_value=None)) result = self.mediator._find_fstore(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS, allow_cross_protocol=True) expected_calls = [ mock.call(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS, readonly=False), mock.call(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.SMB_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, readonly=False), ] mock_find_fshare.assert_has_calls(expected_calls) self.assertIsNone(result) def test_find_fshare_search(self): self.init_mediator() self.mock_client.getfshare.return_value = {} result = self.mediator._find_fshare(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.getfshare(constants.NFS_LOWER, constants.EXPECTED_SHARE_ID, fpg=constants.EXPECTED_FPG, vfs=constants.EXPECTED_VFS, fstore=constants.EXPECTED_PROJECT_ID), mock.call.getfshare(constants.NFS_LOWER, constants.EXPECTED_SHARE_ID, fpg=constants.EXPECTED_FPG, vfs=constants.EXPECTED_VFS, fstore=constants.EXPECTED_SHARE_ID), mock.call.getfshare(constants.NFS_LOWER, constants.EXPECTED_SHARE_ID, fpg=constants.EXPECTED_FPG), mock.call.getfshare(constants.NFS_LOWER, constants.EXPECTED_SHARE_ID), ] self.mock_client.assert_has_calls(expected_calls) self.assertIsNone(result) def test_find_fshare_exception(self): self.init_mediator() self.mock_client.getfshare.side_effect = Exception('test unexpected') self.assertRaises(exception.ShareBackendException, self.mediator._find_fshare, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS) self.mock_client.getfshare.assert_called_once_with( constants.NFS_LOWER, constants.EXPECTED_SHARE_ID, fpg=constants.EXPECTED_FPG, vfs=constants.EXPECTED_VFS, fstore=constants.EXPECTED_PROJECT_ID) def test_find_fshare_hit(self): self.init_mediator() expected_result = {'shareName': 'hit'} self.mock_client.getfshare.return_value = { 'total': 1, 'members': [expected_result] } result = self.mediator._find_fshare(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS) self.mock_client.getfshare.assert_called_once_with( constants.NFS_LOWER, constants.EXPECTED_SHARE_ID, fpg=constants.EXPECTED_FPG, vfs=constants.EXPECTED_VFS, fstore=constants.EXPECTED_PROJECT_ID), self.assertEqual(expected_result, result) def test_find_fsnap_search(self): self.init_mediator() self.mock_client.getfsnap.return_value = {} result = 
self.mediator._find_fsnap(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_ID, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_snap_pattern = '*_%s' % constants.EXPECTED_SNAP_ID expected_calls = [ mock.call.getfsnap(expected_snap_pattern, vfs=constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG, pat=True, fstore=constants.EXPECTED_PROJECT_ID), mock.call.getfsnap(expected_snap_pattern, vfs=constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG, pat=True, fstore=constants.EXPECTED_SHARE_ID), mock.call.getfsnap(expected_snap_pattern, fpg=constants.EXPECTED_FPG, pat=True), mock.call.getfsnap(expected_snap_pattern, pat=True), ] self.mock_client.assert_has_calls(expected_calls) self.assertIsNone(result) def test_find_fsnap_exception(self): self.init_mediator() self.mock_client.getfsnap.side_effect = Exception('test unexpected') self.assertRaises(exception.ShareBackendException, self.mediator._find_fsnap, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_ID, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_snap_pattern = '*_%s' % constants.EXPECTED_SNAP_ID self.mock_client.getfsnap.assert_called_once_with( expected_snap_pattern, vfs=constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG, pat=True, fstore=constants.EXPECTED_PROJECT_ID) def test_find_fsnap_hit(self): self.init_mediator() expected_result = {'snapName': 'hit'} self.mock_client.getfsnap.return_value = { 'total': 1, 'members': [expected_result] } result = self.mediator._find_fsnap(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_ID, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_snap_pattern = '*_%s' % constants.EXPECTED_SNAP_ID self.mock_client.getfsnap.assert_called_once_with( expected_snap_pattern, vfs=constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG, pat=True, fstore=constants.EXPECTED_PROJECT_ID) self.assertEqual(expected_result, result) def test_fsip_exists(self): self.init_mediator() # Make the result member a superset of the fsip items. fsip_plus = constants.EXPECTED_FSIP.copy() fsip_plus.update({'k': 'v', 'k2': 'v2'}) self.mock_client.getfsip.return_value = { 'total': 3, 'members': [{'bogus1': 1}, fsip_plus, {'bogus2': '2'}] } self.assertTrue(self.mediator.fsip_exists(constants.EXPECTED_FSIP)) self.mock_client.getfsip.assert_called_once_with( constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG) def test_fsip_does_not_exist(self): self.init_mediator() self.mock_client.getfsip.return_value = { 'total': 3, 'members': [{'bogus1': 1}, constants.OTHER_FSIP, {'bogus2': '2'}] } self.assertFalse(self.mediator.fsip_exists(constants.EXPECTED_FSIP)) self.mock_client.getfsip.assert_called_once_with( constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG) def test_fsip_exists_exception(self): self.init_mediator() class FakeException(Exception): pass self.mock_client.getfsip.side_effect = FakeException() self.assertRaises(exception.ShareBackendException, self.mediator.fsip_exists, constants.EXPECTED_FSIP) self.mock_client.getfsip.assert_called_once_with( constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG) def test_create_fsip_success(self): self.init_mediator() # Make the result member a superset of the fsip items. 
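# The mediator treats creation as successful only if the new FSIP shows up in
# a follow-up getfsip() listing; the returned member only needs to contain the
# expected key/value pairs, extra keys are tolerated (the empty-result failure
# path is covered by test_create_fsip_get_none below).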
fsip_plus = constants.EXPECTED_FSIP.copy() fsip_plus.update({'k': 'v', 'k2': 'v2'}) self.mock_client.getfsip.return_value = { 'total': 3, 'members': [{'bogus1': 1}, fsip_plus, {'bogus2': '2'}] } self.mediator.create_fsip(constants.EXPECTED_IP_1234, constants.EXPECTED_SUBNET, constants.EXPECTED_VLAN_TAG, constants.EXPECTED_FPG, constants.EXPECTED_VFS) self.mock_client.getfsip.assert_called_once_with( constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG) expected_calls = [ mock.call.createfsip(constants.EXPECTED_IP_1234, constants.EXPECTED_SUBNET, constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG, vlantag=constants.EXPECTED_VLAN_TAG), mock.call.getfsip(constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG), ] self.mock_client.assert_has_calls(expected_calls) def test_create_fsip_exception(self): self.init_mediator() class FakeException(Exception): pass self.mock_client.createfsip.side_effect = FakeException() self.assertRaises(exception.ShareBackendException, self.mediator.create_fsip, constants.EXPECTED_IP_1234, constants.EXPECTED_SUBNET, constants.EXPECTED_VLAN_TAG, constants.EXPECTED_FPG, constants.EXPECTED_VFS) self.mock_client.createfsip.assert_called_once_with( constants.EXPECTED_IP_1234, constants.EXPECTED_SUBNET, constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG, vlantag=constants.EXPECTED_VLAN_TAG) def test_create_fsip_get_none(self): self.init_mediator() self.mock_client.getfsip.return_value = {'members': []} self.assertRaises(exception.ShareBackendException, self.mediator.create_fsip, constants.EXPECTED_IP_1234, constants.EXPECTED_SUBNET, constants.EXPECTED_VLAN_TAG, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.createfsip(constants.EXPECTED_IP_1234, constants.EXPECTED_SUBNET, constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG, vlantag=constants.EXPECTED_VLAN_TAG), mock.call.getfsip(constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG), ] self.mock_client.assert_has_calls(expected_calls) def test_remove_fsip_success(self): self.init_mediator() self.mock_client.getfsip.return_value = { 'members': [constants.OTHER_FSIP] } self.mediator.remove_fsip(constants.EXPECTED_IP_1234, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.removefsip(constants.EXPECTED_VFS, constants.EXPECTED_IP_1234, fpg=constants.EXPECTED_FPG), mock.call.getfsip(constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG), ] self.mock_client.assert_has_calls(expected_calls) @ddt.data(('ip', None), ('ip', ''), (None, 'vfs'), ('', 'vfs'), (None, None), ('', '')) @ddt.unpack def test_remove_fsip_without_ip_or_vfs(self, ip, vfs): self.init_mediator() self.mediator.remove_fsip(ip, constants.EXPECTED_FPG, vfs) self.assertFalse(self.mock_client.removefsip.called) def test_remove_fsip_not_gone(self): self.init_mediator() self.mock_client.getfsip.return_value = { 'members': [constants.EXPECTED_FSIP] } self.assertRaises(exception.ShareBackendException, self.mediator.remove_fsip, constants.EXPECTED_IP_1234, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.removefsip(constants.EXPECTED_VFS, constants.EXPECTED_IP_1234, fpg=constants.EXPECTED_FPG), mock.call.getfsip(constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG), ] self.mock_client.assert_has_calls(expected_calls) def test_remove_fsip_exception(self): self.init_mediator() class FakeException(Exception): pass self.mock_client.removefsip.side_effect = FakeException() self.assertRaises(exception.ShareBackendException, self.mediator.remove_fsip, constants.EXPECTED_IP_1234, constants.EXPECTED_FPG, 
constants.EXPECTED_VFS) self.mock_client.removefsip.assert_called_once_with( constants.EXPECTED_VFS, constants.EXPECTED_IP_1234, fpg=constants.EXPECTED_FPG) def test__create_mount_directory(self): self.init_mediator() self.mock_object(utils, 'execute', mock.Mock(return_value={})) mount_location = '/mnt/foo' self.mediator._create_mount_directory(mount_location) utils.execute.assert_called_with('mkdir', mount_location, run_as_root=True) def test__create_mount_directory_error(self): self.init_mediator() self.mock_object(utils, 'execute', mock.Mock(side_effect=Exception('mkdir error.'))) mock_log = self.mock_object(hpe3parmediator, 'LOG') mount_location = '/mnt/foo' self.mediator._create_mount_directory(mount_location) utils.execute.assert_called_with('mkdir', mount_location, run_as_root=True) # Warning is logged (no exception thrown). self.assertTrue(mock_log.warning.called) def test__mount_super_share(self): self.init_mediator() self.mock_object(utils, 'execute', mock.Mock(return_value={})) # Test mounting NFS share. protocol = 'nfs' mount_location = '/mnt/foo' fpg = 'foo-fpg' vfs = 'bar-vfs' fstore = 'fstore' mount_path = '%s:/%s/%s/%s/' % (constants.EXPECTED_IP_10203040, fpg, vfs, fstore) self.mediator._mount_super_share(protocol, mount_location, fpg, vfs, fstore) utils.execute.assert_called_with('mount', '-t', protocol, mount_path, mount_location, run_as_root=True) # Test mounting CIFS share. protocol = 'smb' mount_path = '//%s/%s/' % (constants.EXPECTED_IP_10203040, constants.EXPECTED_SUPER_SHARE) user = 'username=%s,password=%s,domain=%s' % ( constants.USERNAME, constants.PASSWORD, constants.EXPECTED_CIFS_DOMAIN) self.mediator._mount_super_share(protocol, mount_location, fpg, vfs, fstore) utils.execute.assert_called_with('mount', '-t', 'cifs', mount_path, mount_location, '-o', user, run_as_root=True) def test__mount_super_share_error(self): self.init_mediator() self.mock_object(utils, 'execute', mock.Mock(side_effect=Exception('mount error.'))) mock_log = self.mock_object(hpe3parmediator, 'LOG') protocol = 'nfs' mount_location = '/mnt/foo' fpg = 'foo-fpg' vfs = 'bar-vfs' fstore = 'fstore' self.mediator._mount_super_share(protocol, mount_location, fpg, vfs, fstore) # Warning is logged (no exception thrown). self.assertTrue(mock_log.warning.called) def test__delete_share_directory(self): self.init_mediator() self.mock_object(utils, 'execute', mock.Mock(return_value={})) mount_location = '/mnt/foo' self.mediator._delete_share_directory(mount_location) utils.execute.assert_called_with('rm', '-rf', mount_location, run_as_root=True) def test__delete_share_directory_error(self): self.init_mediator() self.mock_object(utils, 'execute', mock.Mock(side_effect=Exception('rm error.'))) mock_log = self.mock_object(hpe3parmediator, 'LOG') mount_location = '/mnt/foo' self.mediator._delete_share_directory(mount_location) # Warning is logged (no exception thrown). self.assertTrue(mock_log.warning.called) def test__unmount_super_share(self): self.init_mediator() self.mock_object(utils, 'execute', mock.Mock(return_value={})) mount_location = '/mnt/foo' self.mediator._unmount_super_share(mount_location) utils.execute.assert_called_with('umount', mount_location, run_as_root=True) def test__unmount_super_share_error(self): self.init_mediator() self.mock_object(utils, 'execute', mock.Mock(side_effect=Exception('umount error.'))) mock_log = self.mock_object(hpe3parmediator, 'LOG') mount_location = '/mnt/foo' self.mediator._unmount_super_share(mount_location) # Warning is logged (no exception thrown). 
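# As with the other mount/unmount helpers above, failures from utils.execute
# are caught and logged as warnings; no exception is propagated to the caller.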
self.assertTrue(mock_log.warning.called) def test__delete_file_tree_no_config_options(self): self.init_mediator() mock_log = self.mock_object(hpe3parmediator, 'LOG') self.mediator.hpe3par_cifs_admin_access_username = None self.mediator._delete_file_tree( constants.EXPECTED_SHARE_ID, constants.SMB_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, constants.EXPECTED_FSTORE) # Warning is logged (no exception thrown). self.assertTrue(mock_log.warning.called) def test__create_super_share_createfshare_exception(self): self.init_mediator() self.mock_client.createfshare.side_effect = ( Exception("createfshare error.")) self.assertRaises( exception.ShareBackendException, self.mediator._create_super_share, constants.NFS_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, constants.EXPECTED_FSTORE) def test__create_super_share_setfshare_exception(self): self.init_mediator() self.mock_client.setfshare.side_effect = ( Exception("setfshare error.")) self.assertRaises( exception.ShareMountException, self.mediator._create_super_share, constants.SMB_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, constants.EXPECTED_FSTORE) class OptionMatcher(object): """Options string order can vary. Compare as lists.""" def __init__(self, assert_func, expected_string): self.assert_func = assert_func self.expected = expected_string.split(',') def __eq__(self, actual_string): actual = actual_string.split(',') self.assert_func(sorted(self.expected), sorted(actual)) return True manila-2.0.0/manila/tests/share/drivers/hpe/test_hpe_3par_driver.py0000664000567000056710000007453112701407107026550 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
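# ---------------------------------------------------------------------------
# Illustrative, self-contained sketch of the idea behind the OptionMatcher
# helper defined at the end of the mediator tests above: comma-separated
# option strings are compared as unordered lists, so mock assertions do not
# depend on the order in which options are emitted. All names below are
# hypothetical and exist only for illustration.

import unittest


class CsvSetMatcher(object):
    """Compare comma-separated option strings while ignoring order."""

    def __init__(self, expected):
        self.expected = sorted(expected.split(','))

    def __eq__(self, actual):
        return self.expected == sorted(actual.split(','))


class CsvSetMatcherExample(unittest.TestCase):
    def test_order_does_not_matter(self):
        # Any permutation of the same options compares equal to the matcher,
        # which is what makes it usable as an expected mock call argument.
        matcher = CsvSetMatcher('rw,insecure,no_root_squash')
        self.assertEqual(matcher, 'insecure,no_root_squash,rw')
# ---------------------------------------------------------------------------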
import sys import ddt import mock if 'hpe3parclient' not in sys.modules: sys.modules['hpe3parclient'] = mock.Mock() from manila import exception from manila.share.drivers.hpe import hpe_3par_driver as hpe3pardriver from manila.share.drivers.hpe import hpe_3par_mediator as hpe3parmediator from manila import test from manila.tests.share.drivers.hpe import test_hpe_3par_constants as constants @ddt.ddt class HPE3ParDriverTestCase(test.TestCase): def setUp(self): super(HPE3ParDriverTestCase, self).setUp() # Create a mock configuration with attributes and a safe_get() self.conf = mock.Mock() self.conf.driver_handles_share_servers = True self.conf.hpe3par_debug = constants.EXPECTED_HPE_DEBUG self.conf.hpe3par_username = constants.USERNAME self.conf.hpe3par_password = constants.PASSWORD self.conf.hpe3par_api_url = constants.API_URL self.conf.hpe3par_san_login = constants.SAN_LOGIN self.conf.hpe3par_san_password = constants.SAN_PASSWORD self.conf.hpe3par_san_ip = constants.EXPECTED_IP_1234 self.conf.hpe3par_fpg = constants.EXPECTED_FPG self.conf.hpe3par_san_ssh_port = constants.PORT self.conf.ssh_conn_timeout = constants.TIMEOUT self.conf.hpe3par_share_ip_address = None self.conf.hpe3par_fstore_per_share = False self.conf.hpe3par_require_cifs_ip = False self.conf.hpe3par_share_ip_address = constants.EXPECTED_IP_10203040 self.conf.hpe3par_cifs_admin_access_username = constants.USERNAME, self.conf.hpe3par_cifs_admin_access_password = constants.PASSWORD, self.conf.hpe3par_cifs_admin_access_domain = ( constants.EXPECTED_CIFS_DOMAIN), self.conf.hpe3par_share_mount_path = constants.EXPECTED_MOUNT_PATH, self.conf.my_ip = constants.EXPECTED_IP_1234 self.conf.network_config_group = 'test_network_config_group' self.conf.admin_network_config_group = ( 'test_admin_network_config_group') def safe_get(attr): try: return self.conf.__getattribute__(attr) except AttributeError: return None self.conf.safe_get = safe_get self.real_hpe_3par_mediator = hpe3parmediator.HPE3ParMediator self.mock_object(hpe3parmediator, 'HPE3ParMediator') self.mock_mediator_constructor = hpe3parmediator.HPE3ParMediator self.mock_mediator = self.mock_mediator_constructor() self.driver = hpe3pardriver.HPE3ParShareDriver( configuration=self.conf) def test_driver_setup_success(self): """Driver do_setup without any errors.""" self.mock_mediator.get_vfs_name.return_value = constants.EXPECTED_VFS self.driver.do_setup(None) conf = self.conf self.mock_mediator_constructor.assert_has_calls([ mock.call(hpe3par_san_ssh_port=conf.hpe3par_san_ssh_port, hpe3par_san_password=conf.hpe3par_san_password, hpe3par_username=conf.hpe3par_username, hpe3par_san_login=conf.hpe3par_san_login, hpe3par_debug=conf.hpe3par_debug, hpe3par_api_url=conf.hpe3par_api_url, hpe3par_password=conf.hpe3par_password, hpe3par_san_ip=conf.hpe3par_san_ip, hpe3par_fstore_per_share=conf.hpe3par_fstore_per_share, hpe3par_require_cifs_ip=conf.hpe3par_require_cifs_ip, hpe3par_share_ip_address=( self.conf.hpe3par_share_ip_address), hpe3par_cifs_admin_access_username=( conf.hpe3par_cifs_admin_access_username), hpe3par_cifs_admin_access_password=( conf.hpe3par_cifs_admin_access_password), hpe3par_cifs_admin_access_domain=( conf.hpe3par_cifs_admin_access_domain), hpe3par_share_mount_path=conf.hpe3par_share_mount_path, my_ip=self.conf.my_ip, ssh_conn_timeout=conf.ssh_conn_timeout)]) self.mock_mediator.assert_has_calls([ mock.call.do_setup(), mock.call.get_vfs_name(conf.hpe3par_fpg)]) self.assertEqual(constants.EXPECTED_VFS, self.driver.vfs) def test_driver_setup_no_dhss_success(self): 
"""Driver do_setup without any errors with dhss=False.""" self.conf.driver_handles_share_servers = False self.conf.hpe3par_share_ip_address = constants.EXPECTED_IP_10203040 self.test_driver_setup_success() def test_driver_setup_no_ss_no_ip(self): """Configured IP address is required for dhss=False.""" self.conf.driver_handles_share_servers = False self.conf.hpe3par_share_ip_address = None self.assertRaises(exception.HPE3ParInvalid, self.driver.do_setup, None) def test_driver_with_setup_error(self): """Driver do_setup when the mediator setup fails.""" self.mock_mediator.do_setup.side_effect = ( exception.ShareBackendException('fail')) self.assertRaises(exception.ShareBackendException, self.driver.do_setup, None) conf = self.conf self.mock_mediator_constructor.assert_has_calls([ mock.call(hpe3par_san_ssh_port=conf.hpe3par_san_ssh_port, hpe3par_san_password=conf.hpe3par_san_password, hpe3par_username=conf.hpe3par_username, hpe3par_san_login=conf.hpe3par_san_login, hpe3par_debug=conf.hpe3par_debug, hpe3par_api_url=conf.hpe3par_api_url, hpe3par_password=conf.hpe3par_password, hpe3par_san_ip=conf.hpe3par_san_ip, hpe3par_fstore_per_share=conf.hpe3par_fstore_per_share, hpe3par_require_cifs_ip=conf.hpe3par_require_cifs_ip, hpe3par_share_ip_address=( self.conf.hpe3par_share_ip_address), hpe3par_cifs_admin_access_username=( conf.hpe3par_cifs_admin_access_username), hpe3par_cifs_admin_access_password=( conf.hpe3par_cifs_admin_access_password), hpe3par_cifs_admin_access_domain=( conf.hpe3par_cifs_admin_access_domain), hpe3par_share_mount_path=conf.hpe3par_share_mount_path, my_ip=self.conf.my_ip, ssh_conn_timeout=conf.ssh_conn_timeout)]) self.mock_mediator.assert_has_calls([mock.call.do_setup()]) def test_driver_with_vfs_error(self): """Driver do_setup when the get_vfs_name fails.""" self.mock_mediator.get_vfs_name.side_effect = ( exception.ShareBackendException('fail')) self.assertRaises(exception.ShareBackendException, self.driver.do_setup, None) conf = self.conf self.mock_mediator_constructor.assert_has_calls([ mock.call(hpe3par_san_ssh_port=conf.hpe3par_san_ssh_port, hpe3par_san_password=conf.hpe3par_san_password, hpe3par_username=conf.hpe3par_username, hpe3par_san_login=conf.hpe3par_san_login, hpe3par_debug=conf.hpe3par_debug, hpe3par_api_url=conf.hpe3par_api_url, hpe3par_password=conf.hpe3par_password, hpe3par_san_ip=conf.hpe3par_san_ip, hpe3par_fstore_per_share=conf.hpe3par_fstore_per_share, hpe3par_require_cifs_ip=conf.hpe3par_require_cifs_ip, hpe3par_share_ip_address=( self.conf.hpe3par_share_ip_address), hpe3par_cifs_admin_access_username=( conf.hpe3par_cifs_admin_access_username), hpe3par_cifs_admin_access_password=( conf.hpe3par_cifs_admin_access_password), hpe3par_cifs_admin_access_domain=( conf.hpe3par_cifs_admin_access_domain), hpe3par_share_mount_path=conf.hpe3par_share_mount_path, my_ip=self.conf.my_ip, ssh_conn_timeout=conf.ssh_conn_timeout)]) self.mock_mediator.assert_has_calls([ mock.call.do_setup(), mock.call.get_vfs_name(conf.hpe3par_fpg)]) def init_driver(self): """Simple driver setup for re-use with tests that need one.""" self.driver._hpe3par = self.mock_mediator self.driver.vfs = constants.EXPECTED_VFS self.driver.fpg = constants.EXPECTED_FPG self.mock_object(hpe3pardriver, 'share_types') get_extra_specs = hpe3pardriver.share_types.get_extra_specs_from_share get_extra_specs.return_value = constants.EXPECTED_EXTRA_SPECS def do_create_share(self, protocol, share_type_id, expected_project_id, expected_share_id, expected_size): """Re-usable code for create share.""" context = None 
share_server = { 'backend_details': {'ip': constants.EXPECTED_IP_10203040}} share = { 'display_name': constants.EXPECTED_SHARE_NAME, 'host': constants.EXPECTED_HOST, 'project_id': expected_project_id, 'id': expected_share_id, 'share_proto': protocol, 'share_type_id': share_type_id, 'size': expected_size, } location = self.driver.create_share(context, share, share_server) return location def do_create_share_from_snapshot(self, protocol, share_type_id, snapshot_instance, expected_share_id, expected_size): """Re-usable code for create share from snapshot.""" context = None share_server = { 'backend_details': { 'ip': constants.EXPECTED_IP_10203040, }, } share = { 'project_id': constants.EXPECTED_PROJECT_ID, 'display_name': constants.EXPECTED_SHARE_NAME, 'host': constants.EXPECTED_HOST, 'id': expected_share_id, 'share_proto': protocol, 'share_type_id': share_type_id, 'size': expected_size, } location = self.driver.create_share_from_snapshot(context, share, snapshot_instance, share_server) return location def test_driver_check_for_setup_error_success(self): """check_for_setup_error when things go well.""" # Generally this is always mocked, but here we reference the class. hpe3parmediator.HPE3ParMediator = self.real_hpe_3par_mediator self.mock_object(hpe3pardriver, 'LOG') self.init_driver() self.driver.check_for_setup_error() expected_calls = [ mock.call.debug('HPE3ParShareDriver SHA1: %s', mock.ANY), mock.call.debug('HPE3ParMediator SHA1: %s', mock.ANY) ] hpe3pardriver.LOG.assert_has_calls(expected_calls) def test_driver_check_for_setup_error_exception(self): """check_for_setup_error catch and log any exceptions.""" # Since HPE3ParMediator is mocked, we'll hit the except/log. self.mock_object(hpe3pardriver, 'LOG') self.init_driver() self.driver.check_for_setup_error() expected_calls = [ mock.call.debug('HPE3ParShareDriver SHA1: %s', mock.ANY), mock.call.debug('Source code SHA1 not logged due to: %s', mock.ANY) ] hpe3pardriver.LOG.assert_has_calls(expected_calls) def test_driver_create_cifs_share(self): self.init_driver() expected_location = '\\\\%s\%s' % (constants.EXPECTED_IP_10203040, constants.EXPECTED_SHARE_NAME) self.mock_mediator.create_share.return_value = ( constants.EXPECTED_SHARE_NAME) location = self.do_create_share(constants.CIFS, constants.SHARE_TYPE_ID, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.EXPECTED_SIZE_2) self.assertEqual(expected_location, location) expected_calls = [mock.call.create_share( constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.CIFS, constants.EXPECTED_EXTRA_SPECS, constants.EXPECTED_FPG, constants.EXPECTED_VFS, comment=mock.ANY, size=constants.EXPECTED_SIZE_2)] self.mock_mediator.assert_has_calls(expected_calls) def test_driver_create_nfs_share(self): self.init_driver() expected_location = ':'.join((constants.EXPECTED_IP_10203040, constants.EXPECTED_SHARE_PATH)) self.mock_mediator.create_share.return_value = ( constants.EXPECTED_SHARE_PATH) location = self.do_create_share(constants.NFS, constants.SHARE_TYPE_ID, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.EXPECTED_SIZE_1) self.assertEqual(expected_location, location) expected_calls = [ mock.call.create_share(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_EXTRA_SPECS, constants.EXPECTED_FPG, constants.EXPECTED_VFS, comment=mock.ANY, size=constants.EXPECTED_SIZE_1)] self.mock_mediator.assert_has_calls(expected_calls) def test_driver_create_cifs_share_from_snapshot(self): self.init_driver() 
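# The '\\\\%s\%s' pattern below formats to \\<ip>\<share name>, i.e. a UNC
# path; spelling the second separator as '\\%s' would produce the same string
# without relying on '\%' being passed through as an unrecognized escape.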
expected_location = '\\\\%s\%s' % (constants.EXPECTED_IP_10203040, constants.EXPECTED_SHARE_NAME) self.mock_mediator.create_share_from_snapshot.return_value = ( constants.EXPECTED_SHARE_NAME) snapshot_instance = constants.SNAPSHOT_INSTANCE.copy() snapshot_instance['protocol'] = constants.CIFS location = self.do_create_share_from_snapshot( constants.CIFS, constants.SHARE_TYPE_ID, snapshot_instance, constants.EXPECTED_SHARE_ID, constants.EXPECTED_SIZE_2) self.assertEqual(expected_location, location) expected_calls = [ mock.call.create_share_from_snapshot( constants.EXPECTED_SHARE_ID, constants.CIFS, constants.EXPECTED_EXTRA_SPECS, constants.EXPECTED_FSTORE, constants.EXPECTED_SHARE_ID, constants.EXPECTED_SNAP_ID, constants.EXPECTED_FPG, constants.EXPECTED_VFS, comment=mock.ANY), ] self.mock_mediator.assert_has_calls(expected_calls) def test_driver_create_nfs_share_from_snapshot(self): self.init_driver() expected_location = ':'.join((constants.EXPECTED_IP_10203040, constants.EXPECTED_SHARE_PATH)) self.mock_mediator.create_share_from_snapshot.return_value = ( constants.EXPECTED_SHARE_PATH) location = self.do_create_share_from_snapshot( constants.NFS, constants.SHARE_TYPE_ID, constants.SNAPSHOT_INSTANCE, constants.EXPECTED_SHARE_ID, constants.EXPECTED_SIZE_1) self.assertEqual(expected_location, location) expected_calls = [ mock.call.create_share_from_snapshot( constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_EXTRA_SPECS, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.EXPECTED_SNAP_ID, constants.EXPECTED_FPG, constants.EXPECTED_VFS, comment=mock.ANY), ] self.mock_mediator.assert_has_calls(expected_calls) def test_driver_delete_share(self): self.init_driver() context = None share_server = None share = { 'project_id': constants.EXPECTED_PROJECT_ID, 'id': constants.EXPECTED_SHARE_ID, 'share_proto': constants.CIFS, 'size': constants.EXPECTED_SIZE_1, } self.driver.delete_share(context, share, share_server) expected_calls = [ mock.call.delete_share(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.CIFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS)] self.mock_mediator.assert_has_calls(expected_calls) def test_driver_create_snapshot(self): self.init_driver() context = None share_server = None self.driver.create_snapshot(context, constants.SNAPSHOT_INFO, share_server) expected_calls = [ mock.call.create_snapshot(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_ID, constants.EXPECTED_FPG, constants.EXPECTED_VFS)] self.mock_mediator.assert_has_calls(expected_calls) def test_driver_delete_snapshot(self): self.init_driver() context = None share_server = None self.driver.delete_snapshot(context, constants.SNAPSHOT_INFO, share_server) expected_calls = [ mock.call.delete_snapshot(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_ID, constants.EXPECTED_FPG, constants.EXPECTED_VFS) ] self.mock_mediator.assert_has_calls(expected_calls) def test_driver_allow_access(self): self.init_driver() context = None self.driver.allow_access(context, constants.NFS_SHARE_INFO, constants.ACCESS_INFO) expected_calls = [ mock.call.allow_access(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_EXTRA_SPECS, constants.IP, constants.EXPECTED_IP_1234, constants.ACCESS_INFO['access_level'], constants.EXPECTED_FPG, constants.EXPECTED_VFS) ] self.mock_mediator.assert_has_calls(expected_calls) def test_driver_deny_access(self): 
self.init_driver() context = None self.driver.deny_access(context, constants.NFS_SHARE_INFO, constants.ACCESS_INFO) expected_calls = [ mock.call.deny_access(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.IP, constants.EXPECTED_IP_1234, constants.READ_WRITE, constants.EXPECTED_FPG, constants.EXPECTED_VFS) ] self.mock_mediator.assert_has_calls(expected_calls) def test_driver_extend_share(self): self.init_driver() old_size = constants.NFS_SHARE_INFO['size'] new_size = old_size * 2 self.driver.extend_share(constants.NFS_SHARE_INFO, new_size) self.mock_mediator.resize_share.assert_called_once_with( constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, new_size, old_size, constants.EXPECTED_FPG, constants.EXPECTED_VFS) def test_driver_shrink_share(self): self.init_driver() old_size = constants.NFS_SHARE_INFO['size'] new_size = old_size / 2 self.driver.shrink_share(constants.NFS_SHARE_INFO, new_size) self.mock_mediator.resize_share.assert_called_once_with( constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, new_size, old_size, constants.EXPECTED_FPG, constants.EXPECTED_VFS) def test_driver_get_share_stats_not_ready(self): """Protect against stats update before driver is ready.""" self.mock_object(hpe3pardriver, 'LOG') expected_result = { 'driver_handles_share_servers': True, 'qos': False, 'driver_version': self.driver.VERSION, 'free_capacity_gb': 0, 'max_over_subscription_ratio': None, 'reserved_percentage': 0, 'provisioned_capacity_gb': 0, 'share_backend_name': 'HPE_3PAR', 'snapshot_support': True, 'storage_protocol': 'NFS_CIFS', 'thin_provisioning': True, 'total_capacity_gb': 0, 'vendor_name': 'HPE', 'pools': None, 'replication_domain': None, } result = self.driver.get_share_stats(refresh=True) self.assertEqual(expected_result, result) expected_calls = [ mock.call.info('Skipping capacity and capabilities update. 
' 'Setup has not completed.') ] hpe3pardriver.LOG.assert_has_calls(expected_calls) def test_driver_get_share_stats_no_refresh(self): """Driver does not call mediator when refresh=False.""" self.init_driver() self.driver._stats = constants.EXPECTED_STATS result = self.driver.get_share_stats(refresh=False) self.assertEqual(constants.EXPECTED_STATS, result) self.assertEqual([], self.mock_mediator.mock_calls) def test_driver_get_share_stats_with_refresh(self): """Driver adds stats from mediator to expected structure.""" self.init_driver() expected_free = constants.EXPECTED_SIZE_1 expected_capacity = constants.EXPECTED_SIZE_2 expected_version = self.driver.VERSION self.mock_mediator.get_fpg_status.return_value = { 'free_capacity_gb': expected_free, 'total_capacity_gb': expected_capacity, 'thin_provisioning': True, 'dedupe': False, 'hpe3par_flash_cache': False, 'hp3par_flash_cache': False, } expected_result = { 'driver_handles_share_servers': True, 'qos': False, 'driver_version': expected_version, 'free_capacity_gb': expected_free, 'max_over_subscription_ratio': None, 'pools': None, 'provisioned_capacity_gb': 0, 'reserved_percentage': 0, 'share_backend_name': 'HPE_3PAR', 'storage_protocol': 'NFS_CIFS', 'total_capacity_gb': expected_capacity, 'vendor_name': 'HPE', 'thin_provisioning': True, 'dedupe': False, 'hpe3par_flash_cache': False, 'hp3par_flash_cache': False, 'snapshot_support': True, 'replication_domain': None, } result = self.driver.get_share_stats(refresh=True) self.assertEqual(expected_result, result) expected_calls = [ mock.call.get_fpg_status(constants.EXPECTED_FPG) ] self.mock_mediator.assert_has_calls(expected_calls) self.assertTrue(self.mock_mediator.get_fpg_status.called) def test_driver_get_share_stats_premature(self): """Driver init stats before init_driver completed.""" expected_version = self.driver.VERSION self.mock_mediator.get_fpg_status.return_value = {'not_called': 1} expected_result = { 'qos': False, 'driver_handles_share_servers': True, 'driver_version': expected_version, 'free_capacity_gb': 0, 'max_over_subscription_ratio': None, 'pools': None, 'provisioned_capacity_gb': 0, 'reserved_percentage': 0, 'share_backend_name': 'HPE_3PAR', 'storage_protocol': 'NFS_CIFS', 'thin_provisioning': True, 'total_capacity_gb': 0, 'vendor_name': 'HPE', 'snapshot_support': True, 'replication_domain': None, } result = self.driver.get_share_stats(refresh=True) self.assertEqual(expected_result, result) self.assertFalse(self.mock_mediator.get_fpg_status.called) @ddt.data(('test"dquote', 'test_dquote'), ("test'squote", "test_squote"), ('test-:;,.punc', 'test-:_punc'), ('test with spaces ', 'test with spaces '), ('x' * 300, 'x' * 300)) @ddt.unpack def test_build_comment(self, display_name, clean_name): host = 'test-stack1@backend#pool' share = { 'host': host, 'display_name': display_name } comment = self.driver.build_share_comment(share) cleaned = { 'host': host, 'clean_name': clean_name } expected = ("OpenStack Manila - host=%(host)s " "orig_name=%(clean_name)s created=" % cleaned)[:254] self.assertLess(len(comment), 255) self.assertTrue(comment.startswith(expected)) # Test for some chars that are not allowed. # Don't test with same regex as the code uses. 
for c in "'\".,;": self.assertNotIn(c, comment) def test_get_network_allocations_number(self): self.assertEqual(1, self.driver.get_network_allocations_number()) def test_build_export_location_bad_protocol(self): self.assertRaises(exception.InvalidInput, self.driver._build_export_location, "BOGUS", constants.EXPECTED_IP_1234, constants.EXPECTED_SHARE_PATH) def test_build_export_location_bad_ip(self): self.assertRaises(exception.InvalidInput, self.driver._build_export_location, constants.NFS, None, None) def test_build_export_location_bad_path(self): self.assertRaises(exception.InvalidInput, self.driver._build_export_location, constants.NFS, constants.EXPECTED_IP_1234, None) def test_setup_server(self): """Setup server by creating a new FSIP.""" self.init_driver() network_info = { 'network_allocations': [ {'ip_address': constants.EXPECTED_IP_1234}], 'cidr': '/'.join((constants.EXPECTED_IP_1234, constants.CIDR_PREFIX)), 'network_type': constants.EXPECTED_VLAN_TYPE, 'segmentation_id': constants.EXPECTED_VLAN_TAG, 'server_id': constants.EXPECTED_SERVER_ID, } expected_result = { 'share_server_name': constants.EXPECTED_SERVER_ID, 'share_server_id': constants.EXPECTED_SERVER_ID, 'ip': constants.EXPECTED_IP_1234, 'subnet': constants.EXPECTED_SUBNET, 'vlantag': constants.EXPECTED_VLAN_TAG, 'fpg': constants.EXPECTED_FPG, 'vfs': constants.EXPECTED_VFS, } result = self.driver._setup_server(network_info) expected_calls = [ mock.call.create_fsip(constants.EXPECTED_IP_1234, constants.EXPECTED_SUBNET, constants.EXPECTED_VLAN_TAG, constants.EXPECTED_FPG, constants.EXPECTED_VFS) ] self.mock_mediator.assert_has_calls(expected_calls) self.assertEqual(expected_result, result) def test_teardown_server(self): self.init_driver() server_details = { 'ip': constants.EXPECTED_IP_1234, 'fpg': constants.EXPECTED_FPG, 'vfs': constants.EXPECTED_VFS, } self.driver._teardown_server(server_details) expected_calls = [ mock.call.remove_fsip(constants.EXPECTED_IP_1234, constants.EXPECTED_FPG, constants.EXPECTED_VFS) ] self.mock_mediator.assert_has_calls(expected_calls) manila-2.0.0/manila/tests/share/drivers/tegile/0000775000567000056710000000000012701407265022553 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/tegile/__init__.py0000664000567000056710000000000012701407107024645 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/tegile/test_tegile.py0000664000567000056710000007623412701407107025444 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 by Tegile Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Share driver Test for Tegile storage. 
""" import ddt import mock from oslo_config import cfg import requests import six from manila.common import constants as const from manila import context from manila import exception from manila.exception import TegileAPIException from manila.share.configuration import Configuration from manila.share.drivers.tegile import tegile from manila import test CONF = cfg.CONF test_config = Configuration(None) test_config.tegile_nas_server = 'some-ip' test_config.tegile_nas_login = 'some-user' test_config.tegile_nas_password = 'some-password' test_config.reserved_share_percentage = 10 test_config.max_over_subscription_ratio = 30.0 test_share = { 'host': 'node#fake_pool', 'name': 'testshare', 'id': 'a24c2ee8-525a-4406-8ccd-8d38688f8e9e', 'share_proto': 'NFS', 'size': 10, } test_share_cifs = { 'host': 'node#fake_pool', 'name': 'testshare', 'id': 'a24c2ee8-525a-4406-8ccd-8d38688f8e9e', 'share_proto': 'CIFS', 'size': 10, } test_share_fail = { 'host': 'node#fake_pool', 'name': 'testshare', 'id': 'a24c2ee8-525a-4406-8ccd-8d38688f8e9e', 'share_proto': 'OTHER', 'size': 10, } test_snapshot = { 'name': 'testSnap', 'id': '07ae9978-5445-405e-8881-28f2adfee732', 'share': test_share, 'share_name': 'snapshotted', 'display_name': 'disp', 'display_description': 'disp-desc', } array_stats = { 'total_capacity_gb': 4569.199686084874, 'free_capacity_gb': 4565.381390112452, 'pools': [ { 'total_capacity_gb': 913.5, 'QoS_support': False, 'free_capacity_gb': 911.812650680542, 'reserved_percentage': 0, 'pool_name': 'pyramid', }, { 'total_capacity_gb': 2742.1996604874, 'QoS_support': False, 'free_capacity_gb': 2740.148867149747, 'reserved_percentage': 0, 'pool_name': 'cobalt', }, { 'total_capacity_gb': 913.5, 'QoS_support': False, 'free_capacity_gb': 913.4198722839355, 'reserved_percentage': 0, 'pool_name': 'test', }, ], } fake_tegile_backend_fail = mock.Mock( side_effect=TegileAPIException(response="Fake Exception")) class FakeResponse(object): def __init__(self, status, json_output): self.status_code = status self.text = 'Random text' self._json = json_output def json(self): return self._json def close(self): pass @ddt.ddt class TegileShareDriverTestCase(test.TestCase): def __init__(self, *args, **kwds): super(TegileShareDriverTestCase, self).__init__(*args, **kwds) self._ctxt = context.get_admin_context() self.configuration = test_config def setUp(self): CONF.set_default('driver_handles_share_servers', False) self._driver = tegile.TegileShareDriver( configuration=self.configuration) self._driver._default_project = 'fake_project' super(TegileShareDriverTestCase, self).setUp() def test_create_share(self): api_return_value = (test_config.tegile_nas_server + " " + test_share['name']) mock_api = self.mock_object(self._driver, '_api', mock.Mock( return_value=api_return_value)) result = self._driver.create_share(self._ctxt, test_share) expected = { 'is_admin_only': False, 'metadata': { 'preferred': True, }, 'path': 'some-ip:testshare', } self.assertEqual(expected, result) create_params = ( 'fake_pool', 'fake_project', test_share['name'], test_share['share_proto'], ) mock_api.assert_called_once_with('createShare', create_params) def test_create_share_fail(self): mock_api = self.mock_object(self._driver, '_api', mock.Mock( side_effect=TegileAPIException( response="Fake Exception"))) self.assertRaises(TegileAPIException, self._driver.create_share, self._ctxt, test_share) create_params = ( 'fake_pool', 'fake_project', test_share['name'], test_share['share_proto'], ) mock_api.assert_called_once_with('createShare', create_params) def 
test_delete_share(self): fake_share_info = ('fake_pool', 'fake_project', test_share['name']) mock_params = self.mock_object(self._driver, '_get_pool_project_share_name', mock.Mock(return_value=fake_share_info)) mock_api = self.mock_object(self._driver, '_api') self._driver.delete_share(self._ctxt, test_share) delete_path = '%s/%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name']) delete_params = (delete_path, True, False) mock_api.assert_called_once_with('deleteShare', delete_params) mock_params.assert_called_once_with(test_share) def test_delete_share_fail(self): mock_api = self.mock_object(self._driver, '_api', mock.Mock( side_effect=TegileAPIException( response="Fake Exception"))) self.assertRaises(TegileAPIException, self._driver.delete_share, self._ctxt, test_share) delete_path = '%s/%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name']) delete_params = (delete_path, True, False) mock_api.assert_called_once_with('deleteShare', delete_params) def test_create_snapshot(self): mock_api = self.mock_object(self._driver, '_api') fake_share_info = ('fake_pool', 'fake_project', test_share['name']) mock_params = self.mock_object(self._driver, '_get_pool_project_share_name', mock.Mock(return_value=fake_share_info)) self._driver.create_snapshot(self._ctxt, test_snapshot) share = { 'poolName': 'fake_pool', 'projectName': 'fake_project', 'name': test_share['name'], 'availableSize': 0, 'totalSize': 0, 'datasetPath': '%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', ), 'mountpoint': test_share['name'], 'local': 'true', } create_params = (share, test_snapshot['name'], False) mock_api.assert_called_once_with('createShareSnapshot', create_params) mock_params.assert_called_once_with(test_share) def test_create_snapshot_fail(self): mock_api = self.mock_object(self._driver, '_api', mock.Mock( side_effect=TegileAPIException( response="Fake Exception"))) self.assertRaises(TegileAPIException, self._driver.create_snapshot, self._ctxt, test_snapshot) share = { 'poolName': 'fake_pool', 'projectName': 'fake_project', 'name': test_share['name'], 'availableSize': 0, 'totalSize': 0, 'datasetPath': '%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', ), 'mountpoint': test_share['name'], 'local': 'true', } create_params = (share, test_snapshot['name'], False) mock_api.assert_called_once_with('createShareSnapshot', create_params) def test_delete_snapshot(self): fake_share_info = ('fake_pool', 'fake_project', test_share['name']) mock_params = self.mock_object(self._driver, '_get_pool_project_share_name', mock.Mock(return_value=fake_share_info)) mock_api = self.mock_object(self._driver, '_api') self._driver.delete_snapshot(self._ctxt, test_snapshot) delete_snap_path = ('%s/%s/%s/%s@%s%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name'], 'Manual-S-', test_snapshot['name'], )) delete_params = (delete_snap_path, False) mock_api.assert_called_once_with('deleteShareSnapshot', delete_params) mock_params.assert_called_once_with(test_share) def test_delete_snapshot_fail(self): mock_api = self.mock_object(self._driver, '_api', mock.Mock( side_effect=TegileAPIException( response="Fake Exception"))) self.assertRaises(TegileAPIException, self._driver.delete_snapshot, self._ctxt, test_snapshot) delete_snap_path = ('%s/%s/%s/%s@%s%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name'], 'Manual-S-', test_snapshot['name'], )) delete_params = (delete_snap_path, False) mock_api.assert_called_once_with('deleteShareSnapshot', delete_params) def test_create_share_from_snapshot(self): 
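# The driver is expected to address the source snapshot by its full dataset
# path, <pool>/Local/<project>/<share>@Manual-S-<snapshot name>, when cloning
# it into the new share (see the create_params assertion below).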
api_return_value = (test_config.tegile_nas_server + " " + test_share['name']) mock_api = self.mock_object(self._driver, '_api', mock.Mock( return_value=api_return_value)) fake_share_info = ('fake_pool', 'fake_project', test_share['name']) mock_params = self.mock_object(self._driver, '_get_pool_project_share_name', mock.Mock(return_value=fake_share_info)) result = self._driver.create_share_from_snapshot(self._ctxt, test_share, test_snapshot) expected = { 'is_admin_only': False, 'metadata': { 'preferred': True, }, 'path': 'some-ip:testshare', } self.assertEqual(expected, result) create_params = ( '%s/%s/%s/%s@%s%s' % ( 'fake_pool', 'Local', 'fake_project', test_snapshot['share_name'], 'Manual-S-', test_snapshot['name'], ), test_share['name'], True, ) mock_api.assert_called_once_with('cloneShareSnapshot', create_params) mock_params.assert_called_once_with(test_share) def test_create_share_from_snapshot_fail(self): mock_api = self.mock_object(self._driver, '_api', mock.Mock( side_effect=TegileAPIException( response="Fake Exception"))) self.assertRaises(TegileAPIException, self._driver.create_share_from_snapshot, self._ctxt, test_share, test_snapshot) create_params = ( '%s/%s/%s/%s@%s%s' % ( 'fake_pool', 'Local', 'fake_project', test_snapshot['share_name'], 'Manual-S-', test_snapshot['name'], ), test_share['name'], True, ) mock_api.assert_called_once_with('cloneShareSnapshot', create_params) def test_ensure_share(self): api_return_value = (test_config.tegile_nas_server + " " + test_share['name']) mock_api = self.mock_object(self._driver, '_api', mock.Mock( return_value=api_return_value)) fake_share_info = ('fake_pool', 'fake_project', test_share['name']) mock_params = self.mock_object(self._driver, '_get_pool_project_share_name', mock.Mock(return_value=fake_share_info)) result = self._driver.ensure_share(self._ctxt, test_share) expected = [ { 'is_admin_only': False, 'metadata': { 'preferred': True, }, 'path': 'some-ip:testshare', }, ] self.assertEqual(expected, result) ensure_params = [ '%s/%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name'])] mock_api.assert_called_once_with('getShareIPAndMountPoint', ensure_params) mock_params.assert_called_once_with(test_share) def test_ensure_share_fail(self): mock_api = self.mock_object(self._driver, '_api', mock.Mock( side_effect=TegileAPIException( response="Fake Exception"))) self.assertRaises(TegileAPIException, self._driver.ensure_share, self._ctxt, test_share) ensure_params = [ '%s/%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name'])] mock_api.assert_called_once_with('getShareIPAndMountPoint', ensure_params) def test_get_share_stats(self): mock_api = self.mock_object(self._driver, '_api', mock.Mock( return_value=array_stats)) result_dict = self._driver.get_share_stats(True) expected_dict = { 'driver_handles_share_servers': False, 'driver_version': '1.0.0', 'free_capacity_gb': 4565.381390112452, 'pools': [ { 'allocated_capacity_gb': 0.0, 'compression': True, 'dedupe': True, 'free_capacity_gb': 911.812650680542, 'pool_name': 'pyramid', 'qos': False, 'reserved_percentage': 10, 'thin_provisioning': True, 'max_over_subscription_ratio': 30.0, 'total_capacity_gb': 913.5}, { 'allocated_capacity_gb': 0.0, 'compression': True, 'dedupe': True, 'free_capacity_gb': 2740.148867149747, 'pool_name': 'cobalt', 'qos': False, 'reserved_percentage': 10, 'thin_provisioning': True, 'max_over_subscription_ratio': 30.0, 'total_capacity_gb': 2742.1996604874 }, { 'allocated_capacity_gb': 0.0, 'compression': True, 'dedupe': True, 
'free_capacity_gb': 913.4198722839355, 'pool_name': 'test', 'qos': False, 'reserved_percentage': 10, 'thin_provisioning': True, 'max_over_subscription_ratio': 30.0, 'total_capacity_gb': 913.5}, ], 'qos': False, 'reserved_percentage': 0, 'replication_domain': None, 'share_backend_name': 'Tegile', 'snapshot_support': True, 'storage_protocol': 'NFS_CIFS', 'total_capacity_gb': 4569.199686084874, 'vendor_name': 'Tegile Systems Inc.', } self.assertSubDictMatch(expected_dict, result_dict) mock_api.assert_called_once_with(fine_logging=False, method='getArrayStats', request_type='get') def test_get_share_stats_fail(self): mock_api = self.mock_object(self._driver, '_api', mock.Mock( side_effect=TegileAPIException( response="Fake Exception"))) self.assertRaises(TegileAPIException, self._driver.get_share_stats, True) mock_api.assert_called_once_with(fine_logging=False, method='getArrayStats', request_type='get') def test_get_pool(self): result = self._driver.get_pool(test_share) expected = 'fake_pool' self.assertEqual(expected, result) def test_extend_share(self): fake_share_info = ('fake_pool', 'fake_project', test_share['name']) mock_params = self.mock_object(self._driver, '_get_pool_project_share_name', mock.Mock(return_value=fake_share_info)) mock_api = self.mock_object(self._driver, '_api') self._driver.extend_share(test_share, 12) extend_path = '%s/%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name']) extend_params = (extend_path, six.text_type(12), 'GB') mock_api.assert_called_once_with('resizeShare', extend_params) mock_params.assert_called_once_with(test_share) def test_extend_share_fail(self): mock_api = self.mock_object(self._driver, '_api', mock.Mock( side_effect=TegileAPIException( response="Fake Exception"))) self.assertRaises(TegileAPIException, self._driver.extend_share, test_share, 30) extend_path = '%s/%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name']) extend_params = (extend_path, six.text_type(30), 'GB') mock_api.assert_called_once_with('resizeShare', extend_params) def test_shrink_share(self): fake_share_info = ('fake_pool', 'fake_project', test_share['name']) mock_params = self.mock_object(self._driver, '_get_pool_project_share_name', mock.Mock(return_value=fake_share_info)) mock_api = self.mock_object(self._driver, '_api') self._driver.shrink_share(test_share, 15) shrink_path = '%s/%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name']) shrink_params = (shrink_path, six.text_type(15), 'GB') mock_api.assert_called_once_with('resizeShare', shrink_params) mock_params.assert_called_once_with(test_share) def test_shrink_share_fail(self): mock_api = self.mock_object(self._driver, '_api', mock.Mock( side_effect=TegileAPIException( response="Fake Exception"))) self.assertRaises(TegileAPIException, self._driver.shrink_share, test_share, 30) shrink_path = '%s/%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name']) shrink_params = (shrink_path, six.text_type(30), 'GB') mock_api.assert_called_once_with('resizeShare', shrink_params) @ddt.data('ip', 'user') def test_allow_access(self, access_type): fake_share_info = ('fake_pool', 'fake_project', test_share['name']) mock_params = self.mock_object(self._driver, '_get_pool_project_share_name', mock.Mock(return_value=fake_share_info)) mock_api = self.mock_object(self._driver, '_api') access = { 'access_type': access_type, 'access_level': const.ACCESS_LEVEL_RW, 'access_to': 'some-ip', } self._driver._allow_access(self._ctxt, test_share, access) allow_params = ( '%s/%s/%s/%s' % 
( 'fake_pool', 'Local', 'fake_project', test_share['name'], ), test_share['share_proto'], access_type, access['access_to'], access['access_level'], ) mock_api.assert_called_once_with('shareAllowAccess', allow_params) mock_params.assert_called_once_with(test_share) @ddt.data({'access_type': 'other', 'to': 'some-ip', 'share': test_share, 'exception_type': exception.InvalidShareAccess}, {'access_type': 'ip', 'to': 'some-ip', 'share': test_share, 'exception_type': exception.TegileAPIException}, {'access_type': 'ip', 'to': 'some-ip', 'share': test_share_cifs, 'exception_type': exception.InvalidShareAccess}, {'access_type': 'ip', 'to': 'some-ip', 'share': test_share_fail, 'exception_type': exception.InvalidShareAccess}) @ddt.unpack def test_allow_access_fail(self, access_type, to, share, exception_type): self.mock_object(self._driver, '_api', mock.Mock( side_effect=TegileAPIException( response="Fake Exception"))) access = { 'access_type': access_type, 'access_level': const.ACCESS_LEVEL_RW, 'access_to': to, } self.assertRaises(exception_type, self._driver._allow_access, self._ctxt, share, access) @ddt.data('ip', 'user') def test_deny_access(self, access_type): fake_share_info = ('fake_pool', 'fake_project', test_share['name']) mock_params = self.mock_object(self._driver, '_get_pool_project_share_name', mock.Mock(return_value=fake_share_info)) mock_api = self.mock_object(self._driver, '_api') access = { 'access_type': access_type, 'access_level': const.ACCESS_LEVEL_RW, 'access_to': 'some-ip', } self._driver._deny_access(self._ctxt, test_share, access) deny_params = ( '%s/%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name'], ), test_share['share_proto'], access_type, access['access_to'], access['access_level'], ) mock_api.assert_called_once_with('shareDenyAccess', deny_params) mock_params.assert_called_once_with(test_share) @ddt.data({'access_type': 'other', 'to': 'some-ip', 'share': test_share, 'exception_type': exception.InvalidShareAccess}, {'access_type': 'ip', 'to': 'some-ip', 'share': test_share, 'exception_type': exception.TegileAPIException}, {'access_type': 'ip', 'to': 'some-ip', 'share': test_share_cifs, 'exception_type': exception.InvalidShareAccess}, {'access_type': 'ip', 'to': 'some-ip', 'share': test_share_fail, 'exception_type': exception.InvalidShareAccess}) @ddt.unpack def test_deny_access_fail(self, access_type, to, share, exception_type): self.mock_object(self._driver, '_api', mock.Mock( side_effect=TegileAPIException( response="Fake Exception"))) access = { 'access_type': access_type, 'access_level': const.ACCESS_LEVEL_RW, 'access_to': to, } self.assertRaises(exception_type, self._driver._deny_access, self._ctxt, share, access) @ddt.data({'access_rules': [{'access_type': 'ip', 'access_level': const.ACCESS_LEVEL_RW, 'access_to': 'some-ip', }, ], 'add_rules': None, 'delete_rules': None, 'call_name': 'shareAllowAccess'}, {'access_rules': [], 'add_rules': [{'access_type': 'ip', 'access_level': const.ACCESS_LEVEL_RW, 'access_to': 'some-ip'}, ], 'delete_rules': [], 'call_name': 'shareAllowAccess'}, {'access_rules': [], 'add_rules': [], 'delete_rules': [{'access_type': 'ip', 'access_level': const.ACCESS_LEVEL_RW, 'access_to': 'some-ip', }, ], 'call_name': 'shareDenyAccess'}) @ddt.unpack def test_update_access(self, access_rules, add_rules, delete_rules, call_name): fake_share_info = ('fake_pool', 'fake_project', test_share['name']) mock_params = self.mock_object(self._driver, '_get_pool_project_share_name', mock.Mock(return_value=fake_share_info)) mock_api = 
self.mock_object(self._driver, '_api') self._driver.update_access(self._ctxt, test_share, access_rules=access_rules, add_rules=add_rules, delete_rules=delete_rules) allow_params = ( '%s/%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name'], ), test_share['share_proto'], 'ip', 'some-ip', const.ACCESS_LEVEL_RW, ) if not (add_rules or delete_rules): clear_params = ( '%s/%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name'], ), test_share['share_proto'], ) mock_api.assert_has_calls([mock.call('clearAccessRules', clear_params), mock.call(call_name, allow_params)]) mock_params.assert_called_with(test_share) else: mock_api.assert_called_once_with(call_name, allow_params) mock_params.assert_called_once_with(test_share) @ddt.data({'path': r'\\some-ip\shareName', 'share_proto': 'CIFS', 'host': 'some-ip'}, {'path': 'some-ip:shareName', 'share_proto': 'NFS', 'host': 'some-ip'}, {'path': 'some-ip:shareName', 'share_proto': 'NFS', 'host': None}) @ddt.unpack def test_get_location_path(self, path, share_proto, host): self._driver._hostname = 'some-ip' result = self._driver._get_location_path('shareName', share_proto, host) expected = { 'is_admin_only': False, 'metadata': { 'preferred': True, }, 'path': path, } self.assertEqual(expected, result) def test_get_location_path_fail(self): self.assertRaises(exception.InvalidInput, self._driver._get_location_path, 'shareName', 'SOME', 'some-ip') def test_get_network_allocations_number(self): result = self._driver.get_network_allocations_number() expected = 0 self.assertEqual(expected, result) class TegileAPIExecutorTestCase(test.TestCase): def setUp(self): self._api = tegile.TegileAPIExecutor("TestCase", test_config.tegile_nas_server, test_config.tegile_nas_login, test_config.tegile_nas_password) super(TegileAPIExecutorTestCase, self).setUp() def test_send_api_post(self): json_output = {'value': 'abc'} self.mock_object(requests, 'post', mock.Mock(return_value=FakeResponse(200, json_output))) result = self._api(method="Test", request_type='post', params='[]', fine_logging=True) self.assertEqual(json_output, result) def test_send_api_get(self): json_output = {'value': 'abc'} self.mock_object(requests, 'get', mock.Mock(return_value=FakeResponse(200, json_output))) result = self._api(method="Test", request_type='get', fine_logging=False) self.assertEqual(json_output, result) def test_send_api_get_fail(self): self.mock_object(requests, 'get', mock.Mock(return_value=FakeResponse(404, []))) self.assertRaises(TegileAPIException, self._api, method="Test", request_type='get', fine_logging=False) def test_send_api_value_error_fail(self): json_output = {'value': 'abc'} self.mock_object(requests, 'post', mock.Mock(return_value=FakeResponse(200, json_output))) self.mock_object(FakeResponse, 'json', mock.Mock(side_effect=ValueError)) result = self._api(method="Test", request_type='post', fine_logging=False) expected = '' self.assertEqual(expected, result) manila-2.0.0/manila/tests/share/drivers/emc/0000775000567000056710000000000012701407265022046 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/emc/test_driver.py0000664000567000056710000001140412701407107024745 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from stevedore import extension from manila import network from manila.share import configuration as conf from manila.share.drivers.emc import driver as emcdriver from manila.share.drivers.emc.plugins import base from manila import test class FakeConnection(base.StorageConnection): def __init__(self, logger): self.logger = logger @property def driver_handles_share_servers(self): return True def create_share(self, context, share, share_server): """Is called to create share.""" def create_snapshot(self, context, snapshot, share_server): """Is called to create snapshot.""" def delete_share(self, context, share, share_server): """Is called to remove share.""" def extend_share(self, share, new_size, share_server): """Is called to extend share.""" def delete_snapshot(self, context, snapshot, share_server): """Is called to remove snapshot.""" def ensure_share(self, context, share, share_server): """Invoked to sure that share is exported.""" def allow_access(self, context, share, access, share_server): """Allow access to the share.""" def deny_access(self, context, share, access, share_server): """Deny access to the share.""" def raise_connect_error(self): """Check for setup error.""" def connect(self, emc_share_driver, context): """Any initialization the share driver does while starting.""" raise NotImplementedError() def update_share_stats(self, stats_dict): """Add key/values to stats_dict.""" def get_network_allocations_number(self): """Returns number of network allocations for creating VIFs.""" return 0 def setup_server(self, network_info, metadata=None): """Set up and configures share server with given network parameters.""" def teardown_server(self, server_details, security_services=None): """Teardown share server.""" FAKE_BACKEND = 'fake_backend' class FakeEMCExtensionManager(object): def __init__(self): self.extensions = [] self.extensions.append( extension.Extension(name=FAKE_BACKEND, plugin=FakeConnection, entry_point=None, obj=None)) class EMCShareFrameworkTestCase(test.TestCase): @mock.patch('stevedore.extension.ExtensionManager', mock.Mock(return_value=FakeEMCExtensionManager())) def setUp(self): super(EMCShareFrameworkTestCase, self).setUp() self.configuration = conf.Configuration(None) self.configuration.append_config_values = mock.Mock(return_value=0) self.configuration.share_backend_name = FAKE_BACKEND self.mock_object(self.configuration, 'safe_get', self._fake_safe_get) self.mock_object(network, 'API') self.driver = emcdriver.EMCShareDriver( configuration=self.configuration) def test_driver_setup(self): FakeConnection.connect = mock.Mock() self.driver.do_setup(None) self.assertIsInstance(self.driver.plugin, FakeConnection, "Not an instance of FakeConnection") FakeConnection.connect.assert_called_with(self.driver, None) def test_update_share_stats(self): data = {} self.driver.plugin = mock.Mock() self.driver._update_share_stats() data["share_backend_name"] = FAKE_BACKEND data["driver_handles_share_servers"] = True data["vendor_name"] = 'EMC' data["driver_version"] = '1.0' data["storage_protocol"] = 'NFS_CIFS' data['total_capacity_gb'] = 'unknown' 
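# With the plugin mocked out, capacity values stay at the framework default of
# 'unknown'; a real backend plugin fills them in via update_share_stats().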
data['free_capacity_gb'] = 'unknown' data['reserved_percentage'] = 0 data['qos'] = False data['pools'] = None data['snapshot_support'] = True data['replication_domain'] = None self.assertEqual(data, self.driver._stats) def _fake_safe_get(self, value): if value in ['emc_share_backend', 'share_backend_name']: return FAKE_BACKEND elif value == 'driver_handles_share_servers': return True return None manila-2.0.0/manila/tests/share/drivers/emc/__init__.py0000664000567000056710000000000012701407107024140 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/emc/plugins/0000775000567000056710000000000012701407265023527 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/emc/plugins/vnx/0000775000567000056710000000000012701407265024342 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/emc/plugins/vnx/test_connector.py0000664000567000056710000001742312701407107027747 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from eventlet import greenthread import mock from oslo_concurrency import processutils from six.moves.urllib import error as url_error # pylint: disable=E0611 from six.moves.urllib import request as url_request # pylint: disable=E0611 from manila import exception from manila.share import configuration as conf from manila.share.drivers.emc.plugins.vnx import connector from manila import test from manila.tests.share.drivers.emc.plugins.vnx import fakes from manila.tests.share.drivers.emc.plugins.vnx import utils as emc_utils from manila import utils class XMLAPIConnectorTestData(object): FAKE_BODY = '' FAKE_RESP = '' FAKE_METHOD = 'fake_method' FAKE_KEY = 'key' FAKE_VALUE = 'value' @staticmethod def req_auth_url(): return 'https://' + fakes.FakeData.emc_nas_server + '/Login' @staticmethod def req_credential(): return ( 'user=' + fakes.FakeData.emc_nas_login + '&password=' + fakes.FakeData.emc_nas_password + '&Login=Login' ) @staticmethod def req_url_encode(): return {'Content-Type': 'application/x-www-form-urlencoded'} @staticmethod def req_url(): return ( 'https://' + fakes.FakeData.emc_nas_server + '/servlets/CelerraManagementServices' ) XML_CONN_TD = XMLAPIConnectorTestData class XMLAPIConnectorTest(test.TestCase): @mock.patch.object(url_request, 'Request', mock.Mock()) def setUp(self): super(XMLAPIConnectorTest, self).setUp() emc_share_driver = fakes.FakeEMCShareDriver() self.configuration = emc_share_driver.configuration xml_socket = mock.Mock() xml_socket.read = mock.Mock(return_value=XML_CONN_TD.FAKE_RESP) opener = mock.Mock() opener.open = mock.Mock(return_value=xml_socket) with mock.patch.object(url_request, 'build_opener', mock.Mock(return_value=opener)): self.XmlConnector = connector.XMLAPIConnector( configuration=self.configuration, debug=False) expected_calls = [ mock.call(XML_CONN_TD.req_auth_url(), XML_CONN_TD.req_credential(), XML_CONN_TD.req_url_encode()), ] url_request.Request.assert_has_calls(expected_calls) def 
test_request_with_debug(self): self.XmlConnector.debug = True request = mock.Mock() request.headers = {XML_CONN_TD.FAKE_KEY: XML_CONN_TD.FAKE_VALUE} request.get_full_url = mock.Mock( return_value=XML_CONN_TD.FAKE_VALUE) with mock.patch.object(url_request, 'Request', mock.Mock(return_value=request)): rsp = self.XmlConnector.request(XML_CONN_TD.FAKE_BODY, XML_CONN_TD.FAKE_METHOD) self.assertEqual(XML_CONN_TD.FAKE_RESP, rsp) def test_request_with_no_authorized_exception(self): xml_socket = mock.Mock() xml_socket.read = mock.Mock(return_value=XML_CONN_TD.FAKE_RESP) hook = emc_utils.RequestSideEffect() hook.append(ex=url_error.HTTPError(XML_CONN_TD.req_url(), '403', 'fake_message', None, None)) hook.append(xml_socket) hook.append(xml_socket) self.XmlConnector.url_opener.open = mock.Mock(side_effect=hook) self.XmlConnector.request(XML_CONN_TD.FAKE_BODY) def test_request_with_general_exception(self): hook = emc_utils.RequestSideEffect() hook.append(ex=url_error.HTTPError(XML_CONN_TD.req_url(), 'error_code', 'fake_message', None, None)) self.XmlConnector.url_opener.open = mock.Mock(side_effect=hook) self.assertRaises(exception.ManilaException, self.XmlConnector.request, XML_CONN_TD.FAKE_BODY) class MockSSH(object): def __enter__(self): return self def __exit__(self, type, value, traceback): pass class MockSSHPool(object): def __init__(self): self.ssh = MockSSH() def item(self): try: return self.ssh finally: pass class CmdConnectorTest(test.TestCase): def setUp(self): super(CmdConnectorTest, self).setUp() self.configuration = conf.Configuration(None) self.configuration.append_config_values = mock.Mock(return_value=0) self.configuration.emc_nas_login = fakes.FakeData.emc_nas_login self.configuration.emc_nas_password = fakes.FakeData.emc_nas_password self.configuration.emc_nas_server = fakes.FakeData.emc_nas_server self.sshpool = MockSSHPool() with mock.patch.object(utils, "SSHPool", mock.Mock(return_value=self.sshpool)): self.CmdHelper = connector.SSHConnector( configuration=self.configuration, debug=False) utils.SSHPool.assert_called_once_with( ip=fakes.FakeData.emc_nas_server, port=22, conn_timeout=None, login=fakes.FakeData.emc_nas_login, password=fakes.FakeData.emc_nas_password) def test_run_ssh(self): with mock.patch.object(processutils, "ssh_execute", mock.Mock(return_value=('fake_output', ''))): cmd_list = ['fake', 'cmd'] self.CmdHelper.run_ssh(cmd_list) processutils.ssh_execute.assert_called_once_with( self.sshpool.item(), 'fake cmd', check_exit_code=False) def test_run_ssh_with_debug(self): self.CmdHelper.debug = True with mock.patch.object(processutils, "ssh_execute", mock.Mock(return_value=('fake_output', ''))): cmd_list = ['fake', 'cmd'] self.CmdHelper.run_ssh(cmd_list) processutils.ssh_execute.assert_called_once_with( self.sshpool.item(), 'fake cmd', check_exit_code=False) @mock.patch.object( processutils, "ssh_execute", mock.Mock(side_effect=processutils.ProcessExecutionError)) def test_run_ssh_exception(self): cmd_list = ['fake', 'cmd'] self.mock_object(greenthread, 'sleep', mock.Mock()) sshpool = MockSSHPool() with mock.patch.object(utils, "SSHPool", mock.Mock(return_value=sshpool)): self.CmdHelper = connector.SSHConnector(self.configuration) self.assertRaises(processutils.ProcessExecutionError, self.CmdHelper.run_ssh, cmd_list, True) utils.SSHPool.assert_called_once_with( ip=fakes.FakeData.emc_nas_server, port=22, conn_timeout=None, login=fakes.FakeData.emc_nas_login, password=fakes.FakeData.emc_nas_password) processutils.ssh_execute.assert_called_once_with( sshpool.item(), 'fake 
cmd', check_exit_code=True) manila-2.0.0/manila/tests/share/drivers/emc/plugins/vnx/test_object_manager.py0000664000567000056710000036261012701407107030716 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import ddt import mock from oslo_concurrency import processutils from manila.common import constants as const from manila import exception from manila.share.drivers.emc.plugins.vnx import connector from manila.share.drivers.emc.plugins.vnx import constants from manila.share.drivers.emc.plugins.vnx import object_manager as manager from manila import test from manila.tests.share.drivers.emc.plugins.vnx import fakes from manila.tests.share.drivers.emc.plugins.vnx import utils class StorageObjectManagerTestCase(test.TestCase): @mock.patch.object(connector, "XMLAPIConnector", mock.Mock()) @mock.patch.object(connector, "SSHConnector", mock.Mock()) def setUp(self): super(StorageObjectManagerTestCase, self).setUp() emd_share_driver = fakes.FakeEMCShareDriver() self.manager = manager.StorageObjectManager( emd_share_driver.configuration) def test_get_storage_context(self): type_map = { 'FileSystem': manager.FileSystem, 'StoragePool': manager.StoragePool, 'MountPoint': manager.MountPoint, 'Mover': manager.Mover, 'VDM': manager.VDM, 'Snapshot': manager.Snapshot, 'MoverInterface': manager.MoverInterface, 'DNSDomain': manager.DNSDomain, 'CIFSServer': manager.CIFSServer, 'CIFSShare': manager.CIFSShare, 'NFSShare': manager.NFSShare, } for key, value in type_map.items(): self.assertTrue( isinstance(self.manager.getStorageContext(key), value)) for key in self.manager.context.keys(): self.assertTrue(key in type_map) def test_get_storage_context_invalid_type(self): fake_type = 'fake_type' self.assertRaises(exception.EMCVnxXMLAPIError, self.manager.getStorageContext, fake_type) class StorageObjectTestCase(test.TestCase): @mock.patch.object(connector, "XMLAPIConnector", mock.Mock()) @mock.patch.object(connector, "SSHConnector", mock.Mock()) def setUp(self): super(StorageObjectTestCase, self).setUp() emd_share_driver = fakes.FakeEMCShareDriver() self.manager = manager.StorageObjectManager( emd_share_driver.configuration) self.pool = fakes.PoolTestData() self.vdm = fakes.VDMTestData() self.mover = fakes.MoverTestData() self.fs = fakes.FileSystemTestData() self.mount = fakes.MountPointTestData() self.snap = fakes.SnapshotTestData() self.cifs_share = fakes.CIFSShareTestData() self.nfs_share = fakes.NFSShareTestData() self.cifs_server = fakes.CIFSServerTestData() self.dns = fakes.DNSDomainTestData() class FileSystemTestCase(StorageObjectTestCase): def setUp(self): super(self.__class__, self).setUp() self.hook = utils.RequestSideEffect() self.ssh_hook = utils.SSHSideEffect() def test_create_file_system_on_vdm(self): self.hook.append(self.pool.resp_get_succeed()) self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.fs.resp_task_succeed()) context = self.manager.getStorageContext('FileSystem') 
context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(name=self.fs.filesystem_name, size=self.fs.filesystem_size, pool_name=self.pool.pool_name, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.pool.req_get()), mock.call(self.vdm.req_get()), mock.call(self.fs.req_create_on_vdm()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_file_system_on_mover(self): self.hook.append(self.pool.resp_get_succeed()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.fs.resp_task_succeed()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(name=self.fs.filesystem_name, size=self.fs.filesystem_size, pool_name=self.pool.pool_name, mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.pool.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.fs.req_create_on_mover()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_file_system_but_already_exist(self): self.hook.append(self.pool.resp_get_succeed()) self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.fs.resp_create_but_already_exist()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(name=self.fs.filesystem_name, size=self.fs.filesystem_size, pool_name=self.pool.pool_name, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.pool.req_get()), mock.call(self.vdm.req_get()), mock.call(self.fs.req_create_on_vdm()), ] context.conn['XML'].request.assert_has_calls(expected_calls) @mock.patch('time.sleep') def test_create_file_system_invalid_mover_id(self, sleep_mock): self.hook.append(self.pool.resp_get_succeed()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.fs.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.fs.resp_task_succeed()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(name=self.fs.filesystem_name, size=self.fs.filesystem_size, pool_name=self.pool.pool_name, mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.pool.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.fs.req_create_on_mover()), mock.call(self.mover.req_get_ref()), mock.call(self.fs.req_create_on_mover()), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_create_file_system_with_error(self): self.hook.append(self.pool.resp_get_succeed()) self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.fs.resp_task_error()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.create, name=self.fs.filesystem_name, size=self.fs.filesystem_size, pool_name=self.pool.pool_name, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.pool.req_get()), mock.call(self.vdm.req_get()), mock.call(self.fs.req_create_on_vdm()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_file_system(self): self.hook.append(self.fs.resp_get_succeed()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = 
context.get(self.fs.filesystem_name) self.assertEqual(constants.STATUS_OK, status) self.assertIn(self.fs.filesystem_name, context.filesystem_map) property_map = [ 'name', 'pools_id', 'volume_id', 'size', 'id', 'type', 'dataServicePolicies', ] for prop in property_map: self.assertIn(prop, out) id = context.get_id(self.fs.filesystem_name) self.assertEqual(self.fs.filesystem_id, id) expected_calls = [mock.call(self.fs.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_file_system_but_not_found(self): self.hook.append(self.fs.resp_get_but_not_found()) self.hook.append(self.fs.resp_get_without_value()) self.hook.append(self.fs.resp_get_error()) self.hook.append(self.fs.resp_get_but_not_found()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.fs.filesystem_name) self.assertEqual(constants.STATUS_NOT_FOUND, status) status, out = context.get(self.fs.filesystem_name) self.assertEqual(constants.STATUS_NOT_FOUND, status) status, out = context.get(self.fs.filesystem_name) self.assertEqual(constants.STATUS_ERROR, status) self.assertRaises(exception.EMCVnxXMLAPIError, context.get_id, self.fs.filesystem_name) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.fs.req_get()), mock.call(self.fs.req_get()), mock.call(self.fs.req_get()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_file_system_but_miss_property(self): self.hook.append(self.fs.resp_get_but_miss_property()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.fs.filesystem_name) self.assertEqual(constants.STATUS_OK, status) self.assertIn(self.fs.filesystem_name, context.filesystem_map) property_map = [ 'name', 'pools_id', 'volume_id', 'size', 'id', 'type', 'dataServicePolicies', ] for prop in property_map: self.assertIn(prop, out) self.assertIsNone(out['dataServicePolicies']) id = context.get_id(self.fs.filesystem_name) self.assertEqual(self.fs.filesystem_id, id) expected_calls = [mock.call(self.fs.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_file_system(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.fs.resp_task_succeed()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(self.fs.filesystem_name) self.assertNotIn(self.fs.filesystem_name, context.filesystem_map) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.fs.req_delete()), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertNotIn(self.fs.filesystem_name, context.filesystem_map) def test_delete_file_system_but_not_found(self): self.hook.append(self.fs.resp_get_but_not_found()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(self.fs.filesystem_name) expected_calls = [mock.call(self.fs.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_file_system_but_get_file_system_error(self): self.hook.append(self.fs.resp_get_error()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.delete, self.fs.filesystem_name) expected_calls = [mock.call(self.fs.req_get())] 
context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_file_system_with_error(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.fs.resp_delete_but_failed()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.delete, self.fs.filesystem_name) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.fs.req_delete()), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertIn(self.fs.filesystem_name, context.filesystem_map) def test_extend_file_system(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.pool.resp_get_succeed()) self.hook.append(self.fs.resp_task_succeed()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.extend(name=self.fs.filesystem_name, pool_name=self.pool.pool_name, new_size=self.fs.filesystem_new_size) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.pool.req_get()), mock.call(self.fs.req_extend()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_extend_file_system_but_not_found(self): self.hook.append(self.fs.resp_get_but_not_found()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.extend, name=self.fs.filesystem_name, pool_name=self.fs.pool_name, new_size=self.fs.filesystem_new_size) expected_calls = [mock.call(self.fs.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_extend_file_system_with_small_size(self): self.hook.append(self.fs.resp_get_succeed()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.extend, name=self.fs.filesystem_name, pool_name=self.pool.pool_name, new_size=1) expected_calls = [mock.call(self.fs.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_extend_file_system_with_same_size(self): self.hook.append(self.fs.resp_get_succeed()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.extend(name=self.fs.filesystem_name, pool_name=self.pool.pool_name, new_size=self.fs.filesystem_size) expected_calls = [mock.call(self.fs.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_extend_file_system_with_error(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.pool.resp_get_succeed()) self.hook.append(self.fs.resp_extend_but_error()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.extend, name=self.fs.filesystem_name, pool_name=self.pool.pool_name, new_size=self.fs.filesystem_new_size) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.pool.req_get()), mock.call(self.fs.req_extend()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_filesystem_from_snapshot(self): self.ssh_hook.append() self.ssh_hook.append() self.ssh_hook.append(self.fs.output_copy_ckpt) self.ssh_hook.append(self.fs.output_info()) self.ssh_hook.append() self.ssh_hook.append() self.ssh_hook.append() context = self.manager.getStorageContext('FileSystem') 
context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.create_from_snapshot(self.fs.filesystem_name, self.snap.src_snap_name, self.fs.src_fileystems_name, self.pool.pool_name, self.vdm.vdm_name, self.mover.interconnect_id,) ssh_calls = [ mock.call(self.fs.cmd_create_from_ckpt(), False), mock.call(self.mount.cmd_server_mount('ro'), False), mock.call(self.fs.cmd_copy_ckpt(), True), mock.call(self.fs.cmd_nas_fs_info(), False), mock.call(self.mount.cmd_server_umount(), False), mock.call(self.fs.cmd_delete(), False), mock.call(self.mount.cmd_server_mount('rw'), False), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_create_filesystem_from_snapshot_with_error(self): self.ssh_hook.append() self.ssh_hook.append() self.ssh_hook.append(ex=processutils.ProcessExecutionError( stdout=self.fs.fake_output, stderr=None)) self.ssh_hook.append(self.fs.output_info()) self.ssh_hook.append() self.ssh_hook.append() self.ssh_hook.append() context = self.manager.getStorageContext('FileSystem') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.create_from_snapshot( self.fs.filesystem_name, self.snap.src_snap_name, self.fs.src_fileystems_name, self.pool.pool_name, self.vdm.vdm_name, self.mover.interconnect_id, ) ssh_calls = [ mock.call(self.fs.cmd_create_from_ckpt(), False), mock.call(self.mount.cmd_server_mount('ro'), False), mock.call(self.fs.cmd_copy_ckpt(), True), mock.call(self.fs.cmd_nas_fs_info(), False), mock.call(self.mount.cmd_server_umount(), False), mock.call(self.fs.cmd_delete(), False), mock.call(self.mount.cmd_server_mount('rw'), False), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) class MountPointTestCase(StorageObjectTestCase): def setUp(self): super(self.__class__, self).setUp() self.hook = utils.RequestSideEffect() def test_create_mount_point_on_vdm(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.mount.resp_task_succeed()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(mount_path=self.mount.path, fs_name=self.fs.filesystem_name, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.vdm.req_get()), mock.call(self.mount.req_create(self.vdm.vdm_id, True)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_mount_point_on_mover(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_task_succeed()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(mount_path=self.mount.path, fs_name=self.fs.filesystem_name, mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_create(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_mount_point_but_already_exist(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.mount.resp_create_but_already_exist()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(mount_path=self.mount.path, fs_name=self.fs.filesystem_name, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ 
mock.call(self.fs.req_get()), mock.call(self.vdm.req_get()), mock.call(self.mount.req_create(self.vdm.vdm_id)), ] context.conn['XML'].request.assert_has_calls(expected_calls) @mock.patch('time.sleep') def test_create_mount_point_invalid_mover_id(self, sleep_mock): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_task_succeed()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(mount_path=self.mount.path, fs_name=self.fs.filesystem_name, mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_create(self.mover.mover_id, False)), mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_create(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_create_mount_point_with_error(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.mount.resp_task_error()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.create, mount_path=self.mount.path, fs_name=self.fs.filesystem_name, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.vdm.req_get()), mock.call(self.mount.req_create(self.vdm.vdm_id)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_mount_point_on_vdm(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.mount.resp_task_succeed()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(mount_path=self.mount.path, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mount.req_delete(self.vdm.vdm_id)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_mount_point_on_mover(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_task_succeed()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(mount_path=self.mount.path, mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_delete(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_mount_point_but_nonexistent(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.mount.resp_delete_but_nonexistent()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(mount_path=self.mount.path, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mount.req_delete(self.vdm.vdm_id)), ] context.conn['XML'].request.assert_has_calls(expected_calls) @mock.patch('time.sleep') def test_delete_mount_point_invalid_mover_id(self, sleep_mock): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_invalid_mover_id()) 
self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_task_succeed()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(mount_path=self.mount.path, mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_delete(self.mover.mover_id, False)), mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_delete(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_delete_mount_point_with_error(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.mount.resp_task_error()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.delete, mount_path=self.mount.path, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mount.req_delete(self.vdm.vdm_id)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_mount_points(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.mount.resp_get_succeed(self.vdm.vdm_id)) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_get_succeed(self.mover.mover_id, False)) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.vdm.vdm_name) self.assertEqual(constants.STATUS_OK, status) property_map = [ 'path', 'mover', 'moverIdIsVdm', 'fileSystem', ] for item in out: for prop in property_map: self.assertIn(prop, item) status, out = context.get(self.mover.mover_name, False) self.assertEqual(constants.STATUS_OK, status) property_map = [ 'path', 'mover', 'moverIdIsVdm', 'fileSystem', ] for item in out: for prop in property_map: self.assertIn(prop, item) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mount.req_get(self.vdm.vdm_id)), mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_get(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_mount_points_but_not_found(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_get_without_value()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.mover.mover_name, False) self.assertEqual(constants.STATUS_NOT_FOUND, status) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_get(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) @mock.patch('time.sleep') def test_get_mount_points_invalid_mover_id(self, sleep_mock): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_get_succeed(self.mover.mover_id, False)) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.mover.mover_name, False) self.assertEqual(constants.STATUS_OK, status) property_map = [ 'path', 'mover', 'moverIdIsVdm', 'fileSystem', ] for item in out: for prop in property_map: self.assertIn(prop, item) expected_calls = 
[ mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_get(self.mover.mover_id, False)), mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_get(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_get_mount_points_with_error(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_get_error()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.mover.mover_name, False) self.assertEqual(constants.STATUS_ERROR, status) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_get(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) class VDMTestCase(StorageObjectTestCase): def setUp(self): super(self.__class__, self).setUp() self.hook = utils.RequestSideEffect() self.ssh_hook = utils.SSHSideEffect() def test_create_vdm(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.vdm.resp_task_succeed()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(self.vdm.vdm_name, self.mover.mover_name) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.vdm.req_create()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_vdm_but_already_exist(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.vdm.resp_create_but_already_exist()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) # Create VDM which already exists. context.create(self.vdm.vdm_name, self.mover.mover_name) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.vdm.req_create()), ] context.conn['XML'].request.assert_has_calls(expected_calls) @mock.patch('time.sleep') def test_create_vdm_invalid_mover_id(self, sleep_mock): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.vdm.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.vdm.resp_task_succeed()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) # Create VDM with invalid mover ID context.create(self.vdm.vdm_name, self.mover.mover_name) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.vdm.req_create()), mock.call(self.mover.req_get_ref()), mock.call(self.vdm.req_create()), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_create_vdm_with_error(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.vdm.resp_task_error()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) # Create VDM with invalid mover ID self.assertRaises(exception.EMCVnxXMLAPIError, context.create, name=self.vdm.vdm_name, mover_name=self.mover.mover_name) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.vdm.req_create()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_vdm(self): self.hook.append(self.vdm.resp_get_succeed()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.vdm.vdm_name) self.assertEqual(constants.STATUS_OK, 
status) self.assertIn(self.vdm.vdm_name, context.vdm_map) property_map = [ 'name', 'id', 'state', 'host_mover_id', 'interfaces', ] for prop in property_map: self.assertIn(prop, out) expected_calls = [mock.call(self.vdm.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_vdm_with_error(self): self.hook.append(self.vdm.resp_get_error()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) # Get VDM with error status, out = context.get(self.vdm.vdm_name) self.assertEqual(constants.STATUS_ERROR, status) expected_calls = [mock.call(self.vdm.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_vdm_but_not_found(self): self.hook.append(self.vdm.resp_get_without_value()) self.hook.append(self.vdm.resp_get_succeed('fake')) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) # Get VDM which does not exist status, out = context.get(self.vdm.vdm_name) self.assertEqual(constants.STATUS_NOT_FOUND, status) status, out = context.get(self.vdm.vdm_name) self.assertEqual(constants.STATUS_NOT_FOUND, status) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.vdm.req_get()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_vdm_id_with_error(self): self.hook.append(self.vdm.resp_get_error()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.get_id, self.vdm.vdm_name) expected_calls = [mock.call(self.vdm.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_vdm(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.vdm.resp_task_succeed()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(self.vdm.vdm_name) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.vdm.req_delete()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_vdm_but_not_found(self): self.hook.append(self.vdm.resp_get_but_not_found()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(self.vdm.vdm_name) expected_calls = [mock.call(self.vdm.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_vdm_but_failed_to_get_vdm(self): self.hook.append(self.vdm.resp_get_error()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.delete, self.vdm.vdm_name) expected_calls = [mock.call(self.vdm.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_vdm_with_error(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.vdm.resp_task_error()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.delete, self.vdm.vdm_name) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.vdm.req_delete()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_attach_detach_nfs_interface(self): self.ssh_hook.append() self.ssh_hook.append() context = self.manager.getStorageContext('VDM') context.conn['SSH'].run_ssh = 
mock.Mock(side_effect=self.ssh_hook) context.attach_nfs_interface(self.vdm.vdm_name, self.mover.interface_name2) context.detach_nfs_interface(self.vdm.vdm_name, self.mover.interface_name2) ssh_calls = [ mock.call(self.vdm.cmd_attach_nfs_interface(), False), mock.call(self.vdm.cmd_detach_nfs_interface(), True), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_detach_nfs_interface_with_error(self): self.ssh_hook.append(ex=processutils.ProcessExecutionError( stdout=self.vdm.fake_output)) self.ssh_hook.append(self.vdm.output_get_interfaces( self.mover.interface_name2)) self.ssh_hook.append(ex=processutils.ProcessExecutionError( stdout=self.vdm.fake_output)) self.ssh_hook.append(self.vdm.output_get_interfaces( nfs_interface=fakes.FakeData.interface_name1)) context = self.manager.getStorageContext('VDM') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.detach_nfs_interface, self.vdm.vdm_name, self.mover.interface_name2) context.detach_nfs_interface(self.vdm.vdm_name, self.mover.interface_name2) ssh_calls = [ mock.call(self.vdm.cmd_detach_nfs_interface(), True), mock.call(self.vdm.cmd_get_interfaces(), False), mock.call(self.vdm.cmd_detach_nfs_interface(), True), mock.call(self.vdm.cmd_get_interfaces(), False), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_get_cifs_nfs_interface(self): self.ssh_hook.append(self.vdm.output_get_interfaces()) context = self.manager.getStorageContext('VDM') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) interfaces = context.get_interfaces(self.vdm.vdm_name) self.assertIsNotNone(interfaces['cifs']) self.assertIsNotNone(interfaces['nfs']) ssh_calls = [mock.call(self.vdm.cmd_get_interfaces(), False)] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) class StoragePoolTestCase(StorageObjectTestCase): def setUp(self): super(self.__class__, self).setUp() self.hook = utils.RequestSideEffect() def test_get_pool(self): self.hook.append(self.pool.resp_get_succeed()) context = self.manager.getStorageContext('StoragePool') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.pool.pool_name) self.assertEqual(constants.STATUS_OK, status) self.assertIn(self.pool.pool_name, context.pool_map) property_map = [ 'name', 'movers_id', 'total_size', 'used_size', 'diskType', 'dataServicePolicies', 'id', ] for prop in property_map: self.assertIn(prop, out) expected_calls = [mock.call(self.pool.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_pool_with_error(self): self.hook.append(self.pool.resp_get_error()) self.hook.append(self.pool.resp_get_without_value()) self.hook.append(self.pool.resp_get_succeed(name='other')) context = self.manager.getStorageContext('StoragePool') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.pool.pool_name) self.assertEqual(constants.STATUS_ERROR, status) status, out = context.get(self.pool.pool_name) self.assertEqual(constants.STATUS_NOT_FOUND, status) status, out = context.get(self.pool.pool_name) self.assertEqual(constants.STATUS_NOT_FOUND, status) expected_calls = [ mock.call(self.pool.req_get()), mock.call(self.pool.req_get()), mock.call(self.pool.req_get()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_pool_id_with_error(self): self.hook.append(self.pool.resp_get_error()) context = self.manager.getStorageContext('StoragePool') context.conn['XML'].request = 
utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.get_id, self.pool.pool_name) expected_calls = [mock.call(self.pool.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) class MoverTestCase(StorageObjectTestCase): def setUp(self): super(self.__class__, self).setUp() self.hook = utils.RequestSideEffect() self.ssh_hook = utils.SSHSideEffect() def test_get_mover(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mover.resp_get_succeed()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mover.resp_get_succeed()) context = self.manager.getStorageContext('Mover') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.mover.mover_name) self.assertEqual(constants.STATUS_OK, status) self.assertIn(self.mover.mover_name, context.mover_map) property_map = [ 'name', 'id', 'Status', 'version', 'uptime', 'role', 'interfaces', 'devices', 'dns_domain', ] for prop in property_map: self.assertIn(prop, out) status, out = context.get(self.mover.mover_name) self.assertEqual(constants.STATUS_OK, status) status, out = context.get(self.mover.mover_name, True) self.assertEqual(constants.STATUS_OK, status) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_get()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_mover_ref_not_found(self): self.hook.append(self.mover.resp_get_ref_succeed(name='other')) context = self.manager.getStorageContext('Mover') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get_ref(self.mover.mover_name) self.assertEqual(constants.STATUS_NOT_FOUND, status) expected_calls = [mock.call(self.mover.req_get_ref())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_mover_ref_with_error(self): self.hook.append(self.mover.resp_get_error()) context = self.manager.getStorageContext('Mover') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get_ref(self.mover.mover_name) self.assertEqual(constants.STATUS_ERROR, status) expected_calls = [mock.call(self.mover.req_get_ref())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_mover_ref_and_mover(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mover.resp_get_succeed()) context = self.manager.getStorageContext('Mover') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get_ref(self.mover.mover_name) self.assertEqual(constants.STATUS_OK, status) property_map = ['name', 'id'] for prop in property_map: self.assertIn(prop, out) status, out = context.get(self.mover.mover_name) self.assertEqual(constants.STATUS_OK, status) self.assertIn(self.mover.mover_name, context.mover_map) property_map = [ 'name', 'id', 'Status', 'version', 'uptime', 'role', 'interfaces', 'devices', 'dns_domain', ] for prop in property_map: self.assertIn(prop, out) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_get()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_mover_failed_to_get_mover_ref(self): self.hook.append(self.mover.resp_get_error()) context = self.manager.getStorageContext('Mover') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.get, self.mover.mover_name) 
expected_calls = [mock.call(self.mover.req_get_ref())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_mover_but_not_found(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mover.resp_get_without_value()) context = self.manager.getStorageContext('Mover') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(name=self.mover.mover_name, force=True) self.assertEqual(constants.STATUS_NOT_FOUND, status) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_get()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_mover_with_error(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mover.resp_get_error()) context = self.manager.getStorageContext('Mover') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.mover.mover_name) self.assertEqual(constants.STATUS_ERROR, status) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_get()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_interconnect_id(self): self.ssh_hook.append(self.mover.output_get_interconnect_id()) context = self.manager.getStorageContext('Mover') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) conn_id = context.get_interconnect_id(self.mover.mover_name, self.mover.mover_name) self.assertEqual(self.mover.interconnect_id, conn_id) ssh_calls = [mock.call(self.mover.cmd_get_interconnect_id(), False)] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_get_physical_devices(self): self.ssh_hook.append(self.mover.output_get_physical_devices()) context = self.manager.getStorageContext('Mover') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) devices = context.get_physical_devices(self.mover.mover_name) self.assertIn(self.mover.device_name, devices) ssh_calls = [mock.call(self.mover.cmd_get_physical_devices(), False)] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) class SnapshotTestCase(StorageObjectTestCase): def setUp(self): super(self.__class__, self).setUp() self.hook = utils.RequestSideEffect() def test_create_snapshot(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.snap.resp_task_succeed()) context = self.manager.getStorageContext('Snapshot') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(name=self.snap.snapshot_name, fs_name=self.fs.filesystem_name, pool_id=self.pool.pool_id) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.snap.req_create()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_snapshot_but_already_exist(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.snap.resp_create_but_already_exist()) context = self.manager.getStorageContext('Snapshot') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(name=self.snap.snapshot_name, fs_name=self.fs.filesystem_name, pool_id=self.pool.pool_id, ckpt_size=self.snap.snapshot_size) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.snap.req_create_with_size()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_snapshot_with_error(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.snap.resp_task_error()) context = self.manager.getStorageContext('Snapshot') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) 
self.assertRaises(exception.EMCVnxXMLAPIError, context.create, name=self.snap.snapshot_name, fs_name=self.fs.filesystem_name, pool_id=self.pool.pool_id, ckpt_size=self.snap.snapshot_size) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.snap.req_create_with_size()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_snapshot(self): self.hook.append(self.snap.resp_get_succeed()) context = self.manager.getStorageContext('Snapshot') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.snap.snapshot_name) self.assertEqual(constants.STATUS_OK, status) self.assertIn(self.snap.snapshot_name, context.snap_map) property_map = [ 'name', 'id', 'checkpointOf', 'state', ] for prop in property_map: self.assertIn(prop, out) expected_calls = [mock.call(self.snap.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_snapshot_but_not_found(self): self.hook.append(self.snap.resp_get_without_value()) context = self.manager.getStorageContext('Snapshot') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.snap.snapshot_name) self.assertEqual(constants.STATUS_NOT_FOUND, status) expected_calls = [mock.call(self.snap.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_snapshot_with_error(self): self.hook.append(self.snap.resp_get_error()) context = self.manager.getStorageContext('Snapshot') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.snap.snapshot_name) self.assertEqual(constants.STATUS_ERROR, status) expected_calls = [mock.call(self.snap.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_snapshot(self): self.hook.append(self.snap.resp_get_succeed()) self.hook.append(self.snap.resp_task_succeed()) context = self.manager.getStorageContext('Snapshot') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(self.snap.snapshot_name) self.assertNotIn(self.snap.snapshot_name, context.snap_map) expected_calls = [ mock.call(self.snap.req_get()), mock.call(self.snap.req_delete()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_snapshot_failed_to_get_snapshot(self): self.hook.append(self.snap.resp_get_error()) context = self.manager.getStorageContext('Snapshot') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.delete, self.snap.snapshot_name) expected_calls = [mock.call(self.snap.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_snapshot_but_not_found(self): self.hook.append(self.snap.resp_get_without_value()) context = self.manager.getStorageContext('Snapshot') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(self.snap.snapshot_name) self.assertNotIn(self.snap.snapshot_name, context.snap_map) expected_calls = [mock.call(self.snap.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_snapshot_with_error(self): self.hook.append(self.snap.resp_get_succeed()) self.hook.append(self.snap.resp_task_error()) context = self.manager.getStorageContext('Snapshot') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.delete, self.snap.snapshot_name) expected_calls = [ mock.call(self.snap.req_get()), mock.call(self.snap.req_delete()), ] 
context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_snapshot_id(self): self.hook.append(self.snap.resp_get_succeed()) context = self.manager.getStorageContext('Snapshot') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) id = context.get_id(self.snap.snapshot_name) self.assertEqual(self.snap.snapshot_id, id) expected_calls = [mock.call(self.snap.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_snapshot_id_with_error(self): self.hook.append(self.snap.resp_get_error()) context = self.manager.getStorageContext('Snapshot') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.get_id, self.snap.snapshot_name) expected_calls = [mock.call(self.snap.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) @ddt.ddt class MoverInterfaceTestCase(StorageObjectTestCase): def setUp(self): super(self.__class__, self).setUp() self.hook = utils.RequestSideEffect() def test_create_mover_interface(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mover.resp_task_succeed()) self.hook.append(self.mover.resp_task_succeed()) context = self.manager.getStorageContext('MoverInterface') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) interface = { 'name': self.mover.interface_name1, 'device_name': self.mover.device_name, 'ip': self.mover.ip_address1, 'mover_name': self.mover.mover_name, 'net_mask': self.mover.net_mask, 'vlan_id': self.mover.vlan_id, } context.create(interface) interface['name'] = self.mover.long_interface_name context.create(interface) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_create_interface()), mock.call(self.mover.req_create_interface( self.mover.long_interface_name[:31])), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_mover_interface_name_already_exist(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append( self.mover.resp_create_interface_but_name_already_exist()) context = self.manager.getStorageContext('MoverInterface') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) interface = { 'name': self.mover.interface_name1, 'device_name': self.mover.device_name, 'ip': self.mover.ip_address1, 'mover_name': self.mover.mover_name, 'net_mask': self.mover.net_mask, 'vlan_id': self.mover.vlan_id, } context.create(interface) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_create_interface()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_mover_interface_ip_already_exist(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append( self.mover.resp_create_interface_but_ip_already_exist()) context = self.manager.getStorageContext('MoverInterface') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) interface = { 'name': self.mover.interface_name1, 'device_name': self.mover.device_name, 'ip': self.mover.ip_address1, 'mover_name': self.mover.mover_name, 'net_mask': self.mover.net_mask, 'vlan_id': self.mover.vlan_id, } context.create(interface) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_create_interface()), ] context.conn['XML'].request.assert_has_calls(expected_calls) @ddt.data(fakes.MoverTestData().resp_task_succeed(), fakes.MoverTestData().resp_task_error()) def test_create_mover_interface_with_conflict_vlan_id(self, xml_resp): 
self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append( self.mover.resp_create_interface_with_conflicted_vlan_id()) self.hook.append(xml_resp) context = self.manager.getStorageContext('MoverInterface') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) interface = { 'name': self.mover.interface_name1, 'device_name': self.mover.device_name, 'ip': self.mover.ip_address1, 'mover_name': self.mover.mover_name, 'net_mask': self.mover.net_mask, 'vlan_id': self.mover.vlan_id, } self.assertRaises(exception.EMCVnxXMLAPIError, context.create, interface) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_create_interface()), mock.call(self.mover.req_delete_interface()), ] context.conn['XML'].request.assert_has_calls(expected_calls) @mock.patch('time.sleep') def test_create_mover_interface_invalid_mover_id(self, sleep_mock): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mover.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mover.resp_task_succeed()) context = self.manager.getStorageContext('MoverInterface') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) interface = { 'name': self.mover.interface_name1, 'device_name': self.mover.device_name, 'ip': self.mover.ip_address1, 'mover_name': self.mover.mover_name, 'net_mask': self.mover.net_mask, 'vlan_id': self.mover.vlan_id, } context.create(interface) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_create_interface()), mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_create_interface()), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_create_mover_interface_with_error(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mover.resp_task_error()) context = self.manager.getStorageContext('MoverInterface') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) interface = { 'name': self.mover.interface_name1, 'device_name': self.mover.device_name, 'ip': self.mover.ip_address1, 'mover_name': self.mover.mover_name, 'net_mask': self.mover.net_mask, 'vlan_id': self.mover.vlan_id, } self.assertRaises(exception.EMCVnxXMLAPIError, context.create, interface) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_create_interface()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_mover_interface(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mover.resp_get_succeed()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mover.resp_get_succeed()) context = self.manager.getStorageContext('MoverInterface') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(name=self.mover.interface_name1, mover_name=self.mover.mover_name) self.assertEqual(constants.STATUS_OK, status) property_map = [ 'name', 'device', 'up', 'ipVersion', 'netMask', 'ipAddress', 'vlanid', ] for prop in property_map: self.assertIn(prop, out) context.get(name=self.mover.long_interface_name, mover_name=self.mover.mover_name) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_get()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_mover_interface_not_found(self): self.hook.append(self.mover.resp_get_ref_succeed()) 
self.hook.append(self.mover.resp_get_without_value()) context = self.manager.getStorageContext('MoverInterface') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(name=self.mover.interface_name1, mover_name=self.mover.mover_name) self.assertEqual(constants.STATUS_NOT_FOUND, status) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_get()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_mover_interface(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mover.resp_task_succeed()) context = self.manager.getStorageContext('MoverInterface') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(ip_addr=self.mover.ip_address1, mover_name=self.mover.mover_name) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_delete_interface()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_mover_interface_but_nonexistent(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mover.resp_delete_interface_but_nonexistent()) context = self.manager.getStorageContext('MoverInterface') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(ip_addr=self.mover.ip_address1, mover_name=self.mover.mover_name) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_delete_interface()), ] context.conn['XML'].request.assert_has_calls(expected_calls) @mock.patch('time.sleep') def test_delete_mover_interface_invalid_mover_id(self, sleep_mock): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mover.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mover.resp_task_succeed()) context = self.manager.getStorageContext('MoverInterface') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(ip_addr=self.mover.ip_address1, mover_name=self.mover.mover_name) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_delete_interface()), mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_delete_interface()), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_delete_mover_interface_with_error(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mover.resp_task_error()) context = self.manager.getStorageContext('MoverInterface') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.delete, ip_addr=self.mover.ip_address1, mover_name=self.mover.mover_name) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_delete_interface()), ] context.conn['XML'].request.assert_has_calls(expected_calls) class DNSDomainTestCase(StorageObjectTestCase): def setUp(self): super(self.__class__, self).setUp() self.hook = utils.RequestSideEffect() def test_create_dns_domain(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.dns.resp_task_succeed()) context = self.manager.getStorageContext('DNSDomain') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(mover_name=self.mover.mover_name, name=self.dns.domain_name, servers=self.dns.dns_ip_address) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.dns.req_create()), ] context.conn['XML'].request.assert_has_calls(expected_calls) 
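    # NOTE: the storage-object tests in this file share one request-mocking
    # pattern: canned XML responses (and, for CLI paths, SSH outputs) are
    # queued on a utils.RequestSideEffect / utils.SSHSideEffect, a
    # utils.EMCMock (or mock.Mock) is installed as context.conn['XML'].request
    # (or context.conn['SSH'].run_ssh), the storage-context method under test
    # is invoked, and the ordered requests are then verified with
    # assert_has_calls. The "invalid mover id" variants additionally patch
    # time.sleep so the refresh-and-retry path runs without real delays.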
@mock.patch('time.sleep') def test_create_dns_domain_invalid_mover_id(self, sleep_mock): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.dns.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.dns.resp_task_succeed()) context = self.manager.getStorageContext('DNSDomain') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(mover_name=self.mover.mover_name, name=self.dns.domain_name, servers=self.dns.dns_ip_address) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.dns.req_create()), mock.call(self.mover.req_get_ref()), mock.call(self.dns.req_create()), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_create_dns_domain_with_error(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.dns.resp_task_error()) context = self.manager.getStorageContext('DNSDomain') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.create, mover_name=self.mover.mover_name, name=self.mover.domain_name, servers=self.dns.dns_ip_address) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.dns.req_create()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_dns_domain(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.dns.resp_task_succeed()) self.hook.append(self.dns.resp_task_error()) context = self.manager.getStorageContext('DNSDomain') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(mover_name=self.mover.mover_name, name=self.mover.domain_name) context.delete(mover_name=self.mover.mover_name, name=self.mover.domain_name) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.dns.req_delete()), mock.call(self.dns.req_delete()), ] context.conn['XML'].request.assert_has_calls(expected_calls) @mock.patch('time.sleep') def test_delete_dns_domain_invalid_mover_id(self, sleep_mock): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.dns.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.dns.resp_task_succeed()) context = self.manager.getStorageContext('DNSDomain') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(mover_name=self.mover.mover_name, name=self.mover.domain_name) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.dns.req_delete()), mock.call(self.mover.req_get_ref()), mock.call(self.dns.req_delete()), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) class CIFSServerTestCase(StorageObjectTestCase): def setUp(self): super(self.__class__, self).setUp() self.hook = utils.RequestSideEffect() def test_create_cifs_server(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_server.resp_task_succeed()) self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.cifs_server.resp_task_succeed()) self.hook.append(self.cifs_server.resp_task_error()) self.hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) context = self.manager.getStorageContext('CIFSServer') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) # Create CIFS server on mover cifs_server_args = { 'name': self.cifs_server.cifs_server_name, 'interface_ip': self.cifs_server.ip_address1, 
'domain_name': self.cifs_server.domain_name, 'user_name': self.cifs_server.domain_user, 'password': self.cifs_server.domain_password, 'mover_name': self.mover.mover_name, 'is_vdm': False, } context.create(cifs_server_args) # Create CIFS server on VDM cifs_server_args = { 'name': self.cifs_server.cifs_server_name, 'interface_ip': self.cifs_server.ip_address1, 'domain_name': self.cifs_server.domain_name, 'user_name': self.cifs_server.domain_user, 'password': self.cifs_server.domain_password, 'mover_name': self.vdm.vdm_name, 'is_vdm': True, } context.create(cifs_server_args) # Create CIFS server on VDM cifs_server_args = { 'name': self.cifs_server.cifs_server_name, 'interface_ip': self.cifs_server.ip_address1, 'domain_name': self.cifs_server.domain_name, 'user_name': self.cifs_server.domain_user, 'password': self.cifs_server.domain_password, 'mover_name': self.vdm.vdm_name, 'is_vdm': True, } context.create(cifs_server_args) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.cifs_server.req_create(self.mover.mover_id, False)), mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_create(self.vdm.vdm_id)), mock.call(self.cifs_server.req_create(self.vdm.vdm_id)), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_cifs_server_already_exist(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.cifs_server.resp_task_error()) self.hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) context = self.manager.getStorageContext('CIFSServer') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) @mock.patch('time.sleep') def test_create_cifs_server_invalid_mover_id(self, sleep_mock): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_server.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_server.resp_task_succeed()) context = self.manager.getStorageContext('CIFSServer') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) # Create CIFS server on mover cifs_server_args = { 'name': self.cifs_server.cifs_server_name, 'interface_ip': self.cifs_server.ip_address1, 'domain_name': self.cifs_server.domain_name, 'user_name': self.cifs_server.domain_user, 'password': self.cifs_server.domain_password, 'mover_name': self.mover.mover_name, 'is_vdm': False, } context.create(cifs_server_args) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.cifs_server.req_create(self.mover.mover_id, False)), mock.call(self.mover.req_get_ref()), mock.call(self.cifs_server.req_create(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_create_cifs_server_with_error(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.cifs_server.resp_task_error()) self.hook.append(self.cifs_server.resp_get_error()) context = self.manager.getStorageContext('CIFSServer') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) # Create CIFS server on VDM cifs_server_args = { 'name': self.cifs_server.cifs_server_name, 'interface_ip': self.cifs_server.ip_address1, 'domain_name': self.cifs_server.domain_name, 'user_name': self.cifs_server.domain_user, 'password': self.cifs_server.domain_password, 'mover_name': self.vdm.vdm_name, 'is_vdm': True, } self.assertRaises(exception.EMCVnxXMLAPIError, context.create, cifs_server_args) 
expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_create(self.vdm.vdm_id)), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_all_cifs_server(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) self.hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) context = self.manager.getStorageContext('CIFSServer') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get_all(self.vdm.vdm_name) self.assertEqual(constants.STATUS_OK, status) self.assertIn(self.vdm.vdm_name, context.cifs_server_map) # Get CIFS server from the cache status, out = context.get_all(self.vdm.vdm_name) self.assertEqual(constants.STATUS_OK, status) self.assertIn(self.vdm.vdm_name, context.cifs_server_map) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] context.conn['XML'].request.assert_has_calls(expected_calls) @mock.patch('time.sleep') def test_get_all_cifs_server_invalid_mover_id(self, sleep_mock): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_server.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_server.resp_get_succeed( mover_id=self.mover.mover_id, is_vdm=False, join_domain=True)) context = self.manager.getStorageContext('CIFSServer') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get_all(self.mover.mover_name, False) self.assertEqual(constants.STATUS_OK, status) self.assertIn(self.mover.mover_name, context.cifs_server_map) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.cifs_server.req_get(self.mover.mover_id, False)), mock.call(self.mover.req_get_ref()), mock.call(self.cifs_server.req_get(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_get_cifs_server(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) context = self.manager.getStorageContext('CIFSServer') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(name=self.cifs_server.cifs_server_name, mover_name=self.vdm.vdm_name) self.assertEqual(constants.STATUS_OK, status) property_map = { 'name', 'compName', 'Aliases', 'type', 'interfaces', 'domain', 'domainJoined', 'mover', 'moverIdIsVdm', } for prop in property_map: self.assertIn(prop, out) context.get(name=self.cifs_server.cifs_server_name, mover_name=self.vdm.vdm_name) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_modify_cifs_server(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_server.resp_task_succeed()) self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.cifs_server.resp_task_succeed()) context = self.manager.getStorageContext('CIFSServer') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) cifs_server_args = { 'name': self.cifs_server.cifs_server_name[-14:], 'join_domain': True, 'user_name': 
self.cifs_server.domain_user, 'password': self.cifs_server.domain_password, 'mover_name': self.mover.mover_name, 'is_vdm': False, } context.modify(cifs_server_args) cifs_server_args = { 'name': self.cifs_server.cifs_server_name[-14:], 'join_domain': False, 'user_name': self.cifs_server.domain_user, 'password': self.cifs_server.domain_password, 'mover_name': self.vdm.vdm_name, } context.modify(cifs_server_args) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.cifs_server.req_modify( mover_id=self.mover.mover_id, is_vdm=False, join_domain=True)), mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_modify( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_modify_cifs_server_but_unjoin_domain(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.cifs_server.resp_modify_but_unjoin_domain()) context = self.manager.getStorageContext('CIFSServer') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) cifs_server_args = { 'name': self.cifs_server.cifs_server_name[-14:], 'join_domain': False, 'user_name': self.cifs_server.domain_user, 'password': self.cifs_server.domain_password, 'mover_name': self.vdm.vdm_name, } context.modify(cifs_server_args) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_modify( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_modify_cifs_server_but_already_join_domain(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append( self.cifs_server.resp_modify_but_already_join_domain()) context = self.manager.getStorageContext('CIFSServer') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) cifs_server_args = { 'name': self.cifs_server.cifs_server_name[-14:], 'join_domain': True, 'user_name': self.cifs_server.domain_user, 'password': self.cifs_server.domain_password, 'mover_name': self.vdm.vdm_name, } context.modify(cifs_server_args) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_modify( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)), ] context.conn['XML'].request.assert_has_calls(expected_calls) @mock.patch('time.sleep') def test_modify_cifs_server_invalid_mover_id(self, sleep_mock): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_server.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_server.resp_task_succeed()) context = self.manager.getStorageContext('CIFSServer') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) cifs_server_args = { 'name': self.cifs_server.cifs_server_name[-14:], 'join_domain': True, 'user_name': self.cifs_server.domain_user, 'password': self.cifs_server.domain_password, 'mover_name': self.mover.mover_name, 'is_vdm': False, } context.modify(cifs_server_args) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.cifs_server.req_modify( mover_id=self.mover.mover_id, is_vdm=False, join_domain=True)), mock.call(self.mover.req_get_ref()), mock.call(self.cifs_server.req_modify( mover_id=self.mover.mover_id, is_vdm=False, join_domain=True)), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_modify_cifs_server_with_error(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.cifs_server.resp_task_error()) context = 
self.manager.getStorageContext('CIFSServer') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) cifs_server_args = { 'name': self.cifs_server.cifs_server_name[-14:], 'join_domain': False, 'user_name': self.cifs_server.domain_user, 'password': self.cifs_server.domain_password, 'mover_name': self.vdm.vdm_name, } self.assertRaises(exception.EMCVnxXMLAPIError, context.modify, cifs_server_args) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_modify( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_cifs_server(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_server.resp_get_succeed( mover_id=self.mover.mover_id, is_vdm=False, join_domain=True)) self.hook.append(self.cifs_server.resp_task_succeed()) self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False)) self.hook.append(self.cifs_server.resp_task_succeed()) context = self.manager.getStorageContext('CIFSServer') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(computer_name=self.cifs_server.cifs_server_name, mover_name=self.mover.mover_name, is_vdm=False) context.delete(computer_name=self.cifs_server.cifs_server_name, mover_name=self.vdm.vdm_name) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.cifs_server.req_get(self.mover.mover_id, False)), mock.call(self.cifs_server.req_delete(self.mover.mover_id, False)), mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), mock.call(self.cifs_server.req_delete(self.vdm.vdm_id)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_cifs_server_but_not_found(self): self.hook.append(self.mover.resp_get_without_value()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_server.resp_get_without_value()) context = self.manager.getStorageContext('CIFSServer') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(computer_name=self.cifs_server.cifs_server_name, mover_name=self.mover.mover_name, is_vdm=False) context.delete(computer_name=self.cifs_server.cifs_server_name, mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_get_ref()), mock.call(self.cifs_server.req_get(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_cifs_server_with_error(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_server.resp_get_succeed( mover_id=self.mover.mover_id, is_vdm=False, join_domain=True)) self.hook.append(self.cifs_server.resp_task_error()) context = self.manager.getStorageContext('CIFSServer') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.delete, computer_name=self.cifs_server.cifs_server_name, mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.cifs_server.req_get(self.mover.mover_id, False)), mock.call(self.cifs_server.req_delete(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) class CIFSShareTestCase(StorageObjectTestCase): def setUp(self): super(self.__class__, self).setUp() self.hook = utils.RequestSideEffect() self.ssh_hook = 
utils.SSHSideEffect() def test_create_cifs_share(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.cifs_share.resp_task_succeed()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_share.resp_task_succeed()) context = self.manager.getStorageContext('CIFSShare') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(name=self.cifs_share.share_name, server_name=self.cifs_share.cifs_server_name[-14:], mover_name=self.vdm.vdm_name, is_vdm=True) context.create(name=self.cifs_share.share_name, server_name=self.cifs_share.cifs_server_name[-14:], mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_share.req_create(self.vdm.vdm_id)), mock.call(self.mover.req_get_ref()), mock.call(self.cifs_share.req_create(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) @mock.patch('time.sleep') def test_create_cifs_share_invalid_mover_id(self, sleep_mock): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_share.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_share.resp_task_succeed()) context = self.manager.getStorageContext('CIFSShare') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(name=self.cifs_share.share_name, server_name=self.cifs_share.cifs_server_name[-14:], mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.cifs_share.req_create(self.mover.mover_id, False)), mock.call(self.mover.req_get_ref()), mock.call(self.cifs_share.req_create(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_create_cifs_share_with_error(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.cifs_share.resp_task_error()) context = self.manager.getStorageContext('CIFSShare') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.create, name=self.cifs_share.share_name, server_name=self.cifs_share.cifs_server_name[-14:], mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_share.req_create(self.vdm.vdm_id)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_cifs_share(self): self.hook.append(self.cifs_share.resp_get_succeed(self.vdm.vdm_id)) self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.cifs_share.resp_task_succeed()) self.hook.append(self.cifs_share.resp_get_succeed(self.mover.mover_id, False)) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_share.resp_task_succeed()) context = self.manager.getStorageContext('CIFSShare') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(name=self.cifs_share.share_name, mover_name=self.vdm.vdm_name, is_vdm=True) context.delete(name=self.cifs_share.share_name, mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.cifs_share.req_get()), mock.call(self.vdm.req_get()), mock.call(self.cifs_share.req_delete(self.vdm.vdm_id)), mock.call(self.cifs_share.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.cifs_share.req_delete(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def 
test_delete_cifs_share_not_found(self): self.hook.append(self.cifs_share.resp_get_error()) self.hook.append(self.cifs_share.resp_get_without_value()) context = self.manager.getStorageContext('CIFSShare') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.delete, name=self.cifs_share.share_name, mover_name=self.vdm.vdm_name, is_vdm=True) context.delete(name=self.cifs_share.share_name, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.cifs_share.req_get()), mock.call(self.cifs_share.req_get()), ] context.conn['XML'].request.assert_has_calls(expected_calls) @mock.patch('time.sleep') def test_delete_cifs_share_invalid_mover_id(self, sleep_mock): self.hook.append(self.cifs_share.resp_get_succeed(self.mover.mover_id, False)) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_share.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_share.resp_task_succeed()) context = self.manager.getStorageContext('CIFSShare') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(name=self.cifs_share.share_name, mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.cifs_share.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.cifs_share.req_delete(self.mover.mover_id, False)), mock.call(self.mover.req_get_ref()), mock.call(self.cifs_share.req_delete(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_delete_cifs_share_with_error(self): self.hook.append(self.cifs_share.resp_get_succeed(self.vdm.vdm_id)) self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.cifs_share.resp_task_error()) context = self.manager.getStorageContext('CIFSShare') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.delete, name=self.cifs_share.share_name, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.cifs_share.req_get()), mock.call(self.vdm.req_get()), mock.call(self.cifs_share.req_delete(self.vdm.vdm_id)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_cifs_share(self): self.hook.append(self.cifs_share.resp_get_succeed(self.vdm.vdm_id)) context = self.manager.getStorageContext('CIFSShare') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.get(self.cifs_share.share_name) expected_calls = [mock.call(self.cifs_share.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_disable_share_access(self): self.ssh_hook.append('Command succeeded') context = self.manager.getStorageContext('CIFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.disable_share_access(share_name=self.cifs_share.share_name, mover_name=self.vdm.vdm_name) ssh_calls = [mock.call(self.cifs_share.cmd_disable_access(), True)] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_disable_share_access_with_error(self): self.ssh_hook.append(ex=processutils.ProcessExecutionError( stdout=self.cifs_share.fake_output)) context = self.manager.getStorageContext('CIFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.disable_share_access, share_name=self.cifs_share.share_name, mover_name=self.vdm.vdm_name) ssh_calls = 
[mock.call(self.cifs_share.cmd_disable_access(), True)] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_allow_share_access(self): self.ssh_hook.append(self.cifs_share.output_allow_access()) context = self.manager.getStorageContext('CIFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.allow_share_access(mover_name=self.vdm.vdm_name, share_name=self.cifs_share.share_name, user_name=self.cifs_server.domain_user, domain=self.cifs_server.domain_name, access=constants.CIFS_ACL_FULLCONTROL) ssh_calls = [mock.call(self.cifs_share.cmd_change_access(), True)] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_allow_share_access_duplicate_ACE(self): expt_dup_ace = processutils.ProcessExecutionError( stdout=self.cifs_share.output_allow_access_but_duplicate_ace()) self.ssh_hook.append(ex=expt_dup_ace) context = self.manager.getStorageContext('CIFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.allow_share_access(mover_name=self.vdm.vdm_name, share_name=self.cifs_share.share_name, user_name=self.cifs_server.domain_user, domain=self.cifs_server.domain_name, access=constants.CIFS_ACL_FULLCONTROL) ssh_calls = [mock.call(self.cifs_share.cmd_change_access(), True)] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_allow_share_access_with_error(self): expt_err = processutils.ProcessExecutionError( self.cifs_share.fake_output) self.ssh_hook.append(ex=expt_err) context = self.manager.getStorageContext('CIFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.allow_share_access, mover_name=self.vdm.vdm_name, share_name=self.cifs_share.share_name, user_name=self.cifs_server.domain_user, domain=self.cifs_server.domain_name, access=constants.CIFS_ACL_FULLCONTROL) ssh_calls = [mock.call(self.cifs_share.cmd_change_access(), True)] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_deny_share_access(self): self.ssh_hook.append('Command succeeded') context = self.manager.getStorageContext('CIFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.deny_share_access(mover_name=self.vdm.vdm_name, share_name=self.cifs_share.share_name, user_name=self.cifs_server.domain_user, domain=self.cifs_server.domain_name, access=constants.CIFS_ACL_FULLCONTROL) ssh_calls = [ mock.call(self.cifs_share.cmd_change_access(action='revoke'), True), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_deny_share_access_no_ace(self): expt_no_ace = processutils.ProcessExecutionError( stdout=self.cifs_share.output_deny_access_but_no_ace()) self.ssh_hook.append(ex=expt_no_ace) context = self.manager.getStorageContext('CIFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.deny_share_access(mover_name=self.vdm.vdm_name, share_name=self.cifs_share.share_name, user_name=self.cifs_server.domain_user, domain=self.cifs_server.domain_name, access=constants.CIFS_ACL_FULLCONTROL) ssh_calls = [ mock.call(self.cifs_share.cmd_change_access(action='revoke'), True), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_deny_share_access_but_no_user_found(self): expt_no_user = processutils.ProcessExecutionError( stdout=self.cifs_share.output_deny_access_but_no_user_found()) self.ssh_hook.append(ex=expt_no_user) context = self.manager.getStorageContext('CIFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) 
context.deny_share_access(mover_name=self.vdm.vdm_name, share_name=self.cifs_share.share_name, user_name=self.cifs_server.domain_user, domain=self.cifs_server.domain_name, access=constants.CIFS_ACL_FULLCONTROL) ssh_calls = [ mock.call(self.cifs_share.cmd_change_access(action='revoke'), True), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_deny_share_access_with_error(self): expt_err = processutils.ProcessExecutionError( self.cifs_share.fake_output) self.ssh_hook.append(ex=expt_err) context = self.manager.getStorageContext('CIFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.deny_share_access, mover_name=self.vdm.vdm_name, share_name=self.cifs_share.share_name, user_name=self.cifs_server.domain_user, domain=self.cifs_server.domain_name, access=constants.CIFS_ACL_FULLCONTROL) ssh_calls = [ mock.call(self.cifs_share.cmd_change_access(action='revoke'), True), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) class NFSShareTestCase(StorageObjectTestCase): def setUp(self): super(self.__class__, self).setUp() self.ssh_hook = utils.SSHSideEffect() def test_create_nfs_share(self): self.ssh_hook.append(self.nfs_share.output_create()) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.create(name=self.nfs_share.share_name, mover_name=self.vdm.vdm_name) ssh_calls = [mock.call(self.nfs_share.cmd_create(), True)] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_create_nfs_share_with_error(self): expt_err = processutils.ProcessExecutionError( stdout=self.nfs_share.fake_output) self.ssh_hook.append(ex=expt_err) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.create, name=self.nfs_share.share_name, mover_name=self.vdm.vdm_name) ssh_calls = [mock.call(self.nfs_share.cmd_create(), True)] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_delete_nfs_share(self): self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) self.ssh_hook.append(self.nfs_share.output_delete_succeed()) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.delete(name=self.nfs_share.share_name, mover_name=self.vdm.vdm_name) ssh_calls = [ mock.call(self.nfs_share.cmd_get(), False), mock.call(self.nfs_share.cmd_delete(), True), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_delete_nfs_share_not_found(self): expt_not_found = processutils.ProcessExecutionError( stdout=self.nfs_share.output_get_but_not_found()) self.ssh_hook.append(ex=expt_not_found) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.delete(name=self.nfs_share.share_name, mover_name=self.vdm.vdm_name) ssh_calls = [mock.call(self.nfs_share.cmd_get(), False)] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) @mock.patch('time.sleep') def test_delete_nfs_share_locked(self, sleep_mock): self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) expt_locked = processutils.ProcessExecutionError( stdout=self.nfs_share.output_delete_but_locked()) self.ssh_hook.append(ex=expt_locked) 
self.ssh_hook.append(self.nfs_share.output_delete_succeed()) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.delete(name=self.nfs_share.share_name, mover_name=self.vdm.vdm_name) ssh_calls = [ mock.call(self.nfs_share.cmd_get(), False), mock.call(self.nfs_share.cmd_delete(), True), mock.call(self.nfs_share.cmd_delete(), True), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) self.assertTrue(sleep_mock.called) def test_delete_nfs_share_with_error(self): self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) expt_err = processutils.ProcessExecutionError( stdout=self.nfs_share.fake_output) self.ssh_hook.append(ex=expt_err) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.delete, name=self.nfs_share.share_name, mover_name=self.vdm.vdm_name) ssh_calls = [ mock.call(self.nfs_share.cmd_get(), False), mock.call(self.nfs_share.cmd_delete(), True), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_get_nfs_share(self): self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.get(name=self.nfs_share.share_name, mover_name=self.vdm.vdm_name) # Get NFS share from cache context.get(name=self.nfs_share.share_name, mover_name=self.vdm.vdm_name) ssh_calls = [mock.call(self.nfs_share.cmd_get(), False)] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_get_nfs_share_not_found(self): expt_not_found = processutils.ProcessExecutionError( stdout=self.nfs_share.output_get_but_not_found()) self.ssh_hook.append(ex=expt_not_found) self.ssh_hook.append(self.nfs_share.output_get_but_not_found()) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.get(name=self.nfs_share.share_name, mover_name=self.vdm.vdm_name) context.get(name=self.nfs_share.share_name, mover_name=self.vdm.vdm_name) ssh_calls = [ mock.call(self.nfs_share.cmd_get(), False), mock.call(self.nfs_share.cmd_get(), False), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_get_nfs_share_with_error(self): expt_err = processutils.ProcessExecutionError( stdout=self.nfs_share.fake_output) self.ssh_hook.append(ex=expt_err) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.get, name=self.nfs_share.share_name, mover_name=self.vdm.vdm_name) ssh_calls = [mock.call(self.nfs_share.cmd_get(), False)] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_allow_share_access(self): rw_hosts = copy.deepcopy(self.nfs_share.rw_hosts) rw_hosts.append(self.nfs_share.nfs_host_ip) ro_hosts = copy.deepcopy(self.nfs_share.ro_hosts) ro_hosts.append(self.nfs_share.nfs_host_ip) self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) self.ssh_hook.append(self.nfs_share.output_set_access_success()) self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) self.ssh_hook.append(self.nfs_share.output_set_access_success()) 
self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=ro_hosts)) self.ssh_hook.append(self.nfs_share.output_set_access_success()) self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = utils.EMCNFSShareMock( side_effect=self.ssh_hook) context.allow_share_access(share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name, access_level=const.ACCESS_LEVEL_RW) context.allow_share_access(share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name, access_level=const.ACCESS_LEVEL_RO) context.allow_share_access(share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name, access_level=const.ACCESS_LEVEL_RW) context.allow_share_access(share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name, access_level=const.ACCESS_LEVEL_RW) ssh_calls = [ mock.call(self.nfs_share.cmd_get()), mock.call(self.nfs_share.cmd_set_access( rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts)), mock.call(self.nfs_share.cmd_get()), mock.call(self.nfs_share.cmd_set_access( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=ro_hosts)), mock.call(self.nfs_share.cmd_get()), mock.call(self.nfs_share.cmd_set_access( rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts)), mock.call(self.nfs_share.cmd_get()), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_allow_share_access_not_found(self): expt_not_found = processutils.ProcessExecutionError( stdout=self.nfs_share.output_get_but_not_found()) self.ssh_hook.append(ex=expt_not_found) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = utils.EMCNFSShareMock( side_effect=self.ssh_hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.allow_share_access, share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name, access_level=const.ACCESS_LEVEL_RW) ssh_calls = [mock.call(self.nfs_share.cmd_get())] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_deny_rw_share_access(self): rw_hosts = copy.deepcopy(self.nfs_share.rw_hosts) rw_hosts.append(self.nfs_share.nfs_host_ip) self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) self.ssh_hook.append(self.nfs_share.output_set_access_success()) self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = utils.EMCNFSShareMock( side_effect=self.ssh_hook) context.deny_share_access(share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name) ssh_calls = [ mock.call(self.nfs_share.cmd_get()), mock.call(self.nfs_share.cmd_set_access(self.nfs_share.rw_hosts, self.nfs_share.ro_hosts)), mock.call(self.nfs_share.cmd_get()), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_deny_ro_share_access(self): ro_hosts = copy.deepcopy(self.nfs_share.ro_hosts) ro_hosts.append(self.nfs_share.nfs_host_ip) self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=ro_hosts)) self.ssh_hook.append(self.nfs_share.output_set_access_success()) self.ssh_hook.append(self.nfs_share.output_get_succeed( 
rw_hosts=self.nfs_share.rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = utils.EMCNFSShareMock( side_effect=self.ssh_hook) context.deny_share_access(share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name) context.deny_share_access(share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name) ssh_calls = [ mock.call(self.nfs_share.cmd_get()), mock.call(self.nfs_share.cmd_set_access(self.nfs_share.rw_hosts, self.nfs_share.ro_hosts)), mock.call(self.nfs_share.cmd_get()), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_deny_share_not_found(self): expt_not_found = processutils.ProcessExecutionError( stdout=self.nfs_share.output_get_but_not_found()) self.ssh_hook.append(ex=expt_not_found) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = utils.EMCNFSShareMock( side_effect=self.ssh_hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.deny_share_access, share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name) ssh_calls = [mock.call(self.nfs_share.cmd_get())] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_deny_rw_share_with_error(self): rw_hosts = copy.deepcopy(self.nfs_share.rw_hosts) rw_hosts.append(self.nfs_share.nfs_host_ip) self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) expt_not_found = processutils.ProcessExecutionError( stdout=self.nfs_share.output_get_but_not_found()) self.ssh_hook.append(ex=expt_not_found) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = utils.EMCNFSShareMock( side_effect=self.ssh_hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.deny_share_access, share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name) ssh_calls = [ mock.call(self.nfs_share.cmd_get()), mock.call(self.nfs_share.cmd_set_access(self.nfs_share.rw_hosts, self.nfs_share.ro_hosts)), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) manila-2.0.0/manila/tests/share/drivers/emc/plugins/vnx/utils.py0000664000567000056710000001122612701407112026045 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
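# Test helpers for the VNX plugin unit tests. RequestSideEffect and
# SSHSideEffect queue canned responses/exceptions and replay them in FIFO
# order, one per call to the mocked connector; appends made after replay has
# started are ignored. EMCMock overrides assert_has_calls to compare the
# expected and actual XML request bodies with lxml's LXMLOutputChecker
# (PARSE_XML), so payloads that are semantically equivalent XML should still
# match even if their formatting differs. EMCNFSShareMock compares expected
# and actual command argument lists and, when both contain '-option', treats
# the colon-separated values of each option key as an unordered set.
#
# A minimal usage sketch, assuming illustrative placeholders (fake_resp,
# fake_req, SomeError, connector) that are not defined in this module:
#
#   hook = RequestSideEffect()
#   hook.append(fake_resp)                 # reply returned for the 1st call
#   hook.append(ex=SomeError())            # exception raised on the 2nd call
#   connector.request = EMCMock(side_effect=hook)
#   ... exercise the code under test ...
#   connector.request.assert_has_calls([mock.call(fake_req)])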
import doctest from lxml import doctestcompare import mock from oslo_log import log import six LOG = log.getLogger(__name__) CHECKER = doctestcompare.LXMLOutputChecker() PARSE_XML = doctest.register_optionflag('PARSE_XML') class RequestSideEffect(object): def __init__(self): self.actions = [] self.started = False def append(self, resp=None, ex=None): if not self.started: self.actions.append((resp, ex)) def __call__(self, *args, **kwargs): if not self.started: self.started = True self.actions.reverse() item = self.actions.pop() if item[1]: raise item[1] else: return item[0] class SSHSideEffect(object): def __init__(self): self.actions = [] self.started = False def append(self, resp=None, err=None, ex=None): if not self.started: self.actions.append((resp, err, ex)) def __call__(self, rel_url, req_data=None, method=None, return_rest_err=True, *args, **kwargs): if not self.started: self.started = True self.actions.reverse() item = self.actions.pop() if item[2]: raise item[2] else: if return_rest_err: return item[0:2] else: return item[1] class EMCMock(mock.Mock): def _get_req_from_call(self, call): if len(call) == 3: return call[1][0] elif len(call) == 2: return call[0][0] def assert_has_calls(self, calls): if len(calls) != len(self.mock_calls): raise AssertionError( 'Mismatch error.\nExpected: %r\n' 'Actual: %r' % (calls, self.mock_calls) ) iter_expect = iter(calls) iter_actual = iter(self.mock_calls) while True: try: expect = self._get_req_from_call(next(iter_expect)) actual = self._get_req_from_call(next(iter_actual)) except StopIteration: return True if not isinstance(expect, six.binary_type): expect = six.b(expect) if not isinstance(actual, six.binary_type): actual = six.b(actual) if not CHECKER.check_output(expect, actual, PARSE_XML): raise AssertionError( 'Mismatch error.\nExpected: %r\n' 'Actual: %r' % (calls, self.mock_calls) ) class EMCNFSShareMock(mock.Mock): def assert_has_calls(self, calls): if len(calls) != len(self.mock_calls): raise AssertionError( 'Mismatch error.\nExpected: %r\n' 'Actual: %r' % (calls, self.mock_calls) ) iter_expect = iter(calls) iter_actual = iter(self.mock_calls) while True: try: expect = next(iter_expect)[1][0] actual = next(iter_actual)[1][0] except StopIteration: return True if not self._option_check(expect, actual): raise AssertionError( 'Mismatch error.\nExpected: %r\n' 'Actual: %r' % (calls, self.mock_calls) ) def _option_parser(self, option): option_map = {} for item in option.split(','): key, value = item.split('=') option_map[key] = value return option_map def _option_check(self, expect, actual): if '-option' in actual and '-option' in expect: exp_option = expect[expect.index('-option') + 1] act_option = actual[actual.index('-option') + 1] exp_opt_map = self._option_parser(exp_option) act_opt_map = self._option_parser(act_option) for key in exp_opt_map: exp_set = set(exp_opt_map[key].split(':')) act_set = set(act_opt_map[key].split(':')) if exp_set != act_set: return False return True manila-2.0.0/manila/tests/share/drivers/emc/plugins/vnx/__init__.py0000664000567000056710000000000012701407107026434 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/emc/plugins/vnx/test_connection.py0000664000567000056710000016172112701407107030115 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import ddt import mock from oslo_log import log from manila import exception from manila.share.drivers.emc.plugins.vnx import connection from manila.share.drivers.emc.plugins.vnx import connector from manila.share.drivers.emc.plugins.vnx import object_manager from manila import test from manila.tests import fake_share from manila.tests.share.drivers.emc.plugins.vnx import fakes from manila.tests.share.drivers.emc.plugins.vnx import utils LOG = log.getLogger(__name__) @ddt.ddt class StorageConnectionTestCase(test.TestCase): @mock.patch.object(connector.XMLAPIConnector, "_do_setup", mock.Mock()) def setUp(self): super(StorageConnectionTestCase, self).setUp() self.emc_share_driver = fakes.FakeEMCShareDriver() self.connection = connection.VNXStorageConnection(LOG) self.pool = fakes.PoolTestData() self.vdm = fakes.VDMTestData() self.mover = fakes.MoverTestData() self.fs = fakes.FileSystemTestData() self.mount = fakes.MountPointTestData() self.snap = fakes.SnapshotTestData() self.cifs_share = fakes.CIFSShareTestData() self.nfs_share = fakes.NFSShareTestData() self.cifs_server = fakes.CIFSServerTestData() self.dns = fakes.DNSDomainTestData() with mock.patch.object(connector.XMLAPIConnector, 'request', mock.Mock()): self.connection.connect(self.emc_share_driver, None) def test_check_for_setup_error(self): hook = utils.RequestSideEffect() hook.append(self.mover.resp_get_ref_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock with mock.patch.object(connection.VNXStorageConnection, '_get_managed_storage_pools', mock.Mock()): self.connection.check_for_setup_error() expected_calls = [mock.call(self.mover.req_get_ref())] xml_req_mock.assert_has_calls(expected_calls) def test_check_for_setup_error_with_invalid_mover_name(self): hook = utils.RequestSideEffect() hook.append(self.mover.resp_get_error()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.InvalidParameterValue, self.connection.check_for_setup_error) expected_calls = [mock.call(self.mover.req_get_ref())] xml_req_mock.assert_has_calls(expected_calls) @ddt.data({'pool_conf': None, 'real_pools': ['fake_pool', 'nas_pool'], 'matched_pool': set()}, {'pool_conf': '*', 'real_pools': ['fake_pool', 'nas_pool'], 'matched_pool': {'fake_pool', 'nas_pool'}}, {'pool_conf': 'fake_*', 'real_pools': ['fake_pool', 'nas_pool', 'Perf_Pool'], 'matched_pool': {'fake_pool'}}, {'pool_conf': '*pool', 'real_pools': ['fake_pool', 'NAS_Pool', 'Perf_POOL'], 'matched_pool': {'fake_pool'}}, {'pool_conf': 'nas_pool', 'real_pools': ['fake_pool', 'nas_pool', 'perf_pool'], 'matched_pool': {'nas_pool'}}) @ddt.unpack def test__get_managed_storage_pools(self, pool_conf, real_pools, matched_pool): with mock.patch.object(object_manager.StoragePool, 'get_all', mock.Mock(return_value=('ok', real_pools))): pool = self.connection._get_managed_storage_pools(pool_conf) self.assertEqual(matched_pool, pool) def test__get_managed_storage_pools_failed_to_get_pool_info(self): hook = utils.RequestSideEffect() 
hook.append(self.pool.resp_get_error()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock pool_conf = fakes.FakeData.pool_name self.assertRaises(exception.EMCVnxXMLAPIError, self.connection._get_managed_storage_pools, pool_conf) expected_calls = [mock.call(self.pool.req_get())] xml_req_mock.assert_has_calls(expected_calls) @ddt.data( {'pool_conf': 'fake_*', 'real_pools': ['nas_pool', 'Perf_Pool']}, {'pool_conf': '*pool', 'real_pools': ['NAS_Pool', 'Perf_POOL']}, {'pool_conf': 'nas_pool', 'real_pools': ['fake_pool', 'perf_pool']}, ) @ddt.unpack def test__get_managed_storage_pools_without_matched_pool(self, pool_conf, real_pools): with mock.patch.object(object_manager.StoragePool, 'get_all', mock.Mock(return_value=('ok', real_pools))): self.assertRaises(exception.InvalidParameterValue, self.connection._get_managed_storage_pools, pool_conf) def test_create_cifs_share(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) hook.append(self.pool.resp_get_succeed()) hook.append(self.fs.resp_task_succeed()) hook.append(self.cifs_share.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append() ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock location = self.connection.create_share(None, share, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), mock.call(self.pool.req_get()), mock.call(self.fs.req_create_on_vdm()), mock.call(self.cifs_share.req_create(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [mock.call(self.cifs_share.cmd_disable_access(), True)] ssh_cmd_mock.assert_has_calls(ssh_calls) self.assertEqual(location, r'\\192.168.1.1\%s' % share['name'], 'CIFS export path is incorrect') def test_create_nfs_share(self): share_server = fakes.SHARE_SERVER share = fakes.NFS_SHARE hook = utils.RequestSideEffect() hook.append(self.pool.resp_get_succeed()) hook.append(self.vdm.resp_get_succeed()) hook.append(self.fs.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.nfs_share.output_create()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock location = self.connection.create_share(None, share, share_server) expected_calls = [ mock.call(self.pool.req_get()), mock.call(self.vdm.req_get()), mock.call(self.fs.req_create_on_vdm()), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [mock.call(self.nfs_share.cmd_create(), True)] ssh_cmd_mock.assert_has_calls(ssh_calls) self.assertEqual(location, '192.168.1.2:/%s' % share['name'], 'NFS export path is incorrect') def test_create_cifs_share_without_share_server(self): share = fakes.CIFS_SHARE self.assertRaises(exception.InvalidInput, self.connection.create_share, None, share, None) def test_create_cifs_share_without_share_server_name(self): share = fakes.CIFS_SHARE share_server = copy.deepcopy(fakes.SHARE_SERVER) share_server['backend_details']['share_server_name'] = None self.assertRaises(exception.EMCVnxXMLAPIError, 
self.connection.create_share, None, share, share_server) def test_create_cifs_share_with_invalide_cifs_server_name(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_error()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.create_share, None, share, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) def test_create_cifs_share_without_interface_in_cifs_server(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_without_interface( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) hook.append(self.pool.resp_get_succeed()) hook.append(self.fs.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.create_share, None, share, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), mock.call(self.pool.req_get()), mock.call(self.fs.req_create_on_vdm()), ] xml_req_mock.assert_has_calls(expected_calls) def test_create_cifs_share_without_pool_name(self): share_server = fakes.SHARE_SERVER share = fake_share.fake_share(host='HostA@BackendB', share_proto='CIFS') self.assertRaises(exception.InvalidHost, self.connection.create_share, None, share, share_server) def test_create_cifs_share_from_snapshot(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE snapshot = fake_share.fake_snapshot( name=fakes.FakeData.src_snap_name, share_name=fakes.FakeData.src_share_name, share_id=fakes.FakeData.src_share_name, id=fakes.FakeData.src_snap_name) hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) hook.append(self.cifs_share.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.mover.output_get_interconnect_id()) ssh_hook.append() ssh_hook.append() ssh_hook.append(self.fs.output_copy_ckpt) ssh_hook.append(self.fs.output_info()) ssh_hook.append() ssh_hook.append() ssh_hook.append() ssh_hook.append() ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock location = self.connection.create_share_from_snapshot( None, share, snapshot, share_server) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), mock.call(self.cifs_share.req_create(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.mover.cmd_get_interconnect_id(), False), mock.call(self.fs.cmd_create_from_ckpt(), False), mock.call(self.mount.cmd_server_mount('ro'), False), mock.call(self.fs.cmd_copy_ckpt(), True), mock.call(self.fs.cmd_nas_fs_info(), False), mock.call(self.mount.cmd_server_umount(), False), mock.call(self.fs.cmd_delete(), False), mock.call(self.mount.cmd_server_mount('rw'), 
False), mock.call(self.cifs_share.cmd_disable_access(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) self.assertEqual(location, r'\\192.168.1.1\%s' % share['name'], 'CIFS export path is incorrect') def test_create_nfs_share_from_snapshot(self): share_server = fakes.SHARE_SERVER share = fakes.NFS_SHARE snapshot = fake_share.fake_snapshot( name=fakes.FakeData.src_snap_name, share_name=fakes.FakeData.src_share_name, share_id=fakes.FakeData.src_share_name, id=fakes.FakeData.src_snap_name) hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.mover.output_get_interconnect_id()) ssh_hook.append() ssh_hook.append() ssh_hook.append(self.fs.output_copy_ckpt) ssh_hook.append(self.fs.output_info()) ssh_hook.append() ssh_hook.append() ssh_hook.append() ssh_hook.append(self.nfs_share.output_create()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock location = self.connection.create_share_from_snapshot( None, share, snapshot, share_server) expected_calls = [mock.call(self.fs.req_get())] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.mover.cmd_get_interconnect_id(), False), mock.call(self.fs.cmd_create_from_ckpt(), False), mock.call(self.mount.cmd_server_mount('ro'), False), mock.call(self.fs.cmd_copy_ckpt(), True), mock.call(self.fs.cmd_nas_fs_info(), False), mock.call(self.mount.cmd_server_umount(), False), mock.call(self.fs.cmd_delete(), False), mock.call(self.mount.cmd_server_mount('rw'), False), mock.call(self.nfs_share.cmd_create(), True) ] ssh_cmd_mock.assert_has_calls(ssh_calls) self.assertEqual(location, '192.168.1.2:/%s' % share['name'], 'NFS export path is incorrect') def test_create_share_with_incorrect_proto(self): share_server = fakes.SHARE_SERVER share = fake_share.fake_share(share_proto='FAKE_PROTO') self.assertRaises(exception.InvalidShare, self.connection.create_share, context=None, share=share, share_server=share_server) def test_create_share_from_snapshot_with_incorrect_proto(self): share_server = fakes.SHARE_SERVER share = fake_share.fake_share(share_proto='FAKE_PROTO') snapshot = fake_share.fake_snapshot() self.assertRaises(exception.InvalidShare, self.connection.create_share_from_snapshot, None, share, snapshot, share_server) def test_create_share_from_snapshot_without_pool_name(self): share_server = fakes.SHARE_SERVER share = fake_share.fake_share(host='HostA@BackendB', share_proto='CIFS') snapshot = fake_share.fake_snapshot() self.assertRaises(exception.InvalidHost, self.connection.create_share_from_snapshot, None, share, snapshot, share_server) def test_delete_cifs_share(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.cifs_share.resp_get_succeed(self.vdm.vdm_id)) hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_share.resp_task_succeed()) hook.append(self.mount.resp_task_succeed()) hook.append(self.fs.resp_get_succeed()) hook.append(self.fs.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.connection.delete_share(None, share, share_server) expected_calls = [ mock.call(self.cifs_share.req_get()), mock.call(self.vdm.req_get()), mock.call(self.cifs_share.req_delete(self.vdm.vdm_id)), mock.call(self.mount.req_delete(self.vdm.vdm_id)), 
mock.call(self.fs.req_get()), mock.call(self.fs.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) def test_delete_nfs_share(self): share_server = fakes.SHARE_SERVER share = fakes.NFS_SHARE hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.mount.resp_task_succeed()) hook.append(self.fs.resp_get_succeed()) hook.append(self.fs.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) ssh_hook.append(self.nfs_share.output_delete_succeed()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.delete_share(None, share, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mount.req_delete(self.vdm.vdm_id)), mock.call(self.fs.req_get()), mock.call(self.fs.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.nfs_share.cmd_get(), False), mock.call(self.nfs_share.cmd_delete(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_delete_share_without_share_server(self): share = fakes.CIFS_SHARE self.connection.delete_share(None, share) def test_delete_share_with_incorrect_proto(self): share_server = fakes.SHARE_SERVER share = fake_share.fake_share(share_proto='FAKE_PROTO') self.assertRaises(exception.InvalidShare, self.connection.delete_share, context=None, share=share, share_server=share_server) def test_delete_cifs_share_with_nonexistent_mount_and_filesystem(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.cifs_share.resp_get_succeed(self.vdm.vdm_id)) hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_share.resp_task_succeed()) hook.append(self.mount.resp_task_error()) hook.append(self.fs.resp_get_succeed()) hook.append(self.fs.resp_task_error()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.connection.delete_share(None, share, share_server) expected_calls = [ mock.call(self.cifs_share.req_get()), mock.call(self.vdm.req_get()), mock.call(self.cifs_share.req_delete(self.vdm.vdm_id)), mock.call(self.mount.req_delete(self.vdm.vdm_id)), mock.call(self.fs.req_get()), mock.call(self.fs.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) def test_extend_share(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE new_size = fakes.FakeData.new_size hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) hook.append(self.pool.resp_get_succeed()) hook.append(self.fs.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.connection.extend_share(share, new_size, share_server) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.pool.req_get()), mock.call(self.fs.req_extend()), ] xml_req_mock.assert_has_calls(expected_calls) def test_extend_share_without_pool_name(self): share_server = fakes.SHARE_SERVER share = fake_share.fake_share(host='HostA@BackendB', share_proto='CIFS') new_size = fakes.FakeData.new_size self.assertRaises(exception.InvalidHost, self.connection.extend_share, share, new_size, share_server) def test_create_snapshot(self): share_server = fakes.SHARE_SERVER snapshot = fake_share.fake_snapshot( 
id=fakes.FakeData.snapshot_name, share_id=fakes.FakeData.filesystem_name, share_name=fakes.FakeData.share_name) hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) hook.append(self.snap.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.connection.create_snapshot(None, snapshot, share_server) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.snap.req_create()), ] xml_req_mock.assert_has_calls(expected_calls) def test_create_snapshot_with_incorrect_share_info(self): share_server = fakes.SHARE_SERVER snapshot = fake_share.fake_snapshot( id=fakes.FakeData.snapshot_name, share_id=fakes.FakeData.filesystem_name, share_name=fakes.FakeData.share_name) hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_but_not_found()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.create_snapshot, None, snapshot, share_server) expected_calls = [mock.call(self.fs.req_get())] xml_req_mock.assert_has_calls(expected_calls) def test_delete_snapshot(self): share_server = fakes.SHARE_SERVER snapshot = fake_share.fake_snapshot( id=fakes.FakeData.snapshot_name, share_id=fakes.FakeData.filesystem_name, share_name=fakes.FakeData.share_name) hook = utils.RequestSideEffect() hook.append(self.snap.resp_get_succeed()) hook.append(self.snap.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.connection.delete_snapshot(None, snapshot, share_server) expected_calls = [ mock.call(self.snap.req_get()), mock.call(self.snap.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) def test_setup_server(self): hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_but_not_found()) hook.append(self.mover.resp_get_ref_succeed()) hook.append(self.vdm.resp_task_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.dns.resp_task_succeed()) hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.mover.output_get_physical_devices()) ssh_hook.append() ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.setup_server(fakes.NETWORK_INFO, None) if_name_1 = fakes.FakeData.network_allocations_id1[-12:] if_name_2 = fakes.FakeData.network_allocations_id2[-12:] expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.vdm.req_create()), mock.call(self.mover.req_create_interface( if_name=if_name_1, ip=fakes.FakeData.network_allocations_ip1)), mock.call(self.mover.req_create_interface( if_name=if_name_2, ip=fakes.FakeData.network_allocations_ip2)), mock.call(self.dns.req_create()), mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_create(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.mover.cmd_get_physical_devices(), False), mock.call(self.vdm.cmd_attach_nfs_interface(), False), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_setup_server_with_existing_vdm(self): hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) 
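# --- Illustrative sketch (not part of the original test file) ---
# The hook objects used in these tests (utils.RequestSideEffect and
# utils.SSHSideEffect) queue one canned backend reply per expected request,
# and the EMCMock / mock.Mock transports pop them in order via side_effect.
# A minimal stand-alone equivalent with plain unittest.mock (the tree itself
# imports the external "mock" package) could look like this; the reply
# strings are hypothetical placeholders, not real VNX XML.
from unittest import mock as _sketch_mock

_replies = ['<vdm-ok/>', '<mover-ok/>']             # queued canned replies
_request = _sketch_mock.Mock(side_effect=_replies)  # each call pops the next

assert _request('get-vdm') == '<vdm-ok/>'
assert _request('get-mover') == '<mover-ok/>'
_request.assert_has_calls(
    [_sketch_mock.call('get-vdm'), _sketch_mock.call('get-mover')])
# --- end sketch ---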
hook.append(self.mover.resp_get_ref_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.dns.resp_task_succeed()) hook.append(self.cifs_server.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.mover.output_get_physical_devices()) ssh_hook.append() ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.setup_server(fakes.NETWORK_INFO, None) if_name_1 = fakes.FakeData.network_allocations_id1[-12:] if_name_2 = fakes.FakeData.network_allocations_id2[-12:] expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_create_interface( if_name=if_name_1, ip=fakes.FakeData.network_allocations_ip1)), mock.call(self.mover.req_create_interface( if_name=if_name_2, ip=fakes.FakeData.network_allocations_ip2)), mock.call(self.dns.req_create()), mock.call(self.cifs_server.req_create(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.mover.cmd_get_physical_devices(), False), mock.call(self.vdm.cmd_attach_nfs_interface(), False), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_setup_server_with_invalid_security_service(self): network_info = copy.deepcopy(fakes.NETWORK_INFO) network_info['security_services'][0]['type'] = 'fake_type' self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.setup_server, network_info, None) def test_setup_server_without_valid_physical_device(self): hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_but_not_found()) hook.append(self.mover.resp_get_ref_succeed()) hook.append(self.vdm.resp_task_succeed()) hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_without_value()) hook.append(self.vdm.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.mover.fake_output) ssh_hook.append(self.vdm.output_get_interfaces(nfs_interface='')) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.setup_server, fakes.NETWORK_INFO, None) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.vdm.req_create()), mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), mock.call(self.vdm.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.mover.cmd_get_physical_devices(), False), mock.call(self.vdm.cmd_get_interfaces(), False), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_setup_server_with_exception(self): hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_but_not_found()) hook.append(self.mover.resp_get_ref_succeed()) hook.append(self.vdm.resp_task_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.mover.resp_task_error()) hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_without_value()) hook.append(self.mover.resp_task_succeed()) hook.append(self.vdm.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() 
ssh_hook.append(self.mover.output_get_physical_devices()) ssh_hook.append(self.vdm.output_get_interfaces(nfs_interface='')) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.setup_server, fakes.NETWORK_INFO, None) if_name_1 = fakes.FakeData.network_allocations_id1[-12:] if_name_2 = fakes.FakeData.network_allocations_id2[-12:] expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.vdm.req_create()), mock.call(self.mover.req_create_interface( if_name=if_name_1, ip=fakes.FakeData.network_allocations_ip1)), mock.call(self.mover.req_create_interface( if_name=if_name_2, ip=fakes.FakeData.network_allocations_ip2)), mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), mock.call(self.mover.req_delete_interface( fakes.FakeData.network_allocations_ip1)), mock.call(self.vdm.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.mover.cmd_get_physical_devices(), False), mock.call(self.vdm.cmd_get_interfaces(), False), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_teardown_server(self): hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) hook.append(self.cifs_server.resp_task_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False)) hook.append(self.mover.resp_get_ref_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.vdm.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.vdm.output_get_interfaces()) ssh_hook.append() ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.teardown_server(fakes.SERVER_DETAIL, fakes.SECURITY_SERVICE) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), mock.call(self.cifs_server.req_modify( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False)), mock.call(self.cifs_server.req_delete(self.vdm.vdm_id)), mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_delete_interface( fakes.FakeData.network_allocations_ip1)), mock.call(self.mover.req_delete_interface( fakes.FakeData.network_allocations_ip2)), mock.call(self.vdm.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.vdm.cmd_get_interfaces(), False), mock.call(self.vdm.cmd_detach_nfs_interface(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_teardown_server_without_server_detail(self): self.connection.teardown_server(None, fakes.SECURITY_SERVICE) def test_teardown_server_without_security_services(self): hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.mover.resp_get_ref_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.vdm.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.vdm.output_get_interfaces()) ssh_hook.append() ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) 
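# --- Illustrative sketch (not part of the original test file) ---
# These tests inject their fake transports simply by replacing attributes on
# the object under test, e.g. connection.manager.connectors['XML'].request.
# A minimal stand-alone illustration of that style of dependency injection
# (the class names below are hypothetical):
from unittest import mock as _sketch_mock


class _FakeConnector(object):
    def request(self, body):
        raise RuntimeError('would talk to real hardware')


class _FakeConnection(object):
    def __init__(self):
        self.connectors = {'XML': _FakeConnector()}

    def get_vdm(self):
        return self.connectors['XML'].request('get-vdm')


_conn = _FakeConnection()
# Swap the real transport for a mock before exercising the code under test.
_conn.connectors['XML'].request = _sketch_mock.Mock(return_value='<vdm-ok/>')
assert _conn.get_vdm() == '<vdm-ok/>'
_conn.connectors['XML'].request.assert_called_once_with('get-vdm')
# --- end sketch ---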
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.teardown_server(fakes.SERVER_DETAIL, []) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_delete_interface( fakes.FakeData.network_allocations_ip1)), mock.call(self.mover.req_delete_interface( fakes.FakeData.network_allocations_ip2)), mock.call(self.vdm.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.vdm.cmd_get_interfaces(), False), mock.call(self.vdm.cmd_detach_nfs_interface(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_teardown_server_without_share_server_name_in_server_detail(self): server_detail = { 'cifs_if': fakes.FakeData.network_allocations_ip1, 'nfs_if': fakes.FakeData.network_allocations_ip2, } self.connection.teardown_server(server_detail, fakes.SECURITY_SERVICE) def test_teardown_server_with_invalid_server_name(self): hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_error()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.connection.teardown_server(fakes.SERVER_DETAIL, fakes.SECURITY_SERVICE) expected_calls = [mock.call(self.vdm.req_get())] xml_req_mock.assert_has_calls(expected_calls) def test_teardown_server_without_cifs_server(self): hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_error()) hook.append(self.mover.resp_get_ref_succeed()) hook.append(self.cifs_server.resp_task_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False)) hook.append(self.mover.resp_task_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.vdm.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.vdm.output_get_interfaces()) ssh_hook.append() ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.teardown_server(fakes.SERVER_DETAIL, fakes.SECURITY_SERVICE) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_delete_interface( fakes.FakeData.network_allocations_ip1)), mock.call(self.mover.req_delete_interface( fakes.FakeData.network_allocations_ip2)), mock.call(self.vdm.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.vdm.cmd_get_interfaces(), False), mock.call(self.vdm.cmd_detach_nfs_interface(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_teardown_server_with_invalid_cifs_server_modification(self): hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) hook.append(self.cifs_server.resp_task_error()) hook.append(self.cifs_server.resp_task_succeed()) hook.append(self.mover.resp_get_ref_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.vdm.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.vdm.output_get_interfaces()) ssh_hook.append() ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) 
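# --- Illustrative sketch (not part of the original test file) ---
# Error paths in these tests are exercised with
# self.assertRaises(ExpectedError, callable, *args): the framework makes the
# call and the test passes only if the expected exception is raised. Both
# the positional and the context-manager forms are shown with a hypothetical
# helper function.
import unittest


def _divide(a, b):
    return a / b


class _SketchErrorPathTest(unittest.TestCase):
    def test_positional_form(self):
        self.assertRaises(ZeroDivisionError, _divide, 1, 0)

    def test_context_manager_form(self):
        with self.assertRaises(ZeroDivisionError):
            _divide(1, 0)
# --- end sketch ---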
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.teardown_server(fakes.SERVER_DETAIL, fakes.SECURITY_SERVICE) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), mock.call(self.cifs_server.req_modify(self.vdm.vdm_id)), mock.call(self.cifs_server.req_delete(self.vdm.vdm_id)), mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_delete_interface( fakes.FakeData.network_allocations_ip1)), mock.call(self.mover.req_delete_interface( fakes.FakeData.network_allocations_ip2)), mock.call(self.vdm.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.vdm.cmd_get_interfaces(), False), mock.call(self.vdm.cmd_detach_nfs_interface(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_allow_cifs_rw_access(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE access = fakes.CIFS_RW_ACCESS hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.cifs_share.output_allow_access()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.allow_access(None, share, access, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.cifs_share.cmd_change_access(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_allow_cifs_ro_access(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE access = fakes.CIFS_RO_ACCESS hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.cifs_share.output_allow_access()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.allow_access(None, share, access, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.cifs_share.cmd_change_access('ro'), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_allow_ro_access_without_share_server_name(self): share = fakes.CIFS_SHARE share_server = copy.deepcopy(fakes.SHARE_SERVER) share_server['backend_details'].pop('share_server_name') access = fakes.CIFS_RO_ACCESS hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.cifs_share.output_allow_access()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.allow_access(None, share, access, share_server) expected_calls = [ mock.call(self.vdm.req_get()), 
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.cifs_share.cmd_change_access('ro'), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_allow_access_with_invalid_access_level(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE access = fake_share.fake_access(access_level='fake_level') self.assertRaises(exception.InvalidShareAccessLevel, self.connection.allow_access, None, share, access, share_server) def test_allow_access_with_invalid_share_server_name(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE access = fakes.CIFS_RW_ACCESS hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_error()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.allow_access, None, share, access, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) def test_allow_nfs_access(self): share_server = fakes.SHARE_SERVER share = fakes.NFS_SHARE access = fakes.NFS_RW_ACCESS rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts) rw_hosts.append(access['access_to']) ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=fakes.FakeData.rw_hosts, ro_hosts=fakes.FakeData.ro_hosts)) ssh_hook.append(self.nfs_share.output_set_access_success()) ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=rw_hosts, ro_hosts=fakes.FakeData.ro_hosts)) ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.allow_access(None, share, access, share_server) ssh_calls = [ mock.call(self.nfs_share.cmd_get(), True), mock.call(self.nfs_share.cmd_set_access( rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts), True), mock.call(self.nfs_share.cmd_get(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_allow_cifs_access_with_incorrect_access_type(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE access = fake_share.fake_access(access_type='fake_type') self.assertRaises(exception.InvalidShareAccess, self.connection.allow_access, None, share, access, share_server) def test_allow_nfs_access_with_incorrect_access_type(self): share_server = fakes.SHARE_SERVER share = fakes.NFS_SHARE access = fake_share.fake_access(access_type='fake_type') self.assertRaises(exception.InvalidShareAccess, self.connection.allow_access, None, share, access, share_server) def test_allow_access_with_incorrect_proto(self): share_server = fakes.SHARE_SERVER share = fake_share.fake_share(share_proto='FAKE_PROTO') access = fake_share.fake_access() self.assertRaises(exception.InvalidShare, self.connection.allow_access, None, share, access, share_server) def test_deny_cifs_rw_access(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE access = fakes.CIFS_RW_ACCESS hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.cifs_share.output_allow_access()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) 
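# --- Illustrative sketch (not part of the original test file) ---
# The NFS allow/deny tests work by editing host lists: allow_access re-reads
# the export, appends the new client to rw_hosts and re-exports, while
# deny_access removes it. The fake cmd_set_access later in fakes.py builds
# the server_export option string from those lists; a simplified version of
# that formatting is sketched below (the option layout mirrors the fake, the
# host values are hypothetical).
def _format_nfs_access(rw_hosts, ro_hosts):
    all_hosts = rw_hosts + ro_hosts
    return ('access=-0.0.0.0/0.0.0.0:%(acc)s,root=%(root)s,'
            'rw=%(rw)s,ro=%(ro)s'
            % {'acc': ':'.join(all_hosts),
               'root': ':'.join(all_hosts),
               'rw': ':'.join(rw_hosts),
               'ro': ':'.join(ro_hosts)})


assert _format_nfs_access(['192.168.1.1'], ['192.168.1.3']) == (
    'access=-0.0.0.0/0.0.0.0:192.168.1.1:192.168.1.3,'
    'root=192.168.1.1:192.168.1.3,rw=192.168.1.1,ro=192.168.1.3')
# --- end sketch ---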
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.deny_access(None, share, access, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.cifs_share.cmd_change_access(action='revoke'), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_deny_cifs_ro_access(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE access = fakes.CIFS_RO_ACCESS hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.cifs_share.output_allow_access()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.deny_access(None, share, access, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.cifs_share.cmd_change_access('ro', 'revoke'), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_deny_cifs_access_with_invliad_share_server_name(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE access = fakes.CIFS_RW_ACCESS hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_error()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.deny_access, None, share, access, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) def test_deny_nfs_access(self): share_server = fakes.SHARE_SERVER share = fakes.NFS_SHARE access = fakes.NFS_RW_ACCESS rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts) rw_hosts.append(access['access_to']) ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=rw_hosts, ro_hosts=fakes.FakeData.ro_hosts)) ssh_hook.append(self.nfs_share.output_set_access_success()) ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=fakes.FakeData.rw_hosts, ro_hosts=fakes.FakeData.ro_hosts)) ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.deny_access(None, share, access, share_server) ssh_calls = [ mock.call(self.nfs_share.cmd_get(), True), mock.call(self.nfs_share.cmd_set_access( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=self.nfs_share.ro_hosts), True), mock.call(self.nfs_share.cmd_get(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_deny_access_with_incorrect_proto(self): share_server = fakes.SHARE_SERVER share = fake_share.fake_share(share_proto='FAKE_PROTO') access = fakes.CIFS_RW_ACCESS self.assertRaises(exception.InvalidShare, self.connection.deny_access, None, share, access, share_server) def test_deny_cifs_access_with_incorrect_access_type(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE access = fake_share.fake_access(access_type='fake_type') self.assertRaises(exception.InvalidShareAccess, self.connection.deny_access, None, share, access, share_server) def 
test_deny_nfs_access_with_incorrect_access_type(self): share_server = fakes.SHARE_SERVER share = fakes.NFS_SHARE access = fake_share.fake_access(access_type='fake_type') self.assertRaises(exception.InvalidShareAccess, self.connection.deny_access, None, share, access, share_server) def test_update_share_stats(self): hook = utils.RequestSideEffect() hook.append(self.mover.resp_get_ref_succeed()) hook.append(self.pool.resp_get_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.connection.update_share_stats(fakes.STATS) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.pool.req_get()), ] xml_req_mock.assert_has_calls(expected_calls) for pool in fakes.STATS['pools']: if pool['pool_name'] == fakes.FakeData.pool_name: self.assertEqual(fakes.FakeData.pool_total_size, pool['total_capacity_gb']) free_size = (fakes.FakeData.pool_total_size - fakes.FakeData.pool_used_size) self.assertEqual(free_size, pool['free_capacity_gb']) def test_update_share_stats_without_matched_config_pools(self): self.connection.pools = set('fake_pool') hook = utils.RequestSideEffect() hook.append(self.mover.resp_get_ref_succeed()) hook.append(self.pool.resp_get_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.update_share_stats, fakes.STATS) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.pool.req_get()), ] xml_req_mock.assert_has_calls(expected_calls) def test_get_pool(self): share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) hook.append(self.pool.resp_get_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock pool_name = self.connection.get_pool(share) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.pool.req_get()), ] xml_req_mock.assert_has_calls(expected_calls) self.assertEqual(fakes.FakeData.pool_name, pool_name) def test_get_pool_failed_to_get_filesystem_info(self): share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_error()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.get_pool, share) expected_calls = [mock.call(self.fs.req_get())] xml_req_mock.assert_has_calls(expected_calls) def test_get_pool_failed_to_get_pool_info(self): share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) hook.append(self.pool.resp_get_error()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.get_pool, share) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.pool.req_get()), ] xml_req_mock.assert_has_calls(expected_calls) def test_get_pool_failed_to_find_matched_pool_name(self): share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) hook.append(self.pool.resp_get_succeed(name='unmatch_pool_name', id='unmatch_pool_id')) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.get_pool, share) expected_calls = [ mock.call(self.fs.req_get()), 
mock.call(self.pool.req_get()), ] xml_req_mock.assert_has_calls(expected_calls) manila-2.0.0/manila/tests/share/drivers/emc/plugins/vnx/fakes.py0000664000567000056710000015321512701407107026007 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_utils import units from manila.common import constants as const from manila.share import configuration as conf from manila.tests import fake_share def query(func): def inner(*args, **kwargs): return ( '' '' + func(*args, **kwargs) + '' ) return inner def start_task(func): def inner(*args, **kwargs): return ( '' '' + func(*args, **kwargs) + '') return inner def response(func): def inner(*args, **kwargs): return ( '' '' + func(*args, **kwargs) + '' ) return inner class FakeData(object): # Share information share_id = '7cf7c200_d3af_4e05_b87e_9167c95df4f9' host = 'HostA@BackendB#fake_pool_name' share_name = share_id share_size = 10 new_size = 20 src_share_name = '7cf7c200_d3af_4e05_b87e_9167c95df4f0' # Snapshot information snapshot_name = 'de4c9050-e2f9-4ce1-ade4-5ed0c9f26451' src_snap_name = 'de4c9050-e2f9-4ce1-ade4-5ed0c9f26452' snapshot_id = 'fake_snap_id' snapshot_size = 10 * units.Ki # Share network information share_network_id = 'c5b3a865-56d0-4d88-abe5-879965e099c9' cidr = '192.168.1.0/24' segmentation_id = 100 network_allocations_id1 = '132dbb10-9a36-46f2-8d89-3d909830c356' network_allocations_id2 = '7eabdeed-bad2-46ea-bd0f-a33884c869e0' network_allocations_ip1 = '192.168.1.1' network_allocations_ip2 = '192.168.1.2' domain_name = 'fake_domain' domain_user = 'administrator' domain_password = 'password' dns_ip_address = '192.168.1.200' # Share server information share_server_id = '56aafd02-4d44-43d7-b784-57fc88167224' # Filesystem information filesystem_name = share_name filesystem_id = 'fake_filesystem_id' filesystem_size = 10 * units.Ki filesystem_new_size = 20 * units.Ki # Mountpoint information path = '/' + share_name # Mover information mover_name = 'server_2' mover_id = 'fake_mover_id' interface_name1 = network_allocations_id1[-12:] interface_name2 = network_allocations_id2[-12:] long_interface_name = network_allocations_id1 net_mask = '255.255.255.0' device_name = 'cge-1-0' interconnect_id = '2001' # VDM information vdm_name = share_server_id vdm_id = 'fake_vdm_id' # Pool information pool_name = 'fake_pool_name' pool_id = 'fake_pool_id' pool_used_size = 20480 pool_total_size = 511999 # NFS share access information rw_hosts = ['192.168.1.1', '192.168.1.2'] ro_hosts = ['192.168.1.3', '192.168.1.4'] nfs_host_ip = '192.168.1.5' fake_output = '' fake_error_msg = 'fake error message' emc_share_backend = 'vnx' emc_nas_server = '192.168.1.20' emc_nas_login = 'fakename' emc_nas_password = 'fakepassword' share_backend_name = 'EMC_NAS_Storage' class StorageObjectTestData(object): def __init__(self): self.share_name = FakeData.share_name self.filesystem_name = FakeData.filesystem_name self.filesystem_id = FakeData.filesystem_id self.filesystem_size = 10 
* units.Ki self.filesystem_new_size = 20 * units.Ki self.path = FakeData.path self.snapshot_name = FakeData.snapshot_name self.snapshot_id = FakeData.snapshot_id self.snapshot_size = 10 * units.Ki self.src_snap_name = FakeData.src_snap_name self.src_fileystems_name = FakeData.src_share_name self.mover_name = FakeData.mover_name self.mover_id = FakeData.mover_id self.vdm_name = FakeData.vdm_name self.vdm_id = FakeData.vdm_id self.pool_name = FakeData.pool_name self.pool_id = FakeData.pool_id self.pool_used_size = FakeData.pool_used_size self.pool_total_size = FakeData.pool_total_size self.interface_name1 = FakeData.interface_name1 self.interface_name2 = FakeData.interface_name2 self.long_interface_name = FakeData.long_interface_name self.ip_address1 = FakeData.network_allocations_ip1 self.ip_address2 = FakeData.network_allocations_ip2 self.net_mask = FakeData.net_mask self.vlan_id = FakeData.segmentation_id self.cifs_server_name = FakeData.vdm_name self.domain_name = FakeData.domain_name self.domain_user = FakeData.domain_user self.domain_password = FakeData.domain_password self.dns_ip_address = FakeData.dns_ip_address self.device_name = FakeData.device_name self.interconnect_id = FakeData.interconnect_id self.rw_hosts = FakeData.rw_hosts self.ro_hosts = FakeData.ro_hosts self.nfs_host_ip = FakeData.nfs_host_ip self.fake_output = FakeData.fake_output @response def resp_get_error(self): return ( '' '' 'Fake description.' 'Fake action.' 'Fake diagnostics.' '' '' 'Fake description.' 'Fake action.' 'Fake diagnostics.' '' ' ' ) @response def resp_get_without_value(self): return ( '' ) @response def resp_task_succeed(self): return ( '' '' '' ) @response def resp_task_error(self): return ( '' '' '' ) @response def resp_invalid_mover_id(self): return ( '' '' 'The Mover ID supplied with the request is invalid.' '' 'Refer to the XML API v2 schema/documentation and correct ' 'your user program logic.' ' Exception tag: 14fb692e556 Exception ' 'message: com.emc.nas.ccmd.common.MessageInstanceImpl@5004000d ' '' '' ' ' ) class FileSystemTestData(StorageObjectTestData): def __init__(self): super(FileSystemTestData, self).__init__() @start_task def req_create_on_vdm(self): return ( '' '' '' '' % {'name': self.filesystem_name, 'id': self.vdm_id, 'pool_id': self.pool_id, 'size': self.filesystem_size} ) @start_task def req_create_on_mover(self): return ( '' '' '' '' % {'name': self.filesystem_name, 'id': self.mover_id, 'pool_id': self.pool_id, 'size': self.filesystem_size} ) @response def resp_create_but_already_exist(self): return ( ' ' '' '' '' '' '' '' ' ' ) @start_task def req_delete(self): return ( '' % {'id': self.filesystem_id} ) @response def resp_delete_but_failed(self): return ( '' '' 'The file system ID supplied with the request is ' 'invalid.' 'Refer to the XML API v2 schema/documentation and correct ' 'your user program logic.' ' Exception tag: 14fb6b6a7b8 Exception ' 'message: com.emc.nas.ccmd.common.MessageInstanceImpl@5004000e ' '' '' ' ' ) @start_task def req_extend(self): return ( '' '' '' % {'id': self.filesystem_id, 'pool_id': self.pool_id, 'size': self.filesystem_new_size - self.filesystem_size} ) @response def resp_extend_but_error(self): return ( '' '' 'Fake description.' 'Fake action.' ' Fake diagnostics.' 
'' ' ' ) @query def req_get(self): return ( '' '' '' '' % {'name': self.filesystem_name} ) @response def resp_get_succeed(self): return ( '' '' '' '' '' % {'name': self.filesystem_name, 'id': self.filesystem_id, 'size': self.filesystem_size, 'pool_id': self.pool_id} ) @response def resp_get_but_miss_property(self): return ( '' '' '' '' '' % {'name': self.filesystem_name, 'id': self.filesystem_id, 'size': self.filesystem_size, 'pool_id': self.pool_id} ) @response def resp_get_but_not_found(self): return ( '' '' 'The query may be incomplete because some of the ' 'Celerra components are unavailable or do not exist. Another ' 'reason may be application error. ' 'If the entire Celerra is functioning correctly, ' 'check your client application logic. ' 'File system not found.' '' '' 'The query may be incomplete because some of the ' 'Celerra components are unavailable or do not exist. Another ' 'reason may be application error.' 'If the entire Celerra is functioning correctly, ' 'check your client application logic.' 'Migration file system not found.' '' ' ' ) def cmd_create_from_ckpt(self): return [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_fs', '-name', self.filesystem_name, '-type', 'uxfs', '-create', 'samesize=' + self.src_fileystems_name, 'pool=' + self.pool_name, 'storage=SINGLE', 'worm=off', '-thin', 'no', '-option', 'slice=y', ] def cmd_copy_ckpt(self): session_name = self.filesystem_name + ':' + self.src_snap_name return [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_copy', '-name', session_name[0:63], '-source', '-ckpt', self.src_snap_name, '-destination', '-fs', self.filesystem_name, '-interconnect', "id=" + self.interconnect_id, '-overwrite_destination', '-full_copy', ] output_copy_ckpt = "OK" error_copy_ckpt = "ERROR" def cmd_nas_fs_info(self): return [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_fs', '-info', self.filesystem_name, ] def output_info(self): return ( """output = id = 515 name = %(share_name)s acl = 0 in_use = True type = uxfs worm = off volume = v993 deduplication = Off thin_storage = True tiering_policy = Auto-Tier/Optimize Pool compressed= False mirrored = False ckpts = %(ckpt)s stor_devs = FNM00124500890-004B disks = d7 disk=d7 fakeinfo""" % {'share_name': self.filesystem_name, 'ckpt': self.snapshot_name}) def cmd_delete(self): return [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_fs', '-delete', self.snapshot_name, '-Force', ] class SnapshotTestData(StorageObjectTestData): def __init__(self): super(SnapshotTestData, self).__init__() @start_task def req_create(self): return ( '' '' '' % {'fsid': self.filesystem_id, 'name': self.snapshot_name, 'pool_id': self.pool_id} ) @start_task def req_create_with_size(self): return ( '' '' '' '' % {'fsid': self.filesystem_id, 'name': self.snapshot_name, 'pool_id': self.pool_id, 'size': self.snapshot_size} ) @response def resp_create_but_already_exist(self): return ( '' '' '' '' '' '' ) @query def req_get(self): return ( '' '' % {'name': self.snapshot_name} ) @response def resp_get_succeed(self): return ( '' '' % {'name': self.snapshot_name, 'fs_id': self.filesystem_id, 'snap_id': self.snapshot_id} ) @start_task def req_delete(self): return ( '' % {'id': self.snapshot_id} ) class MountPointTestData(StorageObjectTestData): def __init__(self): super(MountPointTestData, self).__init__() @start_task def req_create(self, mover_id, is_vdm=True): return ( '' '' '' % {'path': self.path, 'fs_id': self.filesystem_id, 'mover_id': mover_id, 'is_vdm': 'true' if is_vdm else 'false'} ) @response def resp_create_but_already_exist(self): return ( '' '' '' ' ' '' '' ' ') 
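# --- Illustrative sketch (not part of the original test file) ---
# The @query, @start_task and @response decorators defined at the top of
# this fakes module wrap the string a request/response builder returns in an
# outer XML-API envelope. The envelope literals did not survive in this copy
# of the file, so the "<Envelope>" strings below are hypothetical stand-ins
# that only show the wrapping pattern.
import functools


def _sketch_query(func):
    @functools.wraps(func)
    def inner(*args, **kwargs):
        # Wrap whatever body the decorated builder returns.
        return ('<Envelope><Query>' + func(*args, **kwargs)
                + '</Query></Envelope>')
    return inner


class _SketchRequests(object):
    @_sketch_query
    def req_get(self):
        return '<GetVdm/>'   # hypothetical request body


assert (_SketchRequests().req_get()
        == '<Envelope><Query><GetVdm/></Query></Envelope>')
# --- end sketch ---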
@start_task def req_delete(self, mover_id, is_vdm=True): return ( '' % {'path': self.path, 'mover_id': mover_id, 'is_vdm': 'true' if is_vdm else 'false'} ) @response def resp_delete_but_nonexistent(self): return ( '' ' ' ' ' '' '' ' ' ) @query def req_get(self, mover_id, is_vdm=True): return ( '' % {'mover_id': mover_id, 'is_vdm': 'true' if is_vdm else 'false'} ) @response def resp_get_succeed(self, mover_id, is_vdm=True): return ( '' '' '' '' % {'path': self.path, 'fsID': self.filesystem_id, 'mover_id': mover_id, 'is_vdm': 'true' if is_vdm else 'false'} ) def cmd_server_mount(self, mode): return [ 'env', 'NAS_DB=/nas', '/nas/bin/server_mount', self.vdm_name, '-option', mode, self.filesystem_name, self.path, ] def cmd_server_umount(self): return [ 'env', 'NAS_DB=/nas', '/nas/bin/server_umount', self.vdm_name, '-perm', self.snapshot_name, ] class VDMTestData(StorageObjectTestData): def __init__(self): super(VDMTestData, self).__init__() @start_task def req_create(self): return ( '' % {'mover_id': self.mover_id, 'vdm_name': self.vdm_name} ) @response def resp_create_but_already_exist(self): return ( '' '' '' 'Duplicate name specified' 'Specify a unqiue name' '' '' 'Duplicate name specified' 'Specify a unqiue name' '' '' ' ' ) @query def req_get(self): return '' @response def resp_get_succeed(self, name=None): if not name: name = self.vdm_name return ( '' '' '' '
  '%(interface1)s'
  '%(interface2)s'
  ''
    ' % {'vdm_name': name, 'vdm_id': self.vdm_id, 'mover_id': self.mover_id, 'interface1': self.interface_name1, 'interface2': self.interface_name2} ) @response def resp_get_but_not_found(self): return ( '' ) @start_task def req_delete(self): return '' % {'vdmid': self.vdm_id} def cmd_attach_nfs_interface(self): return [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_server', '-vdm', self.vdm_name, '-attach', self.interface_name2, ] def cmd_detach_nfs_interface(self): return [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_server', '-vdm', self.vdm_name, '-detach', self.interface_name2, ] def cmd_get_interfaces(self): return [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_server', '-i', '-vdm', self.vdm_name, ] def output_get_interfaces(self, cifs_interface=FakeData.interface_name1, nfs_interface=FakeData.interface_name2): return ( """id = %(vdmid)s name = %(name)s acl = 0 type = vdm server = server_2 rootfs = root_fs_vdm_vdm-fakeid I18N mode = UNICODE mountedfs = member_of = status : defined = enabled actual = loaded, active Interfaces to services mapping: interface=%(nfs_if_name)s :vdm interface=%(cifs_if_name)s :cifs""" % {'vdmid': self.vdm_id, 'name': self.vdm_name, 'nfs_if_name': nfs_interface, 'cifs_if_name': cifs_interface} ) class PoolTestData(StorageObjectTestData): def __init__(self): super(PoolTestData, self).__init__() @query def req_get(self): return ( '' ) @response def resp_get_succeed(self, name=None, id=None): if not name: name = self.pool_name if not id: id = self.pool_id return ( '' '' '' '' '' '' '' % {'name': name, 'id': id, 'pool_used_size': self.pool_used_size, 'pool_total_size': self.pool_total_size} ) class MoverTestData(StorageObjectTestData): def __init__(self): super(MoverTestData, self).__init__() @query def req_get_ref(self): return ( '' '' '' ) @response def resp_get_ref_succeed(self, name=None): if not name: name = self.mover_name return ( '' '' 'The query may be incomplete because some of the ' 'Celerra components are unavailable or do not exist. Another ' 'reason may be application error.' 'If the entire Celerra is functioning correctly, ' 'check your client application logic.' 'Standby Data Mover server_2.faulted.server_3 is ' 'out of service.' '' '' '' '' % {'name': name, 'id': self.mover_id} ) @query def req_get(self): return ( '' '' '' % {'id': self.mover_id} ) @response def resp_get_succeed(self, name=None): if not name: name = self.mover_name return ( '' '' '' '' '' '' '' '' '' % {'id': self.mover_id, 'name': name, 'long_interface_name': self.long_interface_name[:31], 'interface_name1': self.interface_name1, 'interface_name2': self.interface_name2} ) @start_task def req_create_interface(self, if_name=FakeData.interface_name1, ip=FakeData.network_allocations_ip1): return ( '' % {'if_name': if_name, 'vlan': self.vlan_id, 'ip': ip, 'mover_id': self.mover_id, 'device_name': self.device_name, 'net_mask': self.net_mask} ) @response def resp_create_interface_but_name_already_exist(self): return ( '' '' 'Duplicate name specified' 'Specify a unqiue name' '' '' % {'interface_name': self.interface_name1} ) @response def resp_create_interface_but_ip_already_exist(self): return ( '' '' '' '' '' % {'ip': self.ip_address1} ) @response def resp_create_interface_with_conflicted_vlan_id(self): return ( '' '' 'The operation cannot complete because other ' 'interfaces on the same subnet are in a different VLAN. ' 'The Data Mover requires all interfaces in the same subnet ' 'to be in the same VLAN.' 'Specify a VLAN to match other interfaces in the same ' 'subnet. 
To move multiple interfaces to a different VLAN, ' 'first set the VLAN id on each interface to 0, ' 'and then set their VLAN id\'s to the new VLAN number.' '' '' ) @start_task def req_delete_interface(self, ip=FakeData.network_allocations_ip1): return ( '' % {'ip': ip, 'mover_id': self.mover_id, } ) @response def resp_delete_interface_but_nonexistent(self): return ( '' '' '' '' '' '' ) def cmd_get_interconnect_id(self): return [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_cel', '-interconnect', '-l', ] def output_get_interconnect_id(self): return ( 'id name source_server destination_system destination_server\n' '%(id)s loopback %(src_server)s nas149 %(dest_server)s\n' % {'id': self.interconnect_id, 'src_server': self.mover_name, 'dest_server': self.mover_name} ) def cmd_get_physical_devices(self): return [ 'env', 'NAS_DB=/nas', '/nas/bin/server_sysconfig', self.mover_name, '-pci', ] def output_get_physical_devices(self): return ( 'server_2 : PCI DEVICES:\n' 'On Board:\n' ' PMC QE8 Fibre Channel Controller\n' ' 0: fcp-0-0 IRQ: 20 addr: 5006016047a00245\n' ' 0: fcp-0-1 IRQ: 21 addr: 5006016147a00245\n' ' 0: fcp-0-2 IRQ: 22 addr: 5006016247a00245\n' ' 0: fcp-0-3 IRQ: 23 addr: 5006016347a00245\n' ' Broadcom Gigabit Ethernet Controller\n' ' 0: cge-1-0 IRQ: 24\n' ' speed=auto duplex=auto txflowctl=disable rxflowctl=disable\n' ' Link: Up\n' ' 0: cge-1-1 IRQ: 25\n' ' speed=auto duplex=auto txflowctl=disable rxflowctl=disable\n' ' Link: Down\n' ' 0: cge-1-2 IRQ: 26\n' ' speed=auto duplex=auto txflowctl=disable rxflowctl=disable\n' ' Link: Down\n' ' 0: cge-1-3 IRQ: 27\n' ' speed=auto duplex=auto txflowctl=disable rxflowctl=disable\n' ' Link: Down\n' 'Slot: 4\n' ' PLX PCI-Express Switch Controller\n' ' 1: PLX PEX8648 IRQ: 10\n' ) class DNSDomainTestData(StorageObjectTestData): def __init__(self): super(DNSDomainTestData, self).__init__() @start_task def req_create(self): return ( '' % {'mover_id': self.mover_id, 'domain_name': self.domain_name, 'server_ips': self.dns_ip_address} ) @start_task def req_delete(self): return ( '' % {'mover_id': self.mover_id, 'domain_name': self.domain_name} ) class CIFSServerTestData(StorageObjectTestData): def __init__(self): super(CIFSServerTestData, self).__init__() @start_task def req_create(self, mover_id, is_vdm=True): return ( '' '' '
  '%(alias)s'
  ''
    ' % {'ip': self.ip_address1, 'comp_name': self.cifs_server_name, 'name': self.cifs_server_name[-14:], 'mover_id': mover_id, 'alias': self.cifs_server_name[-12:], 'domain_user': self.domain_user, 'domain_password': self.domain_password, 'domain': self.domain_name, 'is_vdm': 'true' if is_vdm else 'false'} ) @query def req_get(self, mover_id, is_vdm=True): return ( '' '' '' % {'mover_id': mover_id, 'is_vdm': 'true' if is_vdm else 'false'} ) @response def resp_get_succeed(self, mover_id, is_vdm, join_domain): return ( '' '' '
  '%(alias)s'
  • ' % {'mover_id': mover_id, 'cifsserver': self.cifs_server_name[-14:], 'ip': self.ip_address1, 'is_vdm': 'true' if is_vdm else 'false', 'alias': self.cifs_server_name[-12:], 'domain': self.domain_name, 'join_domain': 'true' if join_domain else 'false', 'comp_name': self.cifs_server_name} ) @response def resp_get_without_interface(self, mover_id, is_vdm, join_domain): return ( '' '' '
  '%(alias)s'
  ''
    ' % {'mover_id': mover_id, 'cifsserver': self.cifs_server_name[-14:], 'is_vdm': 'true' if is_vdm else 'false', 'alias': self.cifs_server_name[-12:], 'domain': self.domain_name, 'join_domain': 'true' if join_domain else 'false', 'comp_name': self.cifs_server_name} ) @start_task def req_modify(self, mover_id, is_vdm=True, join_domain=False): return ( '' '' '' % {'mover_id': mover_id, 'is_vdm': 'true' if is_vdm else 'false', 'join_domain': 'true' if join_domain else 'false', 'cifsserver': self.cifs_server_name[-14:], 'username': self.domain_user, 'pw': self.domain_password} ) @response def resp_modify_but_already_join_domain(self): return ( ' ' '' 'Fake description' 'Fake action.' '' ' ' ) @response def resp_modify_but_unjoin_domain(self): return ( ' ' '' 'Fake description' 'Fake action.' '' ' ' ) @start_task def req_delete(self, mover_id, is_vdm=True): return ( '' % {'mover_id': mover_id, 'is_vdm': 'true' if is_vdm else 'false', 'cifsserver': self.cifs_server_name[-14:]} ) class CIFSShareTestData(StorageObjectTestData): def __init__(self): super(CIFSShareTestData, self).__init__() @start_task def req_create(self, mover_id, is_vdm=True): return ( '' '' '
  '%(cifsserver)s'
  ''
    ' % {'path': '/' + self.share_name, 'share_name': self.share_name, 'mover_id': mover_id, 'is_vdm': 'true' if is_vdm else 'false', 'cifsserver': self.cifs_server_name[-14:]} ) @start_task def req_delete(self, mover_id, is_vdm=True): return ( '' '
  '%(cifsserver)s'
  ''
    ' % {'share_name': self.share_name, 'mover_id': mover_id, 'is_vdm': 'true' if is_vdm else 'false', 'cifsserver': self.cifs_server_name[-12:]} ) @query def req_get(self): return '' % self.share_name @response def resp_get_succeed(self, mover_id, is_vdm=True): return ( '' '' '
  '%(alias)s'
  ''
    ' '
    ' % {'path': self.path, 'fsid': self.filesystem_id, 'name': self.share_name, 'moverid': mover_id, 'is_vdm': 'true' if is_vdm else 'false', 'alias': self.cifs_server_name[-12:]} ) def cmd_disable_access(self): cmd_str = 'sharesd %s set noaccess' % self.share_name return [ 'env', 'NAS_DB=/nas', '/nas/bin/.server_config', self.vdm_name, '-v', '%s' % cmd_str, ] def cmd_change_access(self, access_level=const.ACCESS_LEVEL_RW, action='grant'): account = self.domain_user + '@' + self.domain_name if access_level == const.ACCESS_LEVEL_RW: str_access = 'fullcontrol' else: str_access = 'read' allow_str = ( 'sharesd %(share_name)s %(action)s %(account)s=%(access)s' % {'share_name': self.share_name, 'action': action, 'account': account, 'access': str_access} ) return [ 'env', 'NAS_DB=/nas', '/nas/bin/.server_config', self.vdm_name, '-v', '%s' % allow_str, ] def output_allow_access(self): return ( "Command succeeded: :3 sharesd %(share)s grant " "%(user)s@%(domain)s=fullcontrol" % {'share': self.share_name, 'user': self.domain_user, 'domain': self.domain_name} ) def output_allow_access_but_duplicate_ace(self): return ( '%(vdm_name)s : commands processed: 1' 'output is complete' '1443422844: SMB: 6: ACE for %(domain)s\\%(user)s ' 'unchanged' '1443422844: ADMIN: 3: ' 'Command failed: :23 ' 'sharesd %(share)s grant %(user)s@%(domain)s=read' 'Error 4020: %(vdm_name)s : failed to complete command"' % {'share': self.share_name, 'user': self.domain_user, 'domain': self.domain_name, 'vdm_name': self.vdm_name} ) def output_deny_access_but_no_ace(self): return ( '%(vdm_name)s : commands processed: 1' 'output is complete' '1443515516: SMB: 6: No ACE found for %(domain)s\\%(user)s ' '1443515516: ADMIN: 3: ' 'Command failed: :26 ' 'sharesd %(share)s revoke %(user)s@%(domain)s=read' 'Error 4020: %(vdm_name)s : failed to complete command"' % {'share': self.share_name, 'user': self.domain_user, 'domain': self.domain_name, 'vdm_name': self.vdm_name} ) def output_deny_access_but_no_user_found(self): return ( '%(vdm_name)s : commands processed: 1' 'output is complete' '1443520322: SMB: 6: Cannot get mapping for %(domain)s\\%(user)s ' '1443520322: ADMIN: 3: ' 'Command failed: :26 ' 'sharesd %(share)s revoke %(user)s@%(domain)s=read' 'Error 4020: %(vdm_name)s : failed to complete command"' % {'share': self.share_name, 'user': self.domain_user, 'domain': self.domain_name, 'vdm_name': self.vdm_name} ) class NFSShareTestData(StorageObjectTestData): def __init__(self): super(NFSShareTestData, self).__init__() def cmd_create(self): default_access = 'access=-0.0.0.0/0.0.0.0' return [ 'env', 'NAS_DB=/nas', '/nas/bin/server_export', self.vdm_name, '-option', default_access, self.path, ] def output_create(self): return "%s : done" % self.vdm_name def cmd_get(self): return [ 'env', 'NAS_DB=/nas', '/nas/bin/server_export', self.vdm_name, '-P', 'nfs', '-list', self.path, ] def output_get_succeed(self, rw_hosts, ro_hosts): if rw_hosts and ro_hosts: return ( '%(mover_name)s :\nexport "%(path)s" ' 'access=-0.0.0.0/0.0.0.0:%(host)s root=%(host)s ' 'rw=%(rw_host)s ro=%(ro_host)s\n' % {'mover_name': self.vdm_name, 'path': self.path, 'host': ":".join(rw_hosts + ro_hosts), 'rw_host': ":".join(rw_hosts), 'ro_host': ":".join(ro_hosts)} ) elif rw_hosts: return ( '%(mover_name)s :\nexport "%(path)s" ' 'access=-0.0.0.0/0.0.0.0:%(host)s root=%(host)s ' 'rw=%(rw_host)s\n' % {'mover_name': self.vdm_name, 'host': rw_hosts, 'path': self.path, 'rw_host': ":".join(rw_hosts)} ) elif ro_hosts: return ( '%(mover_name)s :\nexport "%(path)s" ' 
'access=-0.0.0.0/0.0.0.0:%(host)s root=%(host)s ' 'ro=%(ro_host)s\n' % {'mover_name': self.vdm_name, 'host': ro_hosts, 'path': self.path, 'ro_host': ":".join(ro_hosts)} ) else: return ( '%(mover_name)s :\nexport "%(path)s" ' 'access=-0.0.0.0/0.0.0.0\n' % {'mover_name': self.vdm_name, 'path': self.path} ) def output_get_but_not_found(self): return ( '%(mover_name)s : \nError 2: %(mover_name)s : ' 'No such file or directory \n' % {'mover_name': self.vdm_name} ) def cmd_delete(self): return [ 'env', 'NAS_DB=/nas', '/nas/bin/server_export', self.vdm_name, '-unexport', '-perm', self.path, ] def output_delete_succeed(self): return "%s : done" % self.vdm_name def output_delete_but_locked(self): return ("Error 2201: %s : unable to acquire lock(s), try later" % self.vdm_name) def cmd_set_access(self, rw_hosts, ro_hosts): access_str = ("access=-0.0.0.0/0.0.0.0:%(access_hosts)s," "root=%(root_hosts)s,rw=%(rw_hosts)s,ro=%(ro_hosts)s" % {'rw_hosts': ":".join(rw_hosts), 'ro_hosts': ":".join(ro_hosts), 'root_hosts': ":".join(rw_hosts + ro_hosts), 'access_hosts': ":".join(rw_hosts + ro_hosts)}) return [ 'env', 'NAS_DB=/nas', '/nas/bin/server_export', self.vdm_name, '-ignore', '-option', access_str, self.path, ] def output_set_access_success(self): return "%s : done" % self.vdm_name class FakeEMCShareDriver(object): def __init__(self): self.configuration = conf.Configuration(None) self.configuration.append_config_values = mock.Mock(return_value=0) self.configuration.emc_share_backend = FakeData.emc_share_backend self.configuration.emc_nas_server_container = FakeData.mover_name self.configuration.emc_nas_server = FakeData.emc_nas_server self.configuration.emc_nas_login = FakeData.emc_nas_login self.configuration.emc_nas_password = FakeData.emc_nas_password self.configuration.share_backend_name = FakeData.share_backend_name CIFS_SHARE = fake_share.fake_share( id=FakeData.share_id, name=FakeData.share_name, size=FakeData.share_size, share_network_id=FakeData.share_network_id, share_server_id=FakeData.share_server_id, host=FakeData.host, share_proto='CIFS') NFS_SHARE = fake_share.fake_share( id=FakeData.share_id, name=FakeData.share_name, size=FakeData.share_size, share_network_id=FakeData.share_network_id, share_server_id=FakeData.share_server_id, host=FakeData.host, share_proto='NFS') CIFS_RW_ACCESS = fake_share.fake_access( access_type='user', access_to=FakeData.domain_user, access_level='rw') CIFS_RO_ACCESS = fake_share.fake_access( access_type='user', access_to=FakeData.domain_user, access_level='ro') NFS_RW_ACCESS = fake_share.fake_access( access_type='ip', access_to=FakeData.nfs_host_ip, access_level='rw') NFS_RO_ACCESS = fake_share.fake_access( access_type='ip', access_to=FakeData.nfs_host_ip, access_level='ro') SHARE_SERVER = { 'id': FakeData.share_server_id, 'share_network': { 'name': 'fake_share_network', 'id': FakeData.share_network_id }, 'share_network_id': FakeData.share_network_id, 'backend_details': { 'share_server_name': FakeData.vdm_name, 'cifs_if': FakeData.network_allocations_ip1, 'nfs_if': FakeData.network_allocations_ip2, } } SERVER_DETAIL = { 'share_server_name': FakeData.vdm_name, 'cifs_if': FakeData.network_allocations_ip1, 'nfs_if': FakeData.network_allocations_ip2, } SECURITY_SERVICE = [ { 'type': 'active_directory', 'domain': FakeData.domain_name, 'dns_ip': FakeData.dns_ip_address, 'user': FakeData.domain_user, 'password': FakeData.domain_password }, ] NETWORK_INFO = { 'server_id': FakeData.share_server_id, 'cidr': FakeData.cidr, 'security_services': [ {'type': 'active_directory', 
'domain': FakeData.domain_name, 'dns_ip': FakeData.dns_ip_address, 'user': FakeData.domain_user, 'password': FakeData.domain_password}, ], 'segmentation_id': FakeData.segmentation_id, 'network_type': 'vlan', 'network_allocations': [ {'id': FakeData.network_allocations_id1, 'ip_address': FakeData.network_allocations_ip1}, {'id': FakeData.network_allocations_id2, 'ip_address': FakeData.network_allocations_ip2} ] } STATS = dict( share_backend_name='VNX', vendor_name='EMC', storage_protocol='NFS_CIFS', driver_version='2.0.0,') manila-2.0.0/manila/tests/share/drivers/emc/plugins/__init__.py0000664000567000056710000000000012701407107025621 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/emc/plugins/isilon/0000775000567000056710000000000012701407265025024 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/emc/plugins/isilon/__init__.py0000664000567000056710000000000012701407107027116 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/emc/plugins/isilon/test_isilon_api.py0000664000567000056710000010164412701407112030560 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from oslo_serialization import jsonutils as json import requests import requests_mock import six from manila import exception from manila.share.drivers.emc.plugins.isilon import isilon_api from manila import test @ddt.ddt class IsilonApiTest(test.TestCase): def setUp(self): super(IsilonApiTest, self).setUp() self._mock_url = 'https://localhost:8080' _mock_auth = ('admin', 'admin') self.isilon_api = isilon_api.IsilonApi( self._mock_url, _mock_auth ) @ddt.data(False, True) def test_create_directory(self, is_recursive): with requests_mock.Mocker() as m: path = '/ifs/test' self.assertEqual(0, len(m.request_history)) self._add_create_directory_response(m, path, is_recursive) r = self.isilon_api.create_directory(path, recursive=is_recursive) self.assertTrue(r) self.assertEqual(1, len(m.request_history)) request = m.request_history[0] self._verify_dir_creation_request(request, path, is_recursive) @requests_mock.mock() def test_clone_snapshot(self, m): snapshot_name = 'snapshot01' fq_target_dir = '/ifs/admin/target' self.assertEqual(0, len(m.request_history)) self._add_create_directory_response(m, fq_target_dir, False) snapshots_json = ( '{"snapshots": ' '[{"name": "snapshot01", "path": "/ifs/admin/source"}]' '}' ) self._add_get_snapshot_response(m, snapshot_name, snapshots_json) # In order to test cloning a snapshot, we build out a mock # source directory tree. After the method under test is called we # will verify the the necessary calls are made to clone a snapshot. 
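        # The mocked source tree under the snapshot (built out by the mock
        # responses below) looks like:
        #     /ifs/admin/source/
        #         file1, file2
        #         dir1/  -> file11, file12
        #         dir2/  -> file21, file22
        # so clone_snapshot() is expected to recreate the same layout under
        # /ifs/admin/target via directory-creation and file-clone requests.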
source_dir_listing_json = ( '{"children": [' '{"name": "dir1", "type": "container"},' '{"name": "dir2", "type": "container"},' '{"name": "file1", "type": "object"},' '{"name": "file2", "type": "object"}' ']}' ) self._add_get_directory_listing_response( m, '/ifs/.snapshot/{0}/admin/source'.format(snapshot_name), source_dir_listing_json) # Add request responses for creating directories and cloning files # to the destination tree self._add_file_clone_response(m, '/ifs/admin/target/file1', snapshot_name) self._add_file_clone_response(m, '/ifs/admin/target/file2', snapshot_name) self._add_create_directory_response(m, fq_target_dir + '/dir1', False) self._add_get_directory_listing_response( m, '/ifs/.snapshot/{0}/admin/source/dir1'.format(snapshot_name), '{"children": [' '{"name": "file11", "type": "object"}, ' '{"name": "file12", "type": "object"}' ']}') self._add_file_clone_response(m, '/ifs/admin/target/dir1/file11', snapshot_name) self._add_file_clone_response(m, '/ifs/admin/target/dir1/file12', snapshot_name) self._add_create_directory_response(m, fq_target_dir + '/dir2', False) self._add_get_directory_listing_response( m, '/ifs/.snapshot/{0}/admin/source/dir2'.format(snapshot_name), '{"children": [' '{"name": "file21", "type": "object"}, ' '{"name": "file22", "type": "object"}' ']}') self._add_file_clone_response(m, '/ifs/admin/target/dir2/file21', snapshot_name) self._add_file_clone_response(m, '/ifs/admin/target/dir2/file22', snapshot_name) # Call method under test self.isilon_api.clone_snapshot(snapshot_name, fq_target_dir) # Verify calls needed to clone the source snapshot to the target dir expected_calls = [] clone_path_list = [ 'file1', 'file2', 'dir1/file11', 'dir1/file12', 'dir2/file21', 'dir2/file22'] for path in clone_path_list: expected_call = IsilonApiTest.ExpectedCall( IsilonApiTest.ExpectedCall.FILE_CLONE, self._mock_url + '/namespace/ifs/admin/target/' + path, ['/ifs/admin/target/' + path, '/ifs/admin/source/' + path, snapshot_name]) expected_calls.append(expected_call) dir_path_list = [ ('/dir1?recursive', '/dir1'), ('/dir2?recursive', '/dir2'), ('?recursive=', '')] for url, path in dir_path_list: expected_call = IsilonApiTest.ExpectedCall( IsilonApiTest.ExpectedCall.DIR_CREATION, self._mock_url + '/namespace/ifs/admin/target' + url, ['/ifs/admin/target' + path, False]) expected_calls.append(expected_call) self._verify_clone_snapshot_calls(expected_calls, m.request_history) class ExpectedCall(object): DIR_CREATION = 'dir_creation' FILE_CLONE = 'file_clone' def __init__(self, request_type, match_url, verify_args): self.request_type = request_type self.match_url = match_url self.verify_args = verify_args def _verify_clone_snapshot_calls(self, expected_calls, response_calls): actual_calls = [] for call in response_calls: actual_calls.append(call) for expected_call in expected_calls: # Match the expected call to the actual call, then verify match_found = False for call in actual_calls: if call.url.startswith(expected_call.match_url): match_found = True if expected_call.request_type is 'dir_creation': self._verify_dir_creation_request( call, *expected_call.verify_args) elif expected_call.request_type is 'file_clone': pass else: self.fail('Invalid request type') actual_calls.remove(call) self.assertTrue(match_found) @requests_mock.mock() def test_get_directory_listing(self, m): self.assertEqual(0, len(m.request_history)) fq_dir_path = 'ifs/admin/test' json_str = '{"my_json": "test123"}' self._add_get_directory_listing_response(m, fq_dir_path, json_str) actual_json = 
self.isilon_api.get_directory_listing(fq_dir_path) self.assertEqual(1, len(m.request_history)) self.assertEqual(json.loads(json_str), actual_json) @ddt.data((200, True), (404, False)) def test_is_path_existent(self, data): status_code, expected_return_value = data with requests_mock.mock() as m: self.assertEqual(0, len(m.request_history)) path = '/ifs/home/admin' m.head('{0}/namespace{1}'.format(self._mock_url, path), status_code=status_code) r = self.isilon_api.is_path_existent(path) self.assertEqual(expected_return_value, r) self.assertEqual(1, len(m.request_history)) @requests_mock.mock() def test_is_path_existent_unexpected_error(self, m): path = '/ifs/home/admin' m.head('{0}/namespace{1}'.format(self._mock_url, path), status_code=400) self.assertRaises( requests.exceptions.HTTPError, self.isilon_api.is_path_existent, '/ifs/home/admin') @ddt.data( (200, '{"snapshots": [{"path": "/ifs/home/test"}]}', {'path': '/ifs/home/test'}), (404, '{"errors": []}', None) ) def test_get_snapshot(self, data): status_code, json_body, expected_return_value = data with requests_mock.mock() as m: self.assertEqual(0, len(m.request_history)) snapshot_name = 'foo1' self._add_get_snapshot_response(m, snapshot_name, json_body, status=status_code) r = self.isilon_api.get_snapshot(snapshot_name) self.assertEqual(1, len(m.request_history)) self.assertEqual(expected_return_value, r) @requests_mock.mock() def test_get_snapshot_unexpected_error(self, m): snapshot_name = 'foo1' json_body = '{"snapshots": [{"path": "/ifs/home/test"}]}' self._add_get_snapshot_response( m, snapshot_name, json_body, status=400) self.assertRaises( requests.exceptions.HTTPError, self.isilon_api.get_snapshot, snapshot_name) @requests_mock.mock() def test_get_snapshots(self, m): self.assertEqual(0, len(m.request_history)) snapshot_json = '{"snapshots": [{"path": "/ifs/home/test"}]}' m.get('{0}/platform/1/snapshot/snapshots'.format(self._mock_url), status_code=200, json=json.loads(snapshot_json)) r = self.isilon_api.get_snapshots() self.assertEqual(1, len(m.request_history)) self.assertEqual(json.loads(snapshot_json), r) @requests_mock.mock() def test_get_snapshots_error_occurred(self, m): self.assertEqual(0, len(m.request_history)) m.get('{0}/platform/1/snapshot/snapshots'.format(self._mock_url), status_code=404) self.assertRaises(requests.exceptions.HTTPError, self.isilon_api.get_snapshots) self.assertEqual(1, len(m.request_history)) @ddt.data( ('/ifs/home/admin', '{"exports": [{"id": 42, "paths": ["/ifs/home/admin"]}]}', 42), ('/ifs/home/test', '{"exports": [{"id": 42, "paths": ["/ifs/home/admin"]}]}', None) ) def test_lookup_nfs_export(self, data): share_path, response_json, expected_return = data with requests_mock.mock() as m: self.assertEqual(0, len(m.request_history)) m.get('{0}/platform/1/protocols/nfs/exports' .format(self._mock_url), json=json.loads(response_json)) r = self.isilon_api.lookup_nfs_export(share_path) self.assertEqual(1, len(m.request_history)) self.assertEqual(expected_return, r) @requests_mock.mock() def test_get_nfs_export(self, m): self.assertEqual(0, len(m.request_history)) export_id = 42 response_json = '{"exports": [{"id": 1}]}' status_code = 200 m.get('{0}/platform/1/protocols/nfs/exports/{1}' .format(self._mock_url, export_id), json=json.loads(response_json), status_code=status_code) r = self.isilon_api.get_nfs_export(export_id) self.assertEqual(1, len(m.request_history)) self.assertEqual(json.loads('{"id": 1}'), r) @requests_mock.mock() def test_get_nfs_export_error(self, m): self.assertEqual(0, 
len(m.request_history)) export_id = 3 response_json = '{}' status_code = 404 m.get('{0}/platform/1/protocols/nfs/exports/{1}' .format(self._mock_url, export_id), json=json.loads(response_json), status_code=status_code) r = self.isilon_api.get_nfs_export(export_id) self.assertEqual(1, len(m.request_history)) self.assertIsNone(r) @requests_mock.mock() def test_lookup_smb_share(self, m): self.assertEqual(0, len(m.request_history)) share_name = 'my_smb_share' share_json = '{"id": "my_smb_share"}' response_json = '{{"shares": [{0}]}}'.format(share_json) m.get('{0}/platform/1/protocols/smb/shares/{1}' .format(self._mock_url, share_name), status_code=200, json=json.loads(response_json)) r = self.isilon_api.lookup_smb_share(share_name) self.assertEqual(1, len(m.request_history)) self.assertEqual(json.loads(share_json), r) @requests_mock.mock() def test_lookup_smb_share_error(self, m): self.assertEqual(0, len(m.request_history)) share_name = 'my_smb_share' m.get('{0}/platform/1/protocols/smb/shares/{1}'.format( self._mock_url, share_name), status_code=404) r = self.isilon_api.lookup_smb_share(share_name) self.assertEqual(1, len(m.request_history)) self.assertIsNone(r) @ddt.data((201, True), (404, False)) def test_create_nfs_export(self, data): status_code, expected_return_value = data with requests_mock.mock() as m: self.assertEqual(0, len(m.request_history)) export_path = '/ifs/home/test' m.post(self._mock_url + '/platform/1/protocols/nfs/exports', status_code=status_code) r = self.isilon_api.create_nfs_export(export_path) self.assertEqual(1, len(m.request_history)) call = m.request_history[0] expected_request_body = '{"paths": ["/ifs/home/test"]}' self.assertEqual(json.loads(expected_request_body), json.loads(call.body)) self.assertEqual(expected_return_value, r) @ddt.data((201, True), (404, False)) def test_create_smb_share(self, data): status_code, expected_return_value = data with requests_mock.mock() as m: self.assertEqual(0, len(m.request_history)) share_name = 'my_smb_share' share_path = '/ifs/home/admin/smb_share' m.post(self._mock_url + '/platform/1/protocols/smb/shares', status_code=status_code) r = self.isilon_api.create_smb_share(share_name, share_path) self.assertEqual(expected_return_value, r) self.assertEqual(1, len(m.request_history)) expected_request_data = { 'name': share_name, 'path': share_path, 'permissions': [] } self.assertEqual(expected_request_data, json.loads(m.request_history[0].body)) @requests_mock.mock() def test_create_snapshot(self, m): self.assertEqual(0, len(m.request_history)) snapshot_name = 'my_snapshot_01' snapshot_path = '/ifs/home/admin' m.post(self._mock_url + '/platform/1/snapshot/snapshots', status_code=201) r = self.isilon_api.create_snapshot(snapshot_name, snapshot_path) self.assertEqual(1, len(m.request_history)) self.assertTrue(r) expected_request_body = json.loads( '{{"name": "{0}", "path": "{1}"}}' .format(snapshot_name, snapshot_path) ) self.assertEqual(expected_request_body, json.loads(m.request_history[0].body)) @requests_mock.mock() def test_create_snapshot_error_case(self, m): self.assertEqual(0, len(m.request_history)) snapshot_name = 'my_snapshot_01' snapshot_path = '/ifs/home/admin' m.post(self._mock_url + '/platform/1/snapshot/snapshots', status_code=404) self.assertRaises(requests.exceptions.HTTPError, self.isilon_api.create_snapshot, snapshot_name, snapshot_path) self.assertEqual(1, len(m.request_history)) @ddt.data(True, False) def test_delete(self, is_recursive_delete): with requests_mock.mock() as m: self.assertEqual(0, 
len(m.request_history)) fq_path = '/ifs/home/admin/test' m.delete(self._mock_url + '/namespace' + fq_path + '?recursive=' + six.text_type(is_recursive_delete), status_code=204) self.isilon_api.delete(fq_path, recursive=is_recursive_delete) self.assertEqual(1, len(m.request_history)) @requests_mock.mock() def test_delete_error_case(self, m): fq_path = '/ifs/home/admin/test' m.delete(self._mock_url + '/namespace' + fq_path + '?recursive=False', status_code=403) self.assertRaises(requests.exceptions.HTTPError, self.isilon_api.delete, fq_path, recursive=False) @ddt.data((204, True), (404, False)) def test_delete_nfs_share(self, data): status_code, expected_return_value = data with requests_mock.mock() as m: self.assertEqual(0, len(m.request_history)) share_number = 42 m.delete('{0}/platform/1/protocols/nfs/exports/{1}' .format(self._mock_url, share_number), status_code=status_code) r = self.isilon_api.delete_nfs_share(share_number) self.assertEqual(1, len(m.request_history)) self.assertEqual(expected_return_value, r) @ddt.data((204, True), (404, False)) def test_delete_smb_shares(self, data): status_code, expected_return_value = data with requests_mock.mock() as m: self.assertEqual(0, len(m.request_history)) share_name = 'smb_share_42' m.delete('{0}/platform/1/protocols/smb/shares/{1}' .format(self._mock_url, share_name), status_code=status_code) r = self.isilon_api.delete_smb_share(share_name) self.assertEqual(1, len(m.request_history)) self.assertEqual(expected_return_value, r) @requests_mock.mock() def test_delete_snapshot(self, m): self.assertEqual(0, len(m.request_history)) m.delete(self._mock_url + '/platform/1/snapshot/snapshots/my_snapshot', status_code=204) self.isilon_api.delete_snapshot("my_snapshot") self.assertEqual(1, len(m.request_history)) @requests_mock.mock() def test_delete_snapshot_error_case(self, m): m.delete(self._mock_url + '/platform/1/snapshot/snapshots/my_snapshot', status_code=403) self.assertRaises(requests.exceptions.HTTPError, self.isilon_api.delete_snapshot, "my_snapshot") @requests_mock.mock() def test_quota_create(self, m): quota_path = '/ifs/manila/test' quota_size = 256 self.assertEqual(0, len(m.request_history)) m.post(self._mock_url + '/platform/1/quota/quotas', status_code=201) self.isilon_api.quota_create(quota_path, 'directory', quota_size) self.assertEqual(1, len(m.request_history)) expected_request_json = { 'path': quota_path, 'type': 'directory', 'include_snapshots': False, 'thresholds_include_overhead': False, 'enforced': True, 'thresholds': {'hard': quota_size}, } call_body = m.request_history[0].body self.assertEqual(expected_request_json, json.loads(call_body)) @requests_mock.mock() def test_quota_create__path_does_not_exist(self, m): quota_path = '/ifs/test2' self.assertEqual(0, len(m.request_history)) m.post(self._mock_url + '/platform/1/quota/quotas', status_code=400) self.assertRaises( requests.exceptions.HTTPError, self.isilon_api.quota_create, quota_path, 'directory', 2 ) @requests_mock.mock() def test_quota_get(self, m): self.assertEqual(0, len(m.request_history)) response_json = {'quotas': [{}]} m.get(self._mock_url + '/platform/1/quota/quotas', json=response_json, status_code=200) quota_path = "/ifs/manila/test" quota_type = "directory" self.isilon_api.quota_get(quota_path, quota_type) self.assertEqual(1, len(m.request_history)) request_query_string = m.request_history[0].qs expected_query_string = {'path': [quota_path]} self.assertEqual(expected_query_string, request_query_string) @requests_mock.mock() def 
test_quota_get__path_does_not_exist(self, m): self.assertEqual(0, len(m.request_history)) m.get(self._mock_url + '/platform/1/quota/quotas', status_code=404) response = self.isilon_api.quota_get( '/ifs/does_not_exist', 'directory') self.assertIsNone(response) @requests_mock.mock() def test_quota_modify(self, m): self.assertEqual(0, len(m.request_history)) quota_id = "ADEF1G" new_size = 1024 m.put('{0}/platform/1/quota/quotas/{1}'.format( self._mock_url, quota_id), status_code=204) self.isilon_api.quota_modify_size(quota_id, new_size) self.assertEqual(1, len(m.request_history)) expected_request_body = {'thresholds': {'hard': new_size}} request_body = m.request_history[0].body self.assertEqual(expected_request_body, json.loads(request_body)) @requests_mock.mock() def test_quota_modify__given_id_does_not_exist(self, m): quota_id = 'ADE2F' m.put('{0}/platform/1/quota/quotas/{1}'.format( self._mock_url, quota_id), status_code=404) self.assertRaises( requests.exceptions.HTTPError, self.isilon_api.quota_modify_size, quota_id, 1024 ) @requests_mock.mock() def test_quota_set__quota_already_exists(self, m): self.assertEqual(0, len(m.request_history)) quota_path = '/ifs/manila/test' quota_type = 'directory' quota_size = 256 quota_id = 'AFE2C' m.get('{0}/platform/1/quota/quotas'.format( self._mock_url), json={'quotas': [{'id': quota_id}]}, status_code=200) m.put( '{0}/platform/1/quota/quotas/{1}'.format(self._mock_url, quota_id), status_code=204 ) self.isilon_api.quota_set(quota_path, quota_type, quota_size) expected_quota_modify_json = {'thresholds': {'hard': quota_size}} quota_put_json = json.loads(m.request_history[1].body) self.assertEqual(expected_quota_modify_json, quota_put_json) @requests_mock.mock() def test_quota_set__quota_does_not_already_exist(self, m): self.assertEqual(0, len(m.request_history)) m.get('{0}/platform/1/quota/quotas'.format( self._mock_url), status_code=404) m.post('{0}/platform/1/quota/quotas'.format(self._mock_url), status_code=201) quota_path = '/ifs/manila/test' quota_type = 'directory' quota_size = 256 self.isilon_api.quota_set(quota_path, quota_type, quota_size) # verify a call is made to create a quota expected_create_json = { six.text_type('path'): quota_path, six.text_type('type'): 'directory', six.text_type('include_snapshots'): False, six.text_type('thresholds_include_overhead'): False, six.text_type('enforced'): True, six.text_type('thresholds'): {six.text_type('hard'): quota_size}, } create_request_json = json.loads(m.request_history[1].body) self.assertEqual(expected_create_json, create_request_json) @requests_mock.mock() def test_quota_set__path_does_not_already_exist(self, m): m.get(self._mock_url + '/platform/1/quota/quotas', status_code=400) e = self.assertRaises( requests.exceptions.HTTPError, self.isilon_api.quota_set, '/ifs/does_not_exist', 'directory', 2048 ) self.assertEqual(400, e.response.status_code) @ddt.data( ('foouser', isilon_api.SmbPermission.rw), ('testuser', isilon_api.SmbPermission.ro), ) def test_smb_permission_add(self, data): user, smb_permission = data share_name = 'testshare' with requests_mock.mock() as m: papi_share_url = '{0}/platform/1/protocols/smb/shares/{1}'.format( self._mock_url, share_name) share_data = { 'shares': [ {'permissions': []} ] } m.get(papi_share_url, status_code=200, json=share_data) auth_url = '{0}/platform/1/auth/mapping/users/lookup?user={1}' \ ''.format(self._mock_url, user) example_sid = 'SID:S-1-5-21' sid_json = { 'id': example_sid, 'name': user, 'type': 'user' } auth_json = {'mapping': [ {'user': {'sid': 
sid_json}} ]} m.get(auth_url, status_code=200, json=auth_json) m.put(papi_share_url) self.isilon_api.smb_permissions_add(share_name, user, smb_permission) perms_put_request = m.request_history[2] expected_perm_request_json = { 'permissions': [ {'permission': smb_permission.value, 'permission_type': 'allow', 'trustee': sid_json } ] } self.assertEqual(expected_perm_request_json, json.loads(perms_put_request.body)) @requests_mock.mock() def test_smb_permission_add_with_multiple_users_found(self, m): user = 'foouser' smb_permission = isilon_api.SmbPermission.rw share_name = 'testshare' papi_share_url = '{0}/platform/1/protocols/smb/shares/{1}'.format( self._mock_url, share_name) share_data = { 'shares': [ {'permissions': []} ] } m.get(papi_share_url, status_code=200, json=share_data) auth_url = '{0}/platform/1/auth/mapping/users/lookup?user={1}' \ ''.format(self._mock_url, user) example_sid = 'SID:S-1-5-21' sid_json = { 'id': example_sid, 'name': user, 'type': 'user' } auth_json = {'mapping': [ {'user': {'sid': sid_json}}, {'user': {'sid': sid_json}}, ]} m.get(auth_url, status_code=200, json=auth_json) m.put(papi_share_url) self.assertRaises(exception.ShareBackendException, self.isilon_api.smb_permissions_add, share_name, user, smb_permission) @requests_mock.mock() def test_smb_permission_remove(self, m): share_name = 'testshare' user = 'testuser' share_data = { 'permissions': [{ 'permission': 'change', 'permission_type': 'allow', 'trustee': { 'id': 'SID:S-1-5-21', 'name': user, 'type': 'user', } }] } papi_share_url = '{0}/platform/1/protocols/smb/shares/{1}'.format( self._mock_url, share_name) m.get(papi_share_url, status_code=200, json={'shares': [share_data]}) num_existing_perms = len(self.isilon_api.lookup_smb_share(share_name)) self.assertEqual(1, num_existing_perms) m.put(papi_share_url) self.isilon_api.smb_permissions_remove(share_name, user) smb_put_request = m.request_history[2] expected_body = {'permissions': []} expected_body = json.dumps(expected_body) self.assertEqual(expected_body, smb_put_request.body) @requests_mock.mock() def test_smb_permission_remove_with_multiple_existing_perms(self, m): share_name = 'testshare' user = 'testuser' foouser_perms = { 'permission': 'change', 'permission_type': 'allow', 'trustee': { 'id': 'SID:S-1-5-21', 'name': 'foouser', 'type': 'user', } } user_perms = { 'permission': 'change', 'permission_type': 'allow', 'trustee': { 'id': 'SID:S-1-5-22', 'name': user, 'type': 'user', } } share_data = { 'permissions': [ foouser_perms, user_perms, ] } papi_share_url = '{0}/platform/1/protocols/smb/shares/{1}'.format( self._mock_url, share_name) m.get(papi_share_url, status_code=200, json={'shares': [share_data]}) num_existing_perms = len(self.isilon_api.lookup_smb_share( share_name)['permissions']) self.assertEqual(2, num_existing_perms) m.put(papi_share_url) self.isilon_api.smb_permissions_remove(share_name, user) smb_put_request = m.request_history[2] expected_body = {'permissions': [foouser_perms]} expected_body = json.dumps(expected_body) self.assertEqual(json.loads(expected_body), json.loads(smb_put_request.body)) @requests_mock.mock() def test_smb_permission_remove_with_empty_perms_list(self, m): share_name = 'testshare' user = 'testuser' share_data = {'permissions': []} papi_share_url = '{0}/platform/1/protocols/smb/shares/{1}'.format( self._mock_url, share_name) m.get(papi_share_url, status_code=200, json={'shares': [share_data]}) m.put(papi_share_url) self.assertRaises(exception.ShareBackendException, self.isilon_api.smb_permissions_remove, 
share_name, user) @requests_mock.mock() def test_auth_lookup_user(self, m): user = 'foo' auth_url = '{0}/platform/1/auth/mapping/users/lookup?user={1}'.format( self._mock_url, user) example_sid = 'SID:S-1-5-21' sid_json = { 'id': example_sid, 'name': user, 'type': 'user' } auth_json = { 'mapping': [ {'user': {'sid': sid_json}} ] } m.get(auth_url, status_code=200, json=auth_json) returned_auth_json = self.isilon_api.auth_lookup_user(user) self.assertEqual(auth_json, returned_auth_json) @requests_mock.mock() def test_auth_lookup_user_with_nonexistent_user(self, m): user = 'nonexistent' auth_url = '{0}/platform/1/auth/mapping/users/lookup?user={1}'.format( self._mock_url, user) m.get(auth_url, status_code=404) self.assertRaises(exception.ShareBackendException, self.isilon_api.auth_lookup_user, user) @requests_mock.mock() def test_auth_lookup_user_with_backend_error(self, m): user = 'foo' auth_url = '{0}/platform/1/auth/mapping/users/lookup?user={1}'.format( self._mock_url, user) m.get(auth_url, status_code=400) self.assertRaises(requests.exceptions.HTTPError, self.isilon_api.auth_lookup_user, user) def _add_create_directory_response(self, m, path, is_recursive): url = '{0}/namespace{1}?recursive={2}'.format( self._mock_url, path, six.text_type(is_recursive)) m.put(url, status_code=200) def _add_file_clone_response(self, m, fq_dest_path, snapshot_name): url = '{0}/namespace{1}?clone=true&snapshot={2}'.format( self._mock_url, fq_dest_path, snapshot_name) m.put(url) def _add_get_directory_listing_response(self, m, fq_dir_path, json_str): url = '{0}/namespace{1}?detail=default'.format( self._mock_url, fq_dir_path) m.get(url, json=json.loads(json_str), status_code=200) def _add_get_snapshot_response( self, m, snapshot_name, json_str, status=200): url = '{0}/platform/1/snapshot/snapshots/{1}'.format( self._mock_url, snapshot_name ) m.get(url, status_code=status, json=json.loads(json_str)) def _verify_dir_creation_request(self, request, path, is_recursive): self.assertEqual('PUT', request.method) expected_url = '{0}/namespace{1}?recursive={2}'.format( self._mock_url, path, six.text_type(is_recursive)) self.assertEqual(expected_url, request.url) self.assertTrue("x-isi-ifs-target-type" in request.headers) self.assertEqual("container", request.headers['x-isi-ifs-target-type']) def _verify_clone_file_from_snapshot( self, request, fq_file_path, fq_dest_path, snapshot_name): self.assertEqual('PUT', request.method) expected_url = '{0}/namespace{1}?clone=true&snapshot={2}'.format( self._mock_url, fq_dest_path, snapshot_name ) self.assertEqual(expected_url, request.request.url) self.assertTrue("x-isi-ifs-copy-source" in request.headers) self.assertEqual('/namespace' + fq_file_path, request.headers['x-isi-ifs-copy-source']) manila-2.0.0/manila/tests/share/drivers/emc/plugins/isilon/test_isilon.py0000664000567000056710000007704412701407107027741 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import ddt import mock from oslo_log import log from oslo_utils import units import six from manila.common import constants as const from manila import exception from manila.share.drivers.emc.plugins.isilon import isilon from manila.share.drivers.emc.plugins.isilon.isilon_api import SmbPermission from manila import test LOG = log.getLogger(__name__) @ddt.ddt class IsilonTest(test.TestCase): """Integration test for the Isilon Manila driver.""" ISILON_ADDR = '10.0.0.1' API_URL = 'https://%s:8080' % ISILON_ADDR AUTH = ('admin', 'admin') ROOT_DIR = '/ifs/manila-test' SHARE_NAME = 'share-foo' SHARE_DIR = ROOT_DIR + '/' + SHARE_NAME ADMIN_HOME_DIR = '/ifs/home/admin' CLONE_DIR = ROOT_DIR + '/clone-dir' class MockConfig(object): def safe_get(self, value): if value == 'emc_nas_server': return '10.0.0.1' elif value == 'emc_nas_server_port': return '8080' elif value == 'emc_nas_login': return 'admin' elif value == 'emc_nas_password': return 'a' elif value == 'emc_nas_root_dir': return '/ifs/manila-test' else: return None @mock.patch( 'manila.share.drivers.emc.plugins.isilon.isilon.isilon_api.IsilonApi', autospec=True) def setUp(self, mock_isi_api): super(IsilonTest, self).setUp() self._mock_isilon_api = mock_isi_api.return_value self.storage_connection = isilon.IsilonStorageConnection(LOG) self.mock_context = mock.Mock('Context') self.mock_emc_driver = mock.Mock('EmcDriver') self.mock_emc_driver.attach_mock(self.MockConfig(), 'configuration') self.storage_connection.connect( self.mock_emc_driver, self.mock_context) def test_allow_access_single_ip_nfs(self): # setup share = {'name': self.SHARE_NAME, 'share_proto': 'NFS'} access = {'access_type': 'ip', 'access_to': '10.1.1.10', 'access_level': const.ACCESS_LEVEL_RW} share_server = None fake_export_id = 1 self._mock_isilon_api.lookup_nfs_export.return_value = fake_export_id self._mock_isilon_api.get_nfs_export.return_value = { 'clients': []} self.assertFalse(self._mock_isilon_api.request.called) # call method under test self.storage_connection.allow_access(self.mock_context, share, access, share_server) # verify expected REST API call is executed expected_url = (self.API_URL + '/platform/1/protocols/nfs/exports/' + str(fake_export_id)) expected_data = {'clients': ['10.1.1.10']} self._mock_isilon_api.request.assert_called_once_with( 'PUT', expected_url, data=expected_data) def test_allow_access_with_nfs_readonly(self): # setup share = {'name': self.SHARE_NAME, 'share_proto': 'NFS'} access = {'access_type': 'ip', 'access_to': '10.1.1.10', 'access_level': const.ACCESS_LEVEL_RO} fake_export_id = 70 self._mock_isilon_api.lookup_nfs_export.return_value = fake_export_id self._mock_isilon_api.get_nfs_export.return_value = { 'read_only_clients': []} self.assertFalse(self._mock_isilon_api.request.called) self.storage_connection.allow_access( self.mock_context, share, access, None) # verify expected REST API call is executed expected_url = (self.API_URL + '/platform/1/protocols/nfs/exports/' + six.text_type(fake_export_id)) expected_data = {'read_only_clients': ['10.1.1.10']} self._mock_isilon_api.request.assert_called_once_with( 'PUT', expected_url, data=expected_data) def test_allow_access_with_nfs_readwrite(self): # setup share = {'name': self.SHARE_NAME, 'share_proto': 'NFS'} access = {'access_type': 'ip', 'access_to': '10.1.1.10', 'access_level': const.ACCESS_LEVEL_RW} fake_export_id = 70 self._mock_isilon_api.lookup_nfs_export.return_value = fake_export_id self._mock_isilon_api.get_nfs_export.return_value = { 'clients': []} 
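        # Read-only NFS rules are tracked in the export's 'read_only_clients'
        # list; read-write rules use the 'clients' list (see the rw tests in
        # this class).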
self.assertFalse(self._mock_isilon_api.request.called) self.storage_connection.allow_access( self.mock_context, share, access, None) # verify expected REST API call is executed expected_url = (self.API_URL + '/platform/1/protocols/nfs/exports/' + six.text_type(fake_export_id)) expected_data = {'clients': ['10.1.1.10']} self._mock_isilon_api.request.assert_called_once_with( 'PUT', expected_url, data=expected_data) def test_allow_access_with_cifs_ip_readonly(self): # Note: Driver does not currently support readonly access for "ip" type share = {'name': self.SHARE_NAME, 'share_proto': 'CIFS'} access = {'access_type': 'ip', 'access_to': '10.1.1.10', 'access_level': const.ACCESS_LEVEL_RO} self.assertRaises( exception.InvalidShareAccess, self.storage_connection.allow_access, self.mock_context, share, access, None) def test_deny_access__ip_nfs_readwrite(self): """Verifies that an IP will be remove from a whitelist.""" fake_export_id = 1 self._mock_isilon_api.lookup_nfs_export.return_value = fake_export_id # simulate an IP added to the whitelist ip_addr = '10.0.0.4' self._mock_isilon_api.get_nfs_export.return_value = { 'clients': [ip_addr]} share = {'name': self.SHARE_NAME, 'share_proto': 'NFS'} access = {'access_type': 'ip', 'access_to': ip_addr, 'access_level': const.ACCESS_LEVEL_RW} share_server = None # call method under test self.assertFalse(self._mock_isilon_api.request.called) self.storage_connection.deny_access(self.mock_context, share, access, share_server) # verify that a call is made to remove an existing IP from the list expected_url = (self.API_URL + '/platform/1/protocols/nfs/exports/' + str(fake_export_id)) expected_data = {'clients': []} self._mock_isilon_api.request.assert_called_once_with( 'PUT', expected_url, data=expected_data ) def test_deny_access__nfs_ip_readonly(self): fake_export_id = 1 self._mock_isilon_api.lookup_nfs_export.return_value = fake_export_id # simulate an IP added to the whitelist ip_addr = '10.0.0.4' self._mock_isilon_api.get_nfs_export.return_value = { 'read_only_clients': [ip_addr]} share = {'name': self.SHARE_NAME, 'share_proto': 'NFS'} access = {'access_type': 'ip', 'access_to': ip_addr, 'access_level': const.ACCESS_LEVEL_RO} share_server = None # call method under test self.assertFalse(self._mock_isilon_api.request.called) self.storage_connection.deny_access(self.mock_context, share, access, share_server) # verify that a call is made to remove an existing IP from the list expected_url = (self.API_URL + '/platform/1/protocols/nfs/exports/' + six.text_type(fake_export_id)) expected_data = {'read_only_clients': []} self._mock_isilon_api.request.assert_called_once_with( 'PUT', expected_url, data=expected_data ) def test_deny_access_ip_cifs(self): """Verifies that an IP will be remove from a whitelist. Precondition: the IP to be removed exists in the whitelist. Otherwise, do nothing. 
""" # setup ip_addr = '10.1.1.10' share = {'name': self.SHARE_NAME, 'share_proto': 'CIFS'} self._mock_isilon_api.lookup_smb_share.return_value = { 'host_acl': ['allow:' + ip_addr]} self.assertFalse(self._mock_isilon_api.request.called) # call method under test access = {'access_type': 'ip', 'access_to': ip_addr, 'access_level': const.ACCESS_LEVEL_RW} share_server = None self.storage_connection.deny_access(self.mock_context, share, access, share_server) # verify API call is made to remove IP is removed from whitelist expected_url = (self.API_URL + '/platform/1/protocols/smb/shares/' + self.SHARE_NAME) expected_data = {'host_acl': []} self._mock_isilon_api.request.assert_called_once_with( 'PUT', expected_url, data=expected_data) def test_deny_access_nfs_invalid_access_type(self): share = {'name': self.SHARE_NAME, 'share_proto': 'NFS'} access = {'access_type': 'foo_access_type', 'access_to': '10.0.0.1'} # This operation should return silently self.storage_connection.deny_access( self.mock_context, share, access, None) def test_deny_access_cifs_invalid_access_type(self): share = {'name': self.SHARE_NAME, 'share_proto': 'CIFS'} access = {'access_type': 'foo_access_type', 'access_to': '10.0.0.1'} # This operation should return silently self.storage_connection.deny_access(self.mock_context, share, access, None) def test_deny_access_invalid_share_protocol(self): share = {'name': self.SHARE_NAME, 'share_proto': 'FOO'} access = {'access_type': 'ip', 'access_to': '10.0.0.1', 'access_level': const.ACCESS_LEVEL_RW} # This operation should return silently self.storage_connection.deny_access( self.mock_context, share, access, None) def test_deny_access_nfs_export_does_not_exist(self): share = {'name': self.SHARE_NAME, 'share_proto': 'NFS'} access = {'access_type': 'ip', 'access_to': '10.0.0.1', 'access_level': const.ACCESS_LEVEL_RW} self._mock_isilon_api.lookup_nfs_export.return_value = 1 self._mock_isilon_api.get_nfs_export.return_value = None self.assertRaises( exception.ShareBackendException, self.storage_connection.deny_access, self.mock_context, share, access, None ) def test_deny_access_nfs_share_does_not_exist(self): share = {'name': self.SHARE_NAME, 'share_proto': 'NFS'} access = {'access_type': 'ip', 'access_to': '10.0.0.1', 'access_level': const.ACCESS_LEVEL_RW} self._mock_isilon_api.lookup_nfs_export.return_value = None self.assertRaises( exception.ShareBackendException, self.storage_connection.deny_access, self.mock_context, share, access, None) def test_deny_access_nfs_share_does_not_contain_required_key(self): share = {'name': self.SHARE_NAME, 'share_proto': 'NFS'} access = { 'access_type': 'ip', 'access_to': '10.0.0.1', 'access_level': const.ACCESS_LEVEL_RW, } self._mock_isilon_api.get_nfs_export.return_value = {} self.assertRaises(exception.ShareBackendException, self.storage_connection.deny_access, self.mock_context, share, access, None) def test_allow_access_multiple_ip_nfs(self): """Verifies adding an IP to a whitelist with pre-existing ips. Verifies that when adding an additional IP to a whitelist which already contains IPs, the Isilon driver successfully appends the IP to the whitelist. 
""" # setup fake_export_id = 42 new_allowed_ip = '10.7.7.8' self._mock_isilon_api.lookup_nfs_export.return_value = fake_export_id existing_ips = ['10.0.0.1', '10.1.1.1', '10.0.0.2'] export_json = { 'clients': existing_ips, 'access_level': const.ACCESS_LEVEL_RW, } self._mock_isilon_api.get_nfs_export.return_value = export_json self.assertFalse(self._mock_isilon_api.request.called) # call method under test share = {'name': self.SHARE_NAME, 'share_proto': 'NFS'} access = {'access_type': 'ip', 'access_to': new_allowed_ip, 'access_level': const.ACCESS_LEVEL_RW} share_server = None self.storage_connection.allow_access( self.mock_context, share, access, share_server) # verify access rule is applied expected_url = (self.API_URL + '/platform/1/protocols/nfs/exports/' + str(fake_export_id)) self.assertTrue(self._mock_isilon_api.request.called) args, kwargs = self._mock_isilon_api.request.call_args action, url = args self.assertEqual('PUT', action) self.assertEqual(expected_url, url) self.assertEqual(1, len(kwargs)) self.assertTrue('data' in kwargs) actual_clients = set(kwargs['data']['clients']) expected_clients = set(existing_ips) expected_clients.add(new_allowed_ip) self.assertEqual(expected_clients, actual_clients) def test_allow_access_multiple_ip_cifs(self): """Verifies adding an IP to a whitelist with pre-existing ips. Verifies that when adding an additional IP to a whitelist which already contains IPs, the Isilon driver successfully appends the IP to the whitelist. """ # setup share_name = self.SHARE_NAME new_allowed_ip = '10.101.1.1' existing_ips = ['allow:10.0.0.1', 'allow:10.1.1.1', 'allow:10.0.0.2'] share_json = {'name': share_name, 'host_acl': existing_ips} self._mock_isilon_api.lookup_smb_share.return_value = share_json self.assertFalse(self._mock_isilon_api.request.called) # call method under test share = {'name': share_name, 'share_proto': 'CIFS'} access = {'access_type': 'ip', 'access_to': new_allowed_ip, 'access_level': const.ACCESS_LEVEL_RW} share_server = None self.storage_connection.allow_access(self.mock_context, share, access, share_server) # verify access rule is applied expected_url = (self.API_URL + '/platform/1/protocols/smb/shares/' + share_name) self.assertTrue(self._mock_isilon_api.request.called) args, kwargs = self._mock_isilon_api.request.call_args action, url = args self.assertEqual('PUT', action) self.assertEqual(expected_url, url) self.assertEqual(1, len(kwargs)) self.assertTrue('data' in kwargs) actual_clients = set(kwargs['data']['host_acl']) expected_clients = set(existing_ips) expected_clients.add('allow:' + new_allowed_ip) self.assertEqual(expected_clients, actual_clients) def test_allow_access_single_ip_cifs(self): # setup share_name = self.SHARE_NAME share = {'name': share_name, 'share_proto': 'CIFS'} allow_ip = '10.1.1.10' access = {'access_type': 'ip', 'access_to': allow_ip, 'access_level': const.ACCESS_LEVEL_RW} share_server = None self._mock_isilon_api.lookup_smb_share.return_value = { 'name': share_name, 'host_acl': []} self.assertFalse(self._mock_isilon_api.request.called) # call method under test self.storage_connection.allow_access(self.mock_context, share, access, share_server) # verify access rule is applied expected_url = (self.API_URL + '/platform/1/protocols/smb/shares/' + self.SHARE_NAME) expected_data = {'host_acl': ['allow:' + allow_ip]} self._mock_isilon_api.request.assert_called_once_with( 'PUT', expected_url, data=expected_data) @ddt.data( ('foo', const.ACCESS_LEVEL_RW, SmbPermission.rw), ('testuser', const.ACCESS_LEVEL_RO, 
SmbPermission.ro), ) def test_allow_access_with_cifs_user(self, data): # setup share_name = self.SHARE_NAME user, access_level, expected_smb_perm = data share = {'name': share_name, 'share_proto': 'CIFS'} access = {'access_type': 'user', 'access_to': user, 'access_level': access_level} self.storage_connection.allow_access(self.mock_context, share, access, None) self._mock_isilon_api.smb_permissions_add.assert_called_once_with( share_name, user, expected_smb_perm) def test_allow_access_with_cifs_user_invalid_access_level(self): share = {'name': self.SHARE_NAME, 'share_proto': 'CIFS'} access = { 'access_type': 'user', 'access_to': 'foo', 'access_level': 'everything', } self.assertRaises(exception.InvalidShareAccess, self.storage_connection.allow_access, self.mock_context, share, access, None) def test_allow_access_with_cifs_invalid_access_type(self): share_name = self.SHARE_NAME share = {'name': share_name, 'share_proto': 'CIFS'} access = {'access_type': 'fooaccesstype', 'access_to': 'testuser', 'access_level': const.ACCESS_LEVEL_RW} self.assertRaises(exception.InvalidShareAccess, self.storage_connection.allow_access, self.mock_context, share, access, None) def test_deny_access_with_cifs_user(self): share_name = self.SHARE_NAME user_to_remove = 'testuser' share = {'name': share_name, 'share_proto': 'CIFS'} access = {'access_type': 'user', 'access_to': user_to_remove, 'access_level': const.ACCESS_LEVEL_RW} self.assertFalse(self._mock_isilon_api.smb_permissions_remove.called) self.storage_connection.deny_access(self.mock_context, share, access, None) self._mock_isilon_api.smb_permissions_remove.assert_called_with( share_name, user_to_remove) def test_allow_access_invalid_access_type(self): # setup share_name = self.SHARE_NAME share = {'name': share_name, 'share_proto': 'NFS'} allow_ip = '10.1.1.10' access = {'access_type': 'foo_access_type', 'access_to': allow_ip} # verify method under test throws the expected exception self.assertRaises( exception.InvalidShareAccess, self.storage_connection.allow_access, self.mock_context, share, access, None) def test_allow_access_invalid_share_protocol(self): # setup share_name = self.SHARE_NAME share = {'name': share_name, 'share_proto': 'FOO_PROTOCOL'} allow_ip = '10.1.1.10' access = {'access_type': 'ip', 'access_to': allow_ip} # verify method under test throws the expected exception self.assertRaises( exception.InvalidShare, self.storage_connection.allow_access, self.mock_context, share, access, None) def test_create_share_nfs(self): share_path = self.SHARE_DIR self.assertFalse(self._mock_isilon_api.create_directory.called) self.assertFalse(self._mock_isilon_api.create_nfs_export.called) # create the share share = {"name": self.SHARE_NAME, "share_proto": 'NFS', "size": 8} location = self.storage_connection.create_share(self.mock_context, share, None) # verify location and API call made expected_location = '%s:%s' % (self.ISILON_ADDR, self.SHARE_DIR) self.assertEqual(expected_location, location) self._mock_isilon_api.create_directory.assert_called_with(share_path) self._mock_isilon_api.create_nfs_export.assert_called_with(share_path) # verify directory quota call made self._mock_isilon_api.quota_create.assert_called_with( share_path, 'directory', 8 * units.Gi) def test_create_share_cifs(self): self.assertFalse(self._mock_isilon_api.create_directory.called) self.assertFalse(self._mock_isilon_api.create_smb_share.called) # create the share share = {"name": self.SHARE_NAME, "share_proto": 'CIFS', "size": 8} location = 
self.storage_connection.create_share(self.mock_context, share, None) expected_location = '\\\\{0}\\{1}'.format( self.ISILON_ADDR, self.SHARE_NAME) self.assertEqual(expected_location, location) self._mock_isilon_api.create_directory.assert_called_once_with( self.SHARE_DIR) self._mock_isilon_api.create_smb_share.assert_called_once_with( self.SHARE_NAME, self.SHARE_DIR) # verify directory quota call made self._mock_isilon_api.quota_create.assert_called_with( self.SHARE_DIR, 'directory', 8 * units.Gi) def test_create_share_invalid_share_protocol(self): share = {"name": self.SHARE_NAME, "share_proto": 'FOO_PROTOCOL'} self.assertRaises( exception.InvalidShare, self.storage_connection.create_share, self.mock_context, share, share_server=None) def test_create_share_nfs_backend_failure(self): share = {"name": self.SHARE_NAME, "share_proto": 'NFS'} self._mock_isilon_api.create_nfs_export.return_value = False self.assertRaises( exception.ShareBackendException, self.storage_connection.create_share, self.mock_context, share, share_server=None) def test_create_snapshot(self): # create snapshot snapshot_name = "snapshot01" snapshot_path = '/ifs/home/admin' snapshot = {'name': snapshot_name, 'share_name': snapshot_path} self.storage_connection.create_snapshot(self.mock_context, snapshot, None) # verify the create snapshot API call is executed self._mock_isilon_api.create_snapshot.assert_called_with(snapshot_name, snapshot_path) def test_create_share_from_snapshot_nfs(self): # assertions self.assertFalse(self._mock_isilon_api.create_nfs_export.called) self.assertFalse(self._mock_isilon_api.clone_snapshot.called) snapshot_name = "snapshot01" snapshot_path = '/ifs/home/admin' # execute method under test snapshot = {'name': snapshot_name, 'share_name': snapshot_path} share = {"name": self.SHARE_NAME, "share_proto": 'NFS', 'size': 5} location = self.storage_connection.create_share_from_snapshot( self.mock_context, share, snapshot, None) # verify NFS export created at expected location self._mock_isilon_api.create_nfs_export.assert_called_with( self.SHARE_DIR) # verify clone_directory(container_path) method called self._mock_isilon_api.clone_snapshot.assert_called_once_with( snapshot_name, self.SHARE_DIR) expected_location = '{0}:{1}'.format( self.ISILON_ADDR, self.SHARE_DIR) self.assertEqual(expected_location, location) # verify directory quota call made self._mock_isilon_api.quota_create.assert_called_with( self.SHARE_DIR, 'directory', 5 * units.Gi) def test_create_share_from_snapshot_cifs(self): # assertions self.assertFalse(self._mock_isilon_api.create_smb_share.called) self.assertFalse(self._mock_isilon_api.clone_snapshot.called) # setup snapshot_name = "snapshot01" snapshot_path = '/ifs/home/admin' new_share_name = 'clone-dir' # execute method under test snapshot = {'name': snapshot_name, 'share_name': snapshot_path} share = {"name": new_share_name, "share_proto": 'CIFS', "size": 2} location = self.storage_connection.create_share_from_snapshot( self.mock_context, share, snapshot, None) # verify call made to create new CIFS share self._mock_isilon_api.create_smb_share.assert_called_once_with( new_share_name, self.CLONE_DIR) self._mock_isilon_api.clone_snapshot.assert_called_once_with( snapshot_name, self.CLONE_DIR) expected_location = '\\\\{0}\\{1}'.format(self.ISILON_ADDR, new_share_name) self.assertEqual(expected_location, location) # verify directory quota call made expected_share_path = '{0}/{1}'.format(self.ROOT_DIR, new_share_name) self._mock_isilon_api.quota_create.assert_called_with( 
expected_share_path, 'directory', 2 * units.Gi) def test_delete_share_nfs(self): share = {"name": self.SHARE_NAME, "share_proto": 'NFS'} fake_share_num = 42 self._mock_isilon_api.lookup_nfs_export.return_value = fake_share_num self.assertFalse(self._mock_isilon_api.delete_nfs_share.called) # delete the share self.storage_connection.delete_share(self.mock_context, share, None) # verify share delete self._mock_isilon_api.delete_nfs_share.assert_called_with( fake_share_num) def test_delete_share_cifs(self): self.assertFalse(self._mock_isilon_api.delete_smb_share.called) # delete the share share = {"name": self.SHARE_NAME, "share_proto": 'CIFS'} self.storage_connection.delete_share(self.mock_context, share, None) # verify share deleted self._mock_isilon_api.delete_smb_share.assert_called_with( self.SHARE_NAME) def test_delete_share_invalid_share_proto(self): share = {"name": self.SHARE_NAME, "share_proto": 'FOO_PROTOCOL'} self.assertRaises( exception.InvalidShare, self.storage_connection.delete_share, self.mock_context, share, None ) def test_delete_nfs_share_backend_failure(self): share = {"name": self.SHARE_NAME, "share_proto": 'NFS'} self._mock_isilon_api.delete_nfs_share.return_value = False self.assertRaises( exception.ShareBackendException, self.storage_connection.delete_share, self.mock_context, share, None ) def test_delete_nfs_share_share_does_not_exist(self): self._mock_isilon_api.lookup_nfs_export.return_value = None share = {"name": self.SHARE_NAME, "share_proto": 'NFS'} # verify the calling delete on a non-existent share returns and does # not throw exception self.storage_connection.delete_share(self.mock_context, share, None) def test_delete_cifs_share_backend_failure(self): share = {"name": self.SHARE_NAME, "share_proto": 'CIFS'} self._mock_isilon_api.delete_smb_share.return_value = False self.assertRaises( exception.ShareBackendException, self.storage_connection.delete_share, self.mock_context, share, None ) def test_delete_cifs_share_share_does_not_exist(self): share = {"name": self.SHARE_NAME, "share_proto": 'CIFS'} self._mock_isilon_api.lookup_smb_share.return_value = None # verify the calling delete on a non-existent share returns and does # not throw exception self.storage_connection.delete_share(self.mock_context, share, None) def test_delete_snapshot(self): # create a snapshot snapshot_name = "snapshot01" snapshot_path = '/ifs/home/admin' snapshot = {'name': snapshot_name, 'share_name': snapshot_path} self.assertFalse(self._mock_isilon_api.delete_snapshot.called) # delete the created snapshot self.storage_connection.delete_snapshot(self.mock_context, snapshot, None) # verify the API call was made to delete the snapshot self._mock_isilon_api.delete_snapshot.assert_called_once_with( snapshot_name) def test_ensure_share(self): share = {"name": self.SHARE_NAME, "share_proto": 'CIFS'} self.storage_connection.ensure_share(self.mock_context, share, None) @mock.patch( 'manila.share.drivers.emc.plugins.isilon.isilon.isilon_api.IsilonApi', autospec=True) def test_connect(self, mock_isi_api): storage_connection = isilon.IsilonStorageConnection(LOG) # execute method under test storage_connection.connect( self.mock_emc_driver, self.mock_context) # verify connect sets driver params appropriately mock_config = self.MockConfig() server_addr = mock_config.safe_get('emc_nas_server') self.assertEqual(server_addr, storage_connection._server) expected_port = int(mock_config.safe_get('emc_nas_server_port')) self.assertEqual(expected_port, storage_connection._port) 
self.assertEqual('https://{0}:{1}'.format(server_addr, expected_port), storage_connection._server_url) expected_username = mock_config.safe_get('emc_nas_login') self.assertEqual(expected_username, storage_connection._username) expected_password = mock_config.safe_get('emc_nas_password') self.assertEqual(expected_password, storage_connection._password) self.assertFalse(storage_connection._verify_ssl_cert) @mock.patch( 'manila.share.drivers.emc.plugins.isilon.isilon.isilon_api.IsilonApi', autospec=True) def test_connect_root_dir_does_not_exist(self, mock_isi_api): mock_isilon_api = mock_isi_api.return_value mock_isilon_api.is_path_existent.return_value = False storage_connection = isilon.IsilonStorageConnection(LOG) # call method under test storage_connection.connect(self.mock_emc_driver, self.mock_context) mock_isilon_api.create_directory.assert_called_once_with( self.ROOT_DIR, recursive=True) def test_update_share_stats(self): stats_dict = {} self.storage_connection.update_share_stats(stats_dict) expected_version = isilon.VERSION self.assertEqual({'driver_version': expected_version}, stats_dict) def test_get_network_allocations_number(self): # call method under test num = self.storage_connection.get_network_allocations_number() self.assertEqual(0, num) def test_extend_share(self): quota_id = 'abcdef' new_share_size = 8 share = { "name": self.SHARE_NAME, "share_proto": 'NFS', "size": new_share_size } self._mock_isilon_api.quota_get.return_value = {'id': quota_id} self.assertFalse(self._mock_isilon_api.quota_set.called) self.storage_connection.extend_share(share, new_share_size) share_path = '{0}/{1}'.format(self.ROOT_DIR, self.SHARE_NAME) expected_quota_size = new_share_size * units.Gi self._mock_isilon_api.quota_set.assert_called_once_with( share_path, 'directory', expected_quota_size) manila-2.0.0/manila/tests/share/drivers/test_glusterfs.py0000664000567000056710000006124512701407107024734 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import socket import ddt import mock from oslo_config import cfg from manila import context from manila import exception from manila.share import configuration as config from manila.share.drivers import ganesha from manila.share.drivers import glusterfs from manila.share.drivers.glusterfs import layout from manila import test from manila.tests import fake_share from manila.tests import fake_utils CONF = cfg.CONF fake_gluster_manager_attrs = { 'export': '127.0.0.1:/testvol', 'host': '127.0.0.1', 'qualified': 'testuser@127.0.0.1:/testvol', 'user': 'testuser', 'volume': 'testvol', 'path_to_private_key': '/fakepath/to/privatekey', 'remote_server_password': 'fakepassword', } fake_share_name = 'fakename' NFS_EXPORT_DIR = 'nfs.export-dir' NFS_EXPORT_VOL = 'nfs.export-volumes' NFS_RPC_AUTH_ALLOW = 'nfs.rpc-auth-allow' NFS_RPC_AUTH_REJECT = 'nfs.rpc-auth-reject' @ddt.ddt class GlusterfsShareDriverTestCase(test.TestCase): """Tests GlusterfsShareDriver.""" def setUp(self): super(GlusterfsShareDriverTestCase, self).setUp() fake_utils.stub_out_utils_execute(self) self._execute = fake_utils.fake_execute self._context = context.get_admin_context() self.addCleanup(fake_utils.fake_execute_set_repliers, []) self.addCleanup(fake_utils.fake_execute_clear_log) CONF.set_default('reserved_share_percentage', 50) CONF.set_default('driver_handles_share_servers', False) self.fake_conf = config.Configuration(None) self._driver = glusterfs.GlusterfsShareDriver( execute=self._execute, configuration=self.fake_conf) self.share = fake_share.fake_share(share_proto='NFS') def test_do_setup(self): self.mock_object(self._driver, '_get_helper') self.mock_object(layout.GlusterfsShareDriverBase, 'do_setup') _context = mock.Mock() self._driver.do_setup(_context) self._driver._get_helper.assert_called_once_with() layout.GlusterfsShareDriverBase.do_setup.assert_called_once_with( _context) @ddt.data(True, False) def test_setup_via_manager(self, has_parent): gmgr = mock.Mock() share_mgr_parent = mock.Mock() if has_parent else None nfs_helper = mock.Mock() nfs_helper.get_export = mock.Mock(return_value='host:/vol') self._driver.nfs_helper = mock.Mock(return_value=nfs_helper) ret = self._driver._setup_via_manager( {'manager': gmgr, 'share': self.share}, share_manager_parent=share_mgr_parent) gmgr.set_vol_option.assert_called_once_with( 'nfs.export-volumes', False) self._driver.nfs_helper.assert_called_once_with( self._execute, self.fake_conf, gluster_manager=gmgr) nfs_helper.get_export.assert_called_once_with(self.share) self.assertEqual('host:/vol', ret) @ddt.data({'helpercls': None, 'path': '/fakepath'}, {'helpercls': None, 'path': None}, {'helpercls': glusterfs.GlusterNFSHelper, 'path': '/fakepath'}, {'helpercls': glusterfs.GlusterNFSHelper, 'path': None}) @ddt.unpack def test_setup_via_manager_path(self, helpercls, path): gmgr = mock.Mock() gmgr.path = path if not helpercls: helper = mock.Mock() helper.get_export = mock.Mock(return_value='host:/vol') helpercls = mock.Mock(return_value=helper) self._driver.nfs_helper = helpercls if helpercls == glusterfs.GlusterNFSHelper and path is None: gmgr.get_vol_option = mock.Mock(return_value=True) self._driver._setup_via_manager( {'manager': gmgr, 'share': self.share}) if helpercls == glusterfs.GlusterNFSHelper and path is None: gmgr.get_vol_option.assert_called_once_with( NFS_EXPORT_VOL, boolean=True) args = (NFS_RPC_AUTH_REJECT, '*') else: args = (NFS_EXPORT_VOL, False) gmgr.set_vol_option.assert_called_once_with(*args) def test_setup_via_manager_export_volumes_off(self): gmgr 
= mock.Mock() gmgr.path = None gmgr.get_vol_option = mock.Mock(return_value=False) self._driver.nfs_helper = glusterfs.GlusterNFSHelper self.assertRaises(exception.GlusterfsException, self._driver._setup_via_manager, {'manager': gmgr, 'share': self.share}) gmgr.get_vol_option.assert_called_once_with(NFS_EXPORT_VOL, boolean=True) def test_check_for_setup_error(self): self._driver.check_for_setup_error() def test_update_share_stats(self): self.mock_object(layout.GlusterfsShareDriverBase, '_update_share_stats') self._driver._update_share_stats() (layout.GlusterfsShareDriverBase._update_share_stats. assert_called_once_with({'storage_protocol': 'NFS', 'vendor_name': 'Red Hat', 'share_backend_name': 'GlusterFS', 'reserved_percentage': 50})) def test_get_network_allocations_number(self): self.assertEqual(0, self._driver.get_network_allocations_number()) def test_get_helper(self): ret = self._driver._get_helper() self.assertIsInstance(ret, self._driver.nfs_helper) @ddt.data({'path': '/fakepath', 'helper': glusterfs.GlusterNFSHelper}, {'path': None, 'helper': glusterfs.GlusterNFSVolHelper}) @ddt.unpack def test_get_helper_vol(self, path, helper): self._driver.nfs_helper = glusterfs.GlusterNFSHelper gmgr = mock.Mock(path=path) ret = self._driver._get_helper(gmgr) self.assertIsInstance(ret, helper) @ddt.data({'op': 'allow', 'kwargs': {}}, {'op': 'allow', 'kwargs': {'share_server': None}}, {'op': 'deny', 'kwargs': {}}, {'op': 'deny', 'kwargs': {'share_server': None}}) @ddt.unpack def test_allow_deny_access_via_manager(self, op, kwargs): self.mock_object(self._driver, '_get_helper') gmgr = mock.Mock() ret = getattr(self._driver, "_%s_access_via_manager" % op )(gmgr, self._context, self.share, fake_share.fake_access, **kwargs) self._driver._get_helper.assert_called_once_with(gmgr) getattr( self._driver._get_helper(), "%s_access" % op).assert_called_once_with( '/', self.share, fake_share.fake_access) self.assertIsNone(ret) @ddt.ddt class GlusterNFSHelperTestCase(test.TestCase): """Tests GlusterNFSHelper.""" def setUp(self): super(GlusterNFSHelperTestCase, self).setUp() fake_utils.stub_out_utils_execute(self) gluster_manager = mock.Mock(**fake_gluster_manager_attrs) self._execute = mock.Mock(return_value=('', '')) self.fake_conf = config.Configuration(None) self._helper = glusterfs.GlusterNFSHelper( self._execute, self.fake_conf, gluster_manager=gluster_manager) def test_get_export(self): ret = self._helper.get_export(mock.Mock()) self.assertEqual(fake_gluster_manager_attrs['export'], ret) @ddt.data({'output_str': '/foo(10.0.0.1|10.0.0.2),/bar(10.0.0.1)', 'expected': {'foo': ['10.0.0.1', '10.0.0.2'], 'bar': ['10.0.0.1']}}, {'output_str': None, 'expected': {}}) @ddt.unpack def test_get_export_dir_dict(self, output_str, expected): self.mock_object(self._helper.gluster_manager, 'get_vol_option', mock.Mock(return_value=output_str)) ret = self._helper._get_export_dir_dict() self.assertEqual(expected, ret) (self._helper.gluster_manager.get_vol_option. 
assert_called_once_with(NFS_EXPORT_DIR)) def test_manage_access_bad_access_type(self): cbk = None access = {'access_type': 'bad', 'access_to': None} self.assertRaises(exception.InvalidShareAccess, self._helper._manage_access, fake_share_name, access['access_type'], access['access_to'], cbk) def test_manage_access_noop(self): cbk = mock.Mock(return_value=True) access = fake_share.fake_access() export_dir_dict = mock.Mock() self.mock_object(self._helper, '_get_export_dir_dict', mock.Mock(return_value=export_dir_dict)) ret = self._helper._manage_access(fake_share_name, access['access_type'], access['access_to'], cbk) self._helper._get_export_dir_dict.assert_called_once_with() cbk.assert_called_once_with(export_dir_dict, fake_share_name, access['access_to']) self.assertIsNone(ret) def test_manage_access_adding_entry(self): def cbk(d, key, value): d[key].append(value) access = fake_share.fake_access() export_dir_dict = { 'example.com': ['10.0.0.1'], 'fakename': ['10.0.0.2'], } export_str = '/example.com(10.0.0.1),/fakename(10.0.0.2|10.0.0.1)' args = (NFS_EXPORT_DIR, export_str) self.mock_object(self._helper, '_get_export_dir_dict', mock.Mock(return_value=export_dir_dict)) ret = self._helper._manage_access(fake_share_name, access['access_type'], access['access_to'], cbk) self.assertIsNone(ret) self._helper._get_export_dir_dict.assert_called_once_with() self._helper.gluster_manager.set_vol_option.assert_called_once_with( *args) def test_manage_access_removing_last_entry(self): def cbk(d, key, value): d.pop(key) access = fake_share.fake_access() args = (NFS_EXPORT_DIR, None) export_dir_dict = {'fakename': ['10.0.0.1']} self.mock_object(self._helper, '_get_export_dir_dict', mock.Mock(return_value=export_dir_dict)) ret = self._helper._manage_access(fake_share_name, access['access_type'], access['access_to'], cbk) self.assertIsNone(ret) self._helper._get_export_dir_dict.assert_called_once_with() self._helper.gluster_manager.set_vol_option.assert_called_once_with( *args) def test_allow_access_with_share_having_noaccess(self): access = fake_share.fake_access() share = fake_share.fake_share() export_dir_dict = {'example.com': ['10.0.0.1']} export_str = '/example.com(10.0.0.1),/fakename(10.0.0.1)' self.mock_object(self._helper, '_get_export_dir_dict', mock.Mock(return_value=export_dir_dict)) self._helper.gluster_manager.path = '/fakename' self._helper.allow_access(None, share, access) self._helper._get_export_dir_dict.assert_called_once_with() self._helper.gluster_manager.set_vol_option.assert_called_once_with( NFS_EXPORT_DIR, export_str) def test_allow_access_with_share_having_access(self): access = fake_share.fake_access() share = fake_share.fake_share() export_dir_dict = {'fakename': ['10.0.0.1']} self.mock_object(self._helper, '_get_export_dir_dict', mock.Mock(return_value=export_dir_dict)) self._helper.gluster_manager.path = '/fakename' self._helper.allow_access(None, share, access) self._helper._get_export_dir_dict.assert_called_once_with() self.assertFalse(self._helper.gluster_manager.set_vol_option.called) def test_deny_access_with_share_having_noaccess(self): access = fake_share.fake_access() share = fake_share.fake_share() export_dir_dict = {} self.mock_object(self._helper, '_get_export_dir_dict', mock.Mock(return_value=export_dir_dict)) self._helper.gluster_manager.path = '/fakename' self._helper.deny_access(None, share, access) self._helper._get_export_dir_dict.assert_called_once_with() self.assertFalse(self._helper.gluster_manager.set_vol_option.called) def 
test_deny_access_with_share_having_access(self): access = fake_share.fake_access() share = fake_share.fake_share() export_dir_dict = { 'example.com': ['10.0.0.1'], 'fakename': ['10.0.0.1'], } export_str = '/example.com(10.0.0.1)' args = (NFS_EXPORT_DIR, export_str) self.mock_object(self._helper, '_get_export_dir_dict', mock.Mock(return_value=export_dir_dict)) self._helper.gluster_manager.path = '/fakename' self._helper.deny_access(None, share, access) self._helper._get_export_dir_dict.assert_called_once_with() self._helper.gluster_manager.set_vol_option.assert_called_once_with( *args) @ddt.ddt class GlusterNFSVolHelperTestCase(test.TestCase): """Tests GlusterNFSVolHelper.""" def setUp(self): super(GlusterNFSVolHelperTestCase, self).setUp() fake_utils.stub_out_utils_execute(self) gluster_manager = mock.Mock(**fake_gluster_manager_attrs) self._execute = mock.Mock(return_value=('', '')) self.fake_conf = config.Configuration(None) self._helper = glusterfs.GlusterNFSVolHelper( self._execute, self.fake_conf, gluster_manager=gluster_manager) @ddt.data({'output_str': '10.0.0.1,10.0.0.2', 'expected': ['10.0.0.1', '10.0.0.2']}, {'output_str': None, 'expected': []}) @ddt.unpack def test_get_vol_exports(self, output_str, expected): self.mock_object(self._helper.gluster_manager, 'get_vol_option', mock.Mock(return_value=output_str)) ret = self._helper._get_vol_exports() self.assertEqual(expected, ret) (self._helper.gluster_manager.get_vol_option. assert_called_once_with(NFS_RPC_AUTH_ALLOW)) def test_manage_access_bad_access_type(self): cbk = None access = {'access_type': 'bad', 'access_to': None} self.assertRaises(exception.InvalidShareAccess, self._helper._manage_access, access['access_type'], access['access_to'], cbk) def test_manage_access_noop(self): cbk = mock.Mock(return_value=True) access = fake_share.fake_access() export_list = mock.Mock() self.mock_object(self._helper, '_get_vol_exports', mock.Mock(return_value=export_list)) ret = self._helper._manage_access(access['access_type'], access['access_to'], cbk) self._helper._get_vol_exports.assert_called_once_with() cbk.assert_called_once_with(export_list, access['access_to']) self.assertIsNone(ret) def test_manage_access_adding_entry(self): def cbk(li, v): li.append(v) access = fake_share.fake_access() export_list = ['10.0.0.2'] self.mock_object(self._helper, '_get_vol_exports', mock.Mock(return_value=export_list)) ret = self._helper._manage_access(access['access_type'], access['access_to'], cbk) self.assertIsNone(ret) self._helper._get_vol_exports.assert_called_once_with() export_str = '10.0.0.2,10.0.0.1' argseq = ((NFS_RPC_AUTH_ALLOW, export_str), (NFS_RPC_AUTH_REJECT, None)) self.assertEqual( [mock.call(*a) for a in argseq], self._helper.gluster_manager.set_vol_option.call_args_list) def test_manage_access_removing_last_entry(self): def cbk(li, v): li.remove(v) access = fake_share.fake_access() export_list = ['10.0.0.1'] self.mock_object(self._helper, '_get_vol_exports', mock.Mock(return_value=export_list)) ret = self._helper._manage_access(access['access_type'], access['access_to'], cbk) self.assertIsNone(ret) self._helper._get_vol_exports.assert_called_once_with() argseq = ((NFS_RPC_AUTH_ALLOW, None), (NFS_RPC_AUTH_REJECT, '*')) self.assertEqual( [mock.call(*a) for a in argseq], self._helper.gluster_manager.set_vol_option.call_args_list) def test_allow_access_with_share_having_noaccess(self): access = fake_share.fake_access() share = fake_share.fake_share() export_list = ['10.0.0.2'] self.mock_object(self._helper, '_get_vol_exports', 
mock.Mock(return_value=export_list)) self._helper.allow_access(None, share, access) self._helper._get_vol_exports.assert_called_once_with() export_str = '10.0.0.2,10.0.0.1' argseq = ((NFS_RPC_AUTH_ALLOW, export_str), (NFS_RPC_AUTH_REJECT, None)) self.assertEqual( [mock.call(*a) for a in argseq], self._helper.gluster_manager.set_vol_option.call_args_list) def test_allow_access_with_share_having_access(self): access = fake_share.fake_access() share = fake_share.fake_share() export_list = ['10.0.0.1'] self.mock_object(self._helper, '_get_vol_exports', mock.Mock(return_value=export_list)) self._helper.allow_access(None, share, access) self._helper._get_vol_exports.assert_called_once_with() self.assertFalse(self._helper.gluster_manager.set_vol_option.called) def test_deny_access_with_share_having_noaccess(self): access = fake_share.fake_access() share = fake_share.fake_share() export_list = [] self.mock_object(self._helper, '_get_vol_exports', mock.Mock(return_value=export_list)) self._helper.deny_access(None, share, access) self._helper._get_vol_exports.assert_called_once_with() self.assertFalse(self._helper.gluster_manager.set_vol_option.called) def test_deny_access_with_share_having_access(self): access = fake_share.fake_access() share = fake_share.fake_share() export_list = ['10.0.0.1', '10.0.0.2'] self.mock_object(self._helper, '_get_vol_exports', mock.Mock(return_value=export_list)) self._helper.deny_access(None, share, access) self._helper._get_vol_exports.assert_called_once_with() export_str = '10.0.0.2' argseq = ((NFS_RPC_AUTH_ALLOW, export_str), (NFS_RPC_AUTH_REJECT, None)) self.assertEqual( [mock.call(*a) for a in argseq], self._helper.gluster_manager.set_vol_option.call_args_list) class GaneshaNFSHelperTestCase(test.TestCase): """Tests GaneshaNFSHelper.""" def setUp(self): super(GaneshaNFSHelperTestCase, self).setUp() self.gluster_manager = mock.Mock(**fake_gluster_manager_attrs) self._execute = mock.Mock(return_value=('', '')) self._root_execute = mock.Mock(return_value=('', '')) self.access = fake_share.fake_access() self.fake_conf = config.Configuration(None) self.fake_template = {'key': 'value'} self.share = fake_share.fake_share() self.mock_object(glusterfs.ganesha_utils, 'RootExecutor', mock.Mock(return_value=self._root_execute)) self.mock_object(glusterfs.ganesha.GaneshaNASHelper, '__init__', mock.Mock()) socket.gethostname = mock.Mock(return_value='example.com') self._helper = glusterfs.GaneshaNFSHelper( self._execute, self.fake_conf, gluster_manager=self.gluster_manager) self._helper.tag = 'GLUSTER-Ganesha-localhost' def test_init_local_ganesha_server(self): glusterfs.ganesha_utils.RootExecutor.assert_called_once_with( self._execute) socket.gethostname.assert_has_calls([mock.call()]) glusterfs.ganesha.GaneshaNASHelper.__init__.assert_has_calls( [mock.call(self._root_execute, self.fake_conf, tag='GLUSTER-Ganesha-example.com')]) def test_get_export(self): ret = self._helper.get_export(self.share) self.assertEqual('example.com:/fakename--', ret) def test_init_remote_ganesha_server(self): ssh_execute = mock.Mock(return_value=('', '')) CONF.set_default('glusterfs_ganesha_server_ip', 'fakeip') self.mock_object(glusterfs.ganesha_utils, 'SSHExecutor', mock.Mock(return_value=ssh_execute)) glusterfs.GaneshaNFSHelper( self._execute, self.fake_conf, gluster_manager=self.gluster_manager) glusterfs.ganesha_utils.SSHExecutor.assert_called_once_with( 'fakeip', 22, None, 'root', password=None, privatekey=None) glusterfs.ganesha.GaneshaNASHelper.__init__.assert_has_calls( 
[mock.call(ssh_execute, self.fake_conf, tag='GLUSTER-Ganesha-fakeip')]) def test_init_helper(self): ganeshelper = mock.Mock() exptemp = mock.Mock() def set_attributes(*a, **kw): self._helper.ganesha = ganeshelper self._helper.export_template = exptemp self.mock_object(ganesha.GaneshaNASHelper, 'init_helper', mock.Mock(side_effect=set_attributes)) self.assertEqual({}, glusterfs.GaneshaNFSHelper.shared_data) self._helper.init_helper() ganesha.GaneshaNASHelper.init_helper.assert_called_once_with() self.assertEqual(ganeshelper, self._helper.ganesha) self.assertEqual(exptemp, self._helper.export_template) self.assertEqual({ 'GLUSTER-Ganesha-localhost': { 'ganesha': ganeshelper, 'export_template': exptemp}}, glusterfs.GaneshaNFSHelper.shared_data) other_helper = glusterfs.GaneshaNFSHelper( self._execute, self.fake_conf, gluster_manager=self.gluster_manager) other_helper.tag = 'GLUSTER-Ganesha-localhost' other_helper.init_helper() self.assertEqual(ganeshelper, other_helper.ganesha) self.assertEqual(exptemp, other_helper.export_template) def test_default_config_hook(self): fake_conf_dict = {'key': 'value1'} mock_ganesha_utils_patch = mock.Mock() def fake_patch_run(tmpl1, tmpl2): mock_ganesha_utils_patch( copy.deepcopy(tmpl1), tmpl2) tmpl1.update(tmpl2) self.mock_object(glusterfs.ganesha.GaneshaNASHelper, '_default_config_hook', mock.Mock(return_value=self.fake_template)) self.mock_object(glusterfs.ganesha_utils, 'path_from', mock.Mock(return_value='/fakedir/glusterfs/conf')) self.mock_object(self._helper, '_load_conf_dir', mock.Mock(return_value=fake_conf_dict)) self.mock_object(glusterfs.ganesha_utils, 'patch', mock.Mock(side_effect=fake_patch_run)) ret = self._helper._default_config_hook() glusterfs.ganesha.GaneshaNASHelper._default_config_hook.\ assert_called_once_with() glusterfs.ganesha_utils.path_from.assert_called_once_with( glusterfs.__file__, 'conf') self._helper._load_conf_dir.assert_called_once_with( '/fakedir/glusterfs/conf') glusterfs.ganesha_utils.patch.assert_called_once_with( self.fake_template, fake_conf_dict) self.assertEqual(fake_conf_dict, ret) def test_fsal_hook(self): self._helper.gluster_manager.path = '/fakename' output = { 'Hostname': '127.0.0.1', 'Volume': 'testvol', 'Volpath': '/fakename' } ret = self._helper._fsal_hook('/fakepath', self.share, self.access) self.assertEqual(output, ret) manila-2.0.0/manila/tests/share/drivers/cephfs/0000775000567000056710000000000012701407265022552 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/cephfs/test_cephfs_native.py0000664000567000056710000003314012701407107026775 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
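# [Illustrative note] test_create_share below expects an export path built
# from the Ceph monitor addresses and the volume mount path, e.g.
# '1.2.3.4,5.6.7.8:/foo/bar'. A minimal sketch of that composition follows;
# the function name is hypothetical, not the driver's actual helper.

def _build_export_path(mon_addrs, mount_path):
    # Monitor addresses are comma-joined and the CephFS path is appended.
    return '{0}:{1}'.format(','.join(mon_addrs), mount_path)


assert _build_export_path(['1.2.3.4', '5.6.7.8'], '/foo/bar') == (
    '1.2.3.4,5.6.7.8:/foo/bar')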
import mock from oslo_utils import units from manila.common import constants from manila import context import manila.exception as exception from manila.share import configuration from manila.share.drivers.cephfs import cephfs_native from manila.share import share_types from manila import test from manila.tests import fake_share class MockVolumeClientModule(object): """Mocked up version of ceph's VolumeClient interface.""" class VolumePath(object): """Copy of VolumePath from CephFSVolumeClient.""" def __init__(self, group_id, volume_id): self.group_id = group_id self.volume_id = volume_id def __eq__(self, other): return (self.group_id == other.group_id and self.volume_id == other.volume_id) def __str__(self): return "{0}/{1}".format(self.group_id, self.volume_id) class CephFSVolumeClient(mock.Mock): mock_used_bytes = 0 def __init__(self, *args, **kwargs): mock.Mock.__init__(self, spec=[ "connect", "disconnect", "create_snapshot_volume", "destroy_snapshot_volume", "create_group", "destroy_group", "delete_volume", "purge_volume", "deauthorize", "evict", "set_max_bytes", "destroy_snapshot_group", "create_snapshot_group", "disconnect" ]) self.create_volume = mock.Mock(return_value={ "mount_path": "/foo/bar" }) self.get_mon_addrs = mock.Mock(return_value=["1.2.3.4", "5.6.7.8"]) self.authorize = mock.Mock(return_value={"auth_key": "abc123"}) self.get_used_bytes = mock.Mock(return_value=self.mock_used_bytes) self.rados = mock.Mock() self.rados.get_cluster_stats = mock.Mock(return_value={ "kb": 1000, "kb_avail": 500 }) class CephFSNativeDriverTestCase(test.TestCase): """Test the CephFS native driver. This is a very simple driver that mainly calls through to the CephFSVolumeClient interface, so the tests validate that the Manila driver calls map to the appropriate CephFSVolumeClient calls. 
""" def setUp(self): super(CephFSNativeDriverTestCase, self).setUp() self.fake_conf = configuration.Configuration(None) self._context = context.get_admin_context() self._share = fake_share.fake_share(share_proto='CEPHFS') self.fake_conf.set_default('driver_handles_share_servers', False) self.mock_object(cephfs_native, "ceph_volume_client", MockVolumeClientModule) self.mock_object(cephfs_native, "ceph_module_found", True) self._driver = ( cephfs_native.CephFSNativeDriver(configuration=self.fake_conf)) self.mock_object(share_types, 'get_share_type_extra_specs', mock.Mock(return_value={})) def test_create_share(self): expected_export_locations = { 'path': '1.2.3.4,5.6.7.8:/foo/bar', 'is_admin_only': False, 'metadata': {}, } export_locations = self._driver.create_share(self._context, self._share) self.assertEqual(expected_export_locations, export_locations) self._driver._volume_client.create_volume.assert_called_once_with( self._driver._share_path(self._share), size=self._share['size'] * units.Gi, data_isolated=False) def test_ensure_share(self): self._driver.ensure_share(self._context, self._share) self._driver._volume_client.create_volume.assert_called_once_with( self._driver._share_path(self._share), size=self._share['size'] * units.Gi, data_isolated=False) def test_create_data_isolated(self): self.mock_object(share_types, 'get_share_type_extra_specs', mock.Mock(return_value={"cephfs:data_isolated": True}) ) self._driver.create_share(self._context, self._share) self._driver._volume_client.create_volume.assert_called_once_with( self._driver._share_path(self._share), size=self._share['size'] * units.Gi, data_isolated=True) def test_delete_share(self): self._driver.delete_share(self._context, self._share) self._driver._volume_client.delete_volume.assert_called_once_with( self._driver._share_path(self._share), data_isolated=False) self._driver._volume_client.purge_volume.assert_called_once_with( self._driver._share_path(self._share), data_isolated=False) def test_delete_data_isolated(self): self.mock_object(share_types, 'get_share_type_extra_specs', mock.Mock(return_value={"cephfs:data_isolated": True}) ) self._driver.delete_share(self._context, self._share) self._driver._volume_client.delete_volume.assert_called_once_with( self._driver._share_path(self._share), data_isolated=True) self._driver._volume_client.purge_volume.assert_called_once_with( self._driver._share_path(self._share), data_isolated=True) def test_allow_access(self): access_rule = { 'access_level': constants.ACCESS_LEVEL_RW, 'access_type': 'cephx', 'access_to': 'alice' } self._driver._allow_access(self._context, self._share, access_rule) self._driver._volume_client.authorize.assert_called_once_with( self._driver._share_path(self._share), "alice") def test_allow_access_wrong_type(self): self.assertRaises(exception.InvalidShareAccess, self._driver._allow_access, self._context, self._share, { 'access_level': constants.ACCESS_LEVEL_RW, 'access_type': 'RHUBARB', 'access_to': 'alice' }) def test_allow_access_ro(self): self.assertRaises(exception.InvalidShareAccessLevel, self._driver._allow_access, self._context, self._share, { 'access_level': constants.ACCESS_LEVEL_RO, 'access_type': 'cephx', 'access_to': 'alice' }) def test_deny_access(self): self._driver._deny_access(self._context, self._share, { 'access_level': 'rw', 'access_type': 'cephx', 'access_to': 'alice' }) self._driver._volume_client.deauthorize.assert_called_once_with( self._driver._share_path(self._share), "alice") def test_update_access_add_rm(self): alice = { 
'access_level': 'rw', 'access_type': 'cephx', 'access_to': 'alice' } bob = { 'access_level': 'rw', 'access_type': 'cephx', 'access_to': 'bob' } self._driver.update_access(self._context, self._share, access_rules=[alice], add_rules=[alice], delete_rules=[bob]) self._driver._volume_client.authorize.assert_called_once_with( self._driver._share_path(self._share), "alice") self._driver._volume_client.deauthorize.assert_called_once_with( self._driver._share_path(self._share), "bob") def test_update_access_all(self): alice = { 'access_level': 'rw', 'access_type': 'cephx', 'access_to': 'alice' } self._driver.update_access(self._context, self._share, access_rules=[alice], add_rules=[], delete_rules=[]) self._driver._volume_client.authorize.assert_called_once_with( self._driver._share_path(self._share), "alice") def test_extend_share(self): new_size_gb = self._share['size'] * 2 new_size = new_size_gb * units.Gi self._driver.extend_share(self._share, new_size_gb, None) self._driver._volume_client.set_max_bytes.assert_called_once_with( self._driver._share_path(self._share), new_size) def test_shrink_share(self): new_size_gb = self._share['size'] * 0.5 new_size = new_size_gb * units.Gi self._driver.shrink_share(self._share, new_size_gb, None) self._driver._volume_client.get_used_bytes.assert_called_once_with( self._driver._share_path(self._share)) self._driver._volume_client.set_max_bytes.assert_called_once_with( self._driver._share_path(self._share), new_size) def test_shrink_share_full(self): """That shrink fails when share is too full.""" new_size_gb = self._share['size'] * 0.5 # Pretend to be full up vc = MockVolumeClientModule.CephFSVolumeClient vc.mock_used_bytes = (units.Gi * self._share['size']) self.assertRaises(exception.ShareShrinkingPossibleDataLoss, self._driver.shrink_share, self._share, new_size_gb, None) self._driver._volume_client.set_max_bytes.assert_not_called() def test_create_snapshot(self): self._driver.create_snapshot(self._context, { "share": self._share, "name": "snappy1" }, None) (self._driver._volume_client.create_snapshot_volume .assert_called_once_with( self._driver._share_path(self._share), "snappy1")) def test_delete_snapshot(self): self._driver.delete_snapshot(self._context, { "share": self._share, "name": "snappy1" }, None) (self._driver._volume_client.destroy_snapshot_volume .assert_called_once_with( self._driver._share_path(self._share), "snappy1")) def test_create_consistency_group(self): self._driver.create_consistency_group(self._context, {"id": "grp1"}, None) self._driver._volume_client.create_group.assert_called_once_with( "grp1") def test_delete_consistency_group(self): self._driver.delete_consistency_group(self._context, {"id": "grp1"}, None) self._driver._volume_client.destroy_group.assert_called_once_with( "grp1") def test_create_cg_snapshot(self): self._driver.create_cgsnapshot(self._context, { 'consistency_group_id': 'cgid', 'id': 'snapid' }) (self._driver._volume_client.create_snapshot_group. assert_called_once_with("cgid", "snapid")) def test_delete_cgsnapshot(self): self._driver.delete_cgsnapshot(self._context, { 'consistency_group_id': 'cgid', 'id': 'snapid' }) (self._driver._volume_client.destroy_snapshot_group. 
assert_called_once_with("cgid", "snapid")) def test_delete_driver(self): # Create share to prompt volume_client construction self._driver.create_share(self._context, self._share) vc = self._driver._volume_client del self._driver vc.disconnect.assert_called_once_with() def test_delete_driver_no_client(self): self.assertEqual(None, self._driver._volume_client) del self._driver def test_connect_noevict(self): # When acting as "admin", driver should skip evicting self._driver.configuration.local_conf.set_override('cephfs_auth_id', "admin") self._driver.create_share(self._context, self._share) vc = self._driver._volume_client vc.connect.assert_called_once_with(premount_evict=None) def test_update_share_stats(self): self._driver._volume_client self._driver._update_share_stats() result = self._driver._stats self.assertEqual("CEPHFS", result['storage_protocol']) def test_module_missing(self): cephfs_native.ceph_module_found = False cephfs_native.ceph_volume_client = None self.assertRaises(exception.ManilaException, self._driver.create_share, self._context, self._share) manila-2.0.0/manila/tests/share/drivers/cephfs/__init__.py0000664000567000056710000000000012701407107024644 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/test_generic.py0000664000567000056710000026031012701407112024320 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 NetApp, Inc. # Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
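# [Illustrative note] The CephFS shrink tests above check that shrinking is
# refused when the bytes already stored exceed the requested new size. A
# minimal sketch of that guard, assuming the size is given in GiB; the
# function name is hypothetical and this is not the driver's actual code.

from oslo_utils import units


def _can_shrink(used_bytes, new_size_gib):
    # Refuse to shrink below the space the share already consumes.
    return used_bytes <= new_size_gib * units.Gi


assert _can_shrink(512 * units.Mi, 1)
assert not _can_shrink(2 * units.Gi, 1)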
"""Unit tests for the Generic driver module.""" import os import time import ddt import mock from oslo_concurrency import processutils from oslo_config import cfg from six import moves from manila.common import constants as const from manila import compute from manila import context from manila import exception import manila.share.configuration from manila.share.drivers import generic from manila.share import share_types from manila import test from manila.tests import fake_compute from manila.tests import fake_service_instance from manila.tests import fake_share from manila.tests import fake_volume from manila import utils from manila import volume CONF = cfg.CONF def get_fake_manage_share(): return { 'id': 'fake', 'share_proto': 'NFS', 'share_type_id': 'fake', 'export_locations': [ {'path': '10.0.0.1:/foo/fake/path'}, {'path': '11.0.0.1:/bar/fake/path'}, ], } def get_fake_snap_dict(): snap_dict = { 'status': 'available', 'project_id': '13c0be6290934bd98596cfa004650049', 'user_id': 'a0314a441ca842019b0952224aa39192', 'description': None, 'deleted': '0', 'created_at': '2015-08-10 00:05:58', 'updated_at': '2015-08-10 00:05:58', 'consistency_group_id': '4b04fdc3-00b9-4909-ba1a-06e9b3f88b67', 'cgsnapshot_members': [ { 'status': 'available', 'share_type_id': '1a9ed31e-ee70-483d-93ba-89690e028d7f', 'share_id': 'e14b5174-e534-4f35-bc4f-fe81c1575d6f', 'user_id': 'a0314a441ca842019b0952224aa39192', 'deleted': 'False', 'created_at': '2015-08-10 00:05:58', 'share': { 'id': '03e2f06e-14f2-45a5-9631-0949d1937bd8', 'deleted': False, }, 'updated_at': '2015-08-10 00:05:58', 'share_proto': 'NFS', 'project_id': '13c0be6290934bd98596cfa004650049', 'cgsnapshot_id': 'f6aa3b59-57eb-421e-965c-4e182538e36a', 'deleted_at': None, 'id': '03e2f06e-14f2-45a5-9631-0949d1937bd8', 'size': 1, }, ], 'deleted_at': None, 'id': 'f6aa3b59-57eb-421e-965c-4e182538e36a', 'name': None, } return snap_dict def get_fake_cg_dict(): cg_dict = { 'status': 'creating', 'project_id': '13c0be6290934bd98596cfa004650049', 'user_id': 'a0314a441ca842019b0952224aa39192', 'description': None, 'deleted': 'False', 'created_at': '2015-08-10 00:07:58', 'updated_at': None, 'source_cgsnapshot_id': 'f6aa3b59-57eb-421e-965c-4e182538e36a', 'host': 'openstack2@cmodeSSVMNFS', 'deleted_at': None, 'shares': [ { 'id': '02a32f06e-14f2-45a5-9631-7483f1937bd8', 'deleted': False, 'source_cgsnapshot_member_id': '03e2f06e-14f2-45a5-9631-0949d1937bd8', }, ], 'share_types': [ { 'id': 'f6aa3b56-45a5-9631-02a32f06e1937b', 'deleted': False, 'consistency_group_id': '4b04fdc3-00b9-4909-ba1a-06e9b3f88b67', 'share_type_id': '1a9ed31e-ee70-483d-93ba-89690e028d7f', }, ], 'id': 'eda52174-0442-476d-9694-a58327466c14', 'name': None } return cg_dict def get_fake_collated_cg_snap_info(): fake_collated_cg_snap_info = [ { 'share': { 'id': '02a32f06e-14f2-45a5-9631-7483f1937bd8', 'deleted': False, 'source_cgsnapshot_member_id': '03e2f06e-14f2-45a5-9631-0949d1937bd8', }, 'snapshot': { 'id': '03e2f06e-14f2-45a5-9631-0949d1937bd8', }, }, ] return fake_collated_cg_snap_info def get_fake_access_rule(access_to, access_level, access_type='ip'): return { 'access_type': access_type, 'access_to': access_to, 'access_level': access_level, } @ddt.ddt class GenericShareDriverTestCase(test.TestCase): """Tests GenericShareDriver.""" def setUp(self): super(GenericShareDriverTestCase, self).setUp() self._context = context.get_admin_context() self._execute = mock.Mock(return_value=('', '')) self._helper_cifs = mock.Mock() self._helper_nfs = mock.Mock() CONF.set_default('driver_handles_share_servers', 
True) self.fake_conf = manila.share.configuration.Configuration(None) self.fake_private_storage = mock.Mock() self.mock_object(self.fake_private_storage, 'get', mock.Mock(return_value=None)) with mock.patch.object( generic.service_instance, 'ServiceInstanceManager', fake_service_instance.FakeServiceInstanceManager): self._driver = generic.GenericShareDriver( private_storage=self.fake_private_storage, execute=self._execute, configuration=self.fake_conf) self._driver.service_tenant_id = 'service tenant id' self._driver.service_network_id = 'service network id' self._driver.compute_api = fake_compute.API() self._driver.volume_api = fake_volume.API() self._driver.share_networks_locks = {} self._driver.get_service_instance = mock.Mock() self._driver.share_networks_servers = {} self._driver.admin_context = self._context self.fake_sn = {"id": "fake_sn_id"} self.fake_net_info = { "id": "fake_srv_id", "share_network_id": "fake_sn_id" } fsim = fake_service_instance.FakeServiceInstanceManager() sim = mock.Mock(return_value=fsim) self._driver.instance_manager = sim self._driver.service_instance_manager = sim self.fake_server = sim._create_service_instance( context="fake", instance_name="fake", share_network_id=self.fake_sn["id"], old_server_ip="fake") self.mock_object(utils, 'synchronized', mock.Mock(return_value=lambda f: f)) self.mock_object(generic.os.path, 'exists', mock.Mock(return_value=True)) self._driver._helpers = { 'CIFS': self._helper_cifs, 'NFS': self._helper_nfs, } self.share = fake_share.fake_share(share_proto='NFS') self.server = { 'instance_id': 'fake_instance_id', 'ip': 'fake_ip', 'username': 'fake_username', 'password': 'fake_password', 'pk_path': 'fake_pk_path', 'backend_details': { 'ip': '1.2.3.4', 'instance_id': 'fake', 'service_ip': 'fake_ip', }, 'availability_zone': 'fake_az', } self.access = fake_share.fake_access() self.snapshot = fake_share.fake_snapshot() self.mock_object(time, 'sleep') self.mock_debug_log = self.mock_object(generic.LOG, 'debug') self.mock_warning_log = self.mock_object(generic.LOG, 'warning') self.mock_error_log = self.mock_object(generic.LOG, 'error') self.mock_exception_log = self.mock_object(generic.LOG, 'exception') @ddt.data(True, False) def test_do_setup_with_dhss(self, dhss): CONF.set_default('driver_handles_share_servers', dhss) fake_server = {'id': 'fake_server_id'} self.mock_object(volume, 'API') self.mock_object(compute, 'API') self.mock_object(self._driver, '_setup_helpers') self.mock_object( self._driver, '_is_share_server_active', mock.Mock(return_value=True)) self.mock_object( self._driver.service_instance_manager, 'get_common_server', mock.Mock(return_value=fake_server)) self._driver.do_setup(self._context) volume.API.assert_called_once_with() compute.API.assert_called_once_with() self._driver._setup_helpers.assert_called_once_with() if not dhss: self._driver.service_instance_manager.get_common_server.\ assert_called_once_with() self._driver._is_share_server_active.assert_called_once_with( self._context, fake_server) else: self.assertFalse( self._driver.service_instance_manager.get_common_server.called) self.assertFalse(self._driver._is_share_server_active.called) @mock.patch('time.sleep') def test_do_setup_dhss_false_server_avail_after_retry(self, mock_sleep): # This tests the scenario in which the common share server cannot be # retrieved during the first attempt, is not active during the second, # becoming active during the third attempt. 
CONF.set_default('driver_handles_share_servers', False) fake_server = {'id': 'fake_server_id'} self.mock_object(volume, 'API') self.mock_object(compute, 'API') self.mock_object(self._driver, '_setup_helpers') self.mock_object( self._driver, '_is_share_server_active', mock.Mock(side_effect=[False, True])) self.mock_object( self._driver.service_instance_manager, 'get_common_server', mock.Mock(side_effect=[exception.ManilaException, fake_server, fake_server])) self._driver.do_setup(self._context) volume.API.assert_called_once_with() compute.API.assert_called_once_with() self._driver._setup_helpers.assert_called_once_with() self._driver.service_instance_manager.get_common_server.\ assert_has_calls([mock.call()] * 3) self._driver._is_share_server_active.assert_has_calls( [mock.call(self._context, fake_server)] * 2) mock_sleep.assert_has_calls([mock.call(5)] * 2) def test_setup_helpers(self): self._driver._helpers = {} CONF.set_default('share_helpers', ['NFS=fakenfs']) self.mock_object(generic.importutils, 'import_class', mock.Mock(return_value=self._helper_nfs)) self._driver._setup_helpers() generic.importutils.import_class.assert_has_calls([ mock.call('fakenfs') ]) self._helper_nfs.assert_called_once_with( self._execute, self._driver._ssh_exec, self.fake_conf ) self.assertEqual(1, len(self._driver._helpers)) def test_setup_helpers_no_helpers(self): self._driver._helpers = {} CONF.set_default('share_helpers', []) self.assertRaises(exception.ManilaException, self._driver._setup_helpers) def test_create_share(self): volume = 'fake_volume' volume2 = 'fake_volume2' self._helper_nfs.create_export.return_value = 'fakelocation' self.mock_object(self._driver, '_allocate_container', mock.Mock(return_value=volume)) self.mock_object(self._driver, '_attach_volume', mock.Mock(return_value=volume2)) self.mock_object(self._driver, '_format_device') self.mock_object(self._driver, '_mount_device') expected_el = [{ 'is_admin_only': False, 'path': 'fakelocation', 'metadata': {'export_location_metadata_example': 'example'}, }] result = self._driver.create_share( self._context, self.share, share_server=self.server) self.assertEqual(expected_el, result) self._driver._allocate_container.assert_called_once_with( self._driver.admin_context, self.share) self._driver._attach_volume.assert_called_once_with( self._driver.admin_context, self.share, self.server['backend_details']['instance_id'], volume) self._driver._format_device.assert_called_once_with( self.server['backend_details'], volume2) self._driver._mount_device.assert_called_once_with( self.share, self.server['backend_details'], volume2) def test_create_share_exception(self): share = fake_share.fake_share(share_network_id=None) self.assertRaises(exception.ManilaException, self._driver.create_share, self._context, share) def test_create_share_invalid_helper(self): self._driver._helpers = {'CIFS': self._helper_cifs} self.assertRaises(exception.InvalidShare, self._driver.create_share, self._context, self.share, share_server=self.server) def test_is_device_file_available(self): volume = {'mountpoint': 'fake_mount_point'} self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=None)) self._driver._is_device_file_available(self.server, volume) self._driver._ssh_exec.assert_called_once_with( self.server, ['sudo', 'test', '-b', volume['mountpoint']]) def test_format_device(self): volume = {'mountpoint': 'fake_mount_point'} self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=('', ''))) self.mock_object(self._driver, '_is_device_file_available') 
self._driver._format_device(self.server, volume) self._driver._is_device_file_available.assert_called_once_with( self.server, volume) self._driver._ssh_exec.assert_called_once_with( self.server, ['sudo', 'mkfs.%s' % self.fake_conf.share_volume_fstype, volume['mountpoint']]) def test_mount_device_not_present(self): server = {'instance_id': 'fake_server_id'} mount_path = self._driver._get_mount_path(self.share) volume = {'mountpoint': 'fake_mount_point'} self.mock_object(self._driver, '_is_device_mounted', mock.Mock(return_value=False)) self.mock_object(self._driver, '_sync_mount_temp_and_perm_files') self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=('', ''))) self._driver._mount_device(self.share, server, volume) self._driver._is_device_mounted.assert_called_once_with( mount_path, server, volume) self._driver._sync_mount_temp_and_perm_files.assert_called_once_with( server) self._driver._ssh_exec.assert_called_once_with( server, ['sudo mkdir -p', mount_path, '&&', 'sudo mount', volume['mountpoint'], mount_path, '&& sudo chmod 777', mount_path], ) def test_mount_device_present(self): mount_path = '/fake/mount/path' volume = {'mountpoint': 'fake_mount_point'} self.mock_object(self._driver, '_is_device_mounted', mock.Mock(return_value=True)) self.mock_object(self._driver, '_get_mount_path', mock.Mock(return_value=mount_path)) self.mock_object(generic.LOG, 'warning') self._driver._mount_device(self.share, self.server, volume) self._driver._get_mount_path.assert_called_once_with(self.share) self._driver._is_device_mounted.assert_called_once_with( mount_path, self.server, volume) generic.LOG.warning.assert_called_once_with(mock.ANY, mock.ANY) def test_mount_device_exception_raised(self): volume = {'mountpoint': 'fake_mount_point'} self.mock_object( self._driver, '_is_device_mounted', mock.Mock(side_effect=exception.ProcessExecutionError)) self.assertRaises( exception.ShareBackendException, self._driver._mount_device, self.share, self.server, volume, ) self._driver._is_device_mounted.assert_called_once_with( self._driver._get_mount_path(self.share), self.server, volume) def test_unmount_device_present(self): mount_path = '/fake/mount/path' self.mock_object(self._driver, '_is_device_mounted', mock.Mock(return_value=True)) self.mock_object(self._driver, '_sync_mount_temp_and_perm_files') self.mock_object(self._driver, '_get_mount_path', mock.Mock(return_value=mount_path)) self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=('', ''))) self._driver._unmount_device(self.share, self.server) self._driver._get_mount_path.assert_called_once_with(self.share) self._driver._is_device_mounted.assert_called_once_with( mount_path, self.server) self._driver._sync_mount_temp_and_perm_files.assert_called_once_with( self.server) self._driver._ssh_exec.assert_called_once_with( self.server, ['sudo umount', mount_path, '&& sudo rmdir', mount_path], ) def test_unmount_device_retry_once(self): self.counter = 0 def _side_effect(*args): self.counter += 1 if self.counter < 2: raise exception.ProcessExecutionError mount_path = '/fake/mount/path' self.mock_object(self._driver, '_is_device_mounted', mock.Mock(return_value=True)) self.mock_object(self._driver, '_sync_mount_temp_and_perm_files') self.mock_object(self._driver, '_get_mount_path', mock.Mock(return_value=mount_path)) self.mock_object(self._driver, '_ssh_exec', mock.Mock(side_effect=_side_effect)) self._driver._unmount_device(self.share, self.server) self.assertEqual(1, time.sleep.call_count) self.assertEqual([mock.call(self.share) for 
i in moves.range(2)], self._driver._get_mount_path.mock_calls) self.assertEqual([mock.call(mount_path, self.server) for i in moves.range(2)], self._driver._is_device_mounted.mock_calls) self._driver._sync_mount_temp_and_perm_files.assert_called_once_with( self.server) self.assertEqual( [mock.call(self.server, ['sudo umount', mount_path, '&& sudo rmdir', mount_path]) for i in moves.range(2)], self._driver._ssh_exec.mock_calls, ) def test_unmount_device_not_present(self): mount_path = '/fake/mount/path' self.mock_object(self._driver, '_is_device_mounted', mock.Mock(return_value=False)) self.mock_object(self._driver, '_get_mount_path', mock.Mock(return_value=mount_path)) self.mock_object(generic.LOG, 'warning') self._driver._unmount_device(self.share, self.server) self._driver._get_mount_path.assert_called_once_with(self.share) self._driver._is_device_mounted.assert_called_once_with( mount_path, self.server) generic.LOG.warning.assert_called_once_with(mock.ANY, mock.ANY) def test_is_device_mounted_true(self): volume = {'mountpoint': 'fake_mount_point', 'id': 'fake_id'} mount_path = '/fake/mount/path' mounts = "%(dev)s on %(path)s" % {'dev': volume['mountpoint'], 'path': mount_path} self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=(mounts, ''))) result = self._driver._is_device_mounted( mount_path, self.server, volume) self._driver._ssh_exec.assert_called_once_with( self.server, ['sudo', 'mount']) self.assertTrue(result) def test_is_device_mounted_true_no_volume_provided(self): mount_path = '/fake/mount/path' mounts = "/fake/dev/path on %(path)s type fake" % {'path': mount_path} self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=(mounts, ''))) result = self._driver._is_device_mounted(mount_path, self.server) self._driver._ssh_exec.assert_called_once_with( self.server, ['sudo', 'mount']) self.assertTrue(result) def test_is_device_mounted_false(self): mount_path = '/fake/mount/path' volume = {'mountpoint': 'fake_mount_point', 'id': 'fake_id'} mounts = "%(dev)s on %(path)s" % {'dev': '/fake', 'path': mount_path} self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=(mounts, ''))) result = self._driver._is_device_mounted( mount_path, self.server, volume) self._driver._ssh_exec.assert_called_once_with( self.server, ['sudo', 'mount']) self.assertFalse(result) def test_is_device_mounted_false_no_volume_provided(self): mount_path = '/fake/mount/path' mounts = "%(path)s" % {'path': 'fake'} self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=(mounts, ''))) self.mock_object(self._driver, '_get_mount_path', mock.Mock(return_value=mount_path)) result = self._driver._is_device_mounted(mount_path, self.server) self._driver._ssh_exec.assert_called_once_with( self.server, ['sudo', 'mount']) self.assertFalse(result) def test_sync_mount_temp_and_perm_files(self): self.mock_object(self._driver, '_ssh_exec') self._driver._sync_mount_temp_and_perm_files(self.server) self._driver._ssh_exec.has_calls( mock.call( self.server, ['sudo', 'cp', const.MOUNT_FILE_TEMP, const.MOUNT_FILE]), mock.call(self.server, ['sudo', 'mount', '-a'])) def test_sync_mount_temp_and_perm_files_raise_error_on_copy(self): self.mock_object( self._driver, '_ssh_exec', mock.Mock(side_effect=exception.ProcessExecutionError)) self.assertRaises( exception.ShareBackendException, self._driver._sync_mount_temp_and_perm_files, self.server ) self._driver._ssh_exec.assert_called_once_with( self.server, ['sudo', 'cp', const.MOUNT_FILE_TEMP, const.MOUNT_FILE]) def 
test_sync_mount_temp_and_perm_files_raise_error_on_mount(self): def raise_error_on_mount(*args, **kwargs): if args[1][1] == 'cp': raise exception.ProcessExecutionError() self.mock_object(self._driver, '_ssh_exec', mock.Mock(side_effect=raise_error_on_mount)) self.assertRaises( exception.ShareBackendException, self._driver._sync_mount_temp_and_perm_files, self.server ) self._driver._ssh_exec.has_calls( mock.call( self.server, ['sudo', 'cp', const.MOUNT_FILE_TEMP, const.MOUNT_FILE]), mock.call(self.server, ['sudo', 'mount', '-a'])) def test_get_mount_path(self): result = self._driver._get_mount_path(self.share) self.assertEqual(os.path.join(CONF.share_mount_path, self.share['name']), result) def test_attach_volume_not_attached(self): available_volume = fake_volume.FakeVolume() attached_volume = fake_volume.FakeVolume(status='in-use') self.mock_object(self._driver.compute_api, 'instance_volume_attach') self.mock_object(self._driver.volume_api, 'get', mock.Mock(return_value=attached_volume)) result = self._driver._attach_volume(self._context, self.share, 'fake_inst_id', available_volume) self._driver.compute_api.instance_volume_attach.\ assert_called_once_with(self._context, 'fake_inst_id', available_volume['id']) self._driver.volume_api.get.assert_called_once_with( self._context, attached_volume['id']) self.assertEqual(attached_volume, result) def test_attach_volume_attached_correct(self): fake_server = fake_compute.FakeServer() attached_volume = fake_volume.FakeVolume(status='in-use') self.mock_object(self._driver.compute_api, 'instance_volumes_list', mock.Mock(return_value=[attached_volume])) result = self._driver._attach_volume(self._context, self.share, fake_server, attached_volume) self.assertEqual(attached_volume, result) def test_attach_volume_attached_incorrect(self): fake_server = fake_compute.FakeServer() attached_volume = fake_volume.FakeVolume(status='in-use') anoter_volume = fake_volume.FakeVolume(id='fake_id2', status='in-use') self.mock_object(self._driver.compute_api, 'instance_volumes_list', mock.Mock(return_value=[anoter_volume])) self.assertRaises(exception.ManilaException, self._driver._attach_volume, self._context, self.share, fake_server, attached_volume) @ddt.data(exception.ManilaException, exception.Invalid) def test_attach_volume_failed_attach(self, side_effect): fake_server = fake_compute.FakeServer() available_volume = fake_volume.FakeVolume() self.mock_object(self._driver.compute_api, 'instance_volume_attach', mock.Mock(side_effect=side_effect)) self.assertRaises(exception.ManilaException, self._driver._attach_volume, self._context, self.share, fake_server, available_volume) self.assertEqual( 3, self._driver.compute_api.instance_volume_attach.call_count) def test_attach_volume_attached_retry_correct(self): fake_server = fake_compute.FakeServer() attached_volume = fake_volume.FakeVolume(status='available') in_use_volume = fake_volume.FakeVolume(status='in-use') side_effect = [exception.Invalid("Fake"), attached_volume] attach_mock = mock.Mock(side_effect=side_effect) self.mock_object(self._driver.compute_api, 'instance_volume_attach', attach_mock) self.mock_object(self._driver.compute_api, 'instance_volumes_list', mock.Mock(return_value=[attached_volume])) self.mock_object(self._driver.volume_api, 'get', mock.Mock(return_value=in_use_volume)) result = self._driver._attach_volume(self._context, self.share, fake_server, attached_volume) self.assertEqual(in_use_volume, result) self.assertEqual( 2, self._driver.compute_api.instance_volume_attach.call_count) def 
test_attach_volume_error(self): fake_server = fake_compute.FakeServer() available_volume = fake_volume.FakeVolume() error_volume = fake_volume.FakeVolume(status='error') self.mock_object(self._driver.compute_api, 'instance_volume_attach') self.mock_object(self._driver.volume_api, 'get', mock.Mock(return_value=error_volume)) self.assertRaises(exception.ManilaException, self._driver._attach_volume, self._context, self.share, fake_server, available_volume) def test_get_volume(self): volume = fake_volume.FakeVolume( name=CONF.volume_name_template % self.share['id']) self.mock_object(self._driver.volume_api, 'get_all', mock.Mock(return_value=[volume])) result = self._driver._get_volume(self._context, self.share['id']) self.assertEqual(volume, result) self._driver.volume_api.get_all.assert_called_once_with( self._context, {'all_tenants': True, 'name': volume['name']}) def test_get_volume_with_private_data(self): volume = fake_volume.FakeVolume() self.mock_object(self._driver.volume_api, 'get', mock.Mock(return_value=volume)) self.mock_object(self.fake_private_storage, 'get', mock.Mock(return_value=volume['id'])) result = self._driver._get_volume(self._context, self.share['id']) self.assertEqual(volume, result) self._driver.volume_api.get.assert_called_once_with( self._context, volume['id']) self.fake_private_storage.get.assert_called_once_with( self.share['id'], 'volume_id' ) def test_get_volume_none(self): vol_name = ( self._driver.configuration.volume_name_template % self.share['id']) self.mock_object(self._driver.volume_api, 'get_all', mock.Mock(return_value=[])) result = self._driver._get_volume(self._context, self.share['id']) self.assertIsNone(result) self._driver.volume_api.get_all.assert_called_once_with( self._context, {'all_tenants': True, 'name': vol_name}) def test_get_volume_error(self): volume = fake_volume.FakeVolume( name=CONF.volume_name_template % self.share['id']) self.mock_object(self._driver.volume_api, 'get_all', mock.Mock(return_value=[volume, volume])) self.assertRaises(exception.ManilaException, self._driver._get_volume, self._context, self.share['id']) self._driver.volume_api.get_all.assert_called_once_with( self._context, {'all_tenants': True, 'name': volume['name']}) def test_get_volume_snapshot(self): volume_snapshot = fake_volume.FakeVolumeSnapshot( name=self._driver.configuration.volume_snapshot_name_template % self.snapshot['id']) self.mock_object(self._driver.volume_api, 'get_all_snapshots', mock.Mock(return_value=[volume_snapshot])) result = self._driver._get_volume_snapshot(self._context, self.snapshot['id']) self.assertEqual(volume_snapshot, result) self._driver.volume_api.get_all_snapshots.assert_called_once_with( self._context, {'name': volume_snapshot['name']}) def test_get_volume_snapshot_with_private_data(self): volume_snapshot = fake_volume.FakeVolumeSnapshot() self.mock_object(self._driver.volume_api, 'get_snapshot', mock.Mock(return_value=volume_snapshot)) self.mock_object(self.fake_private_storage, 'get', mock.Mock(return_value=volume_snapshot['id'])) result = self._driver._get_volume_snapshot(self._context, self.snapshot['id']) self.assertEqual(volume_snapshot, result) self._driver.volume_api.get_snapshot.assert_called_once_with( self._context, volume_snapshot['id']) self.fake_private_storage.get.assert_called_once_with( self.snapshot['id'], 'volume_snapshot_id' ) def test_get_volume_snapshot_none(self): snap_name = ( self._driver.configuration.volume_snapshot_name_template % self.share['id']) self.mock_object(self._driver.volume_api, 
'get_all_snapshots', mock.Mock(return_value=[])) result = self._driver._get_volume_snapshot(self._context, self.share['id']) self.assertIsNone(result) self._driver.volume_api.get_all_snapshots.assert_called_once_with( self._context, {'name': snap_name}) def test_get_volume_snapshot_error(self): volume_snapshot = fake_volume.FakeVolumeSnapshot( name=self._driver.configuration.volume_snapshot_name_template % self.snapshot['id']) self.mock_object( self._driver.volume_api, 'get_all_snapshots', mock.Mock(return_value=[volume_snapshot, volume_snapshot])) self.assertRaises( exception.ManilaException, self._driver._get_volume_snapshot, self._context, self.snapshot['id']) self._driver.volume_api.get_all_snapshots.assert_called_once_with( self._context, {'name': volume_snapshot['name']}) def test_detach_volume(self): available_volume = fake_volume.FakeVolume() attached_volume = fake_volume.FakeVolume(status='in-use') self.mock_object(self._driver, '_get_volume', mock.Mock(return_value=attached_volume)) self.mock_object(self._driver.compute_api, 'instance_volumes_list', mock.Mock(return_value=[attached_volume])) self.mock_object(self._driver.compute_api, 'instance_volume_detach') self.mock_object(self._driver.volume_api, 'get', mock.Mock(return_value=available_volume)) self._driver._detach_volume(self._context, self.share, self.server['backend_details']) self._driver.compute_api.instance_volume_detach.\ assert_called_once_with( self._context, self.server['backend_details']['instance_id'], available_volume['id']) self._driver.volume_api.get.assert_called_once_with( self._context, available_volume['id']) def test_detach_volume_detached(self): available_volume = fake_volume.FakeVolume() attached_volume = fake_volume.FakeVolume(status='in-use') self.mock_object(self._driver, '_get_volume', mock.Mock(return_value=attached_volume)) self.mock_object(self._driver.compute_api, 'instance_volumes_list', mock.Mock(return_value=[])) self.mock_object(self._driver.volume_api, 'get', mock.Mock(return_value=available_volume)) self.mock_object(self._driver.compute_api, 'instance_volume_detach') self._driver._detach_volume(self._context, self.share, self.server['backend_details']) self.assertFalse(self._driver.volume_api.get.called) self.assertFalse( self._driver.compute_api.instance_volume_detach.called) def test_allocate_container(self): fake_vol = fake_volume.FakeVolume() self.fake_conf.cinder_volume_type = 'fake_volume_type' self.mock_object(self._driver.volume_api, 'create', mock.Mock(return_value=fake_vol)) result = self._driver._allocate_container(self._context, self.share) self.assertEqual(fake_vol, result) self._driver.volume_api.create.assert_called_once_with( self._context, self.share['size'], CONF.volume_name_template % self.share['id'], '', snapshot=None, volume_type='fake_volume_type', availability_zone=self.share['availability_zone']) def test_allocate_container_with_snaphot(self): fake_vol = fake_volume.FakeVolume() fake_vol_snap = fake_volume.FakeVolumeSnapshot() self.mock_object(self._driver, '_get_volume_snapshot', mock.Mock(return_value=fake_vol_snap)) self.mock_object(self._driver.volume_api, 'create', mock.Mock(return_value=fake_vol)) result = self._driver._allocate_container(self._context, self.share, self.snapshot) self.assertEqual(fake_vol, result) self._driver.volume_api.create.assert_called_once_with( self._context, self.share['size'], CONF.volume_name_template % self.share['id'], '', snapshot=fake_vol_snap, volume_type=None, availability_zone=self.share['availability_zone']) def 
test_allocate_container_error(self): fake_vol = fake_volume.FakeVolume(status='error') self.mock_object(self._driver.volume_api, 'create', mock.Mock(return_value=fake_vol)) self.assertRaises(exception.ManilaException, self._driver._allocate_container, self._context, self.share) def test_wait_for_available_volume(self): fake_volume = {'status': 'creating', 'id': 'fake'} fake_available_volume = {'status': 'available', 'id': 'fake'} self.mock_object(self._driver.volume_api, 'get', mock.Mock(return_value=fake_available_volume)) actual_result = self._driver._wait_for_available_volume( fake_volume, 5, "error", "timeout") self.assertEqual(fake_available_volume, actual_result) self._driver.volume_api.get.assert_called_once_with( mock.ANY, fake_volume['id']) @mock.patch('time.sleep') def test_wait_for_available_volume_error_extending(self, mock_sleep): fake_volume = {'status': 'error_extending', 'id': 'fake'} self.assertRaises(exception.ManilaException, self._driver._wait_for_available_volume, fake_volume, 5, 'error', 'timeout') self.assertFalse(mock_sleep.called) @mock.patch('time.sleep') def test_wait_for_extending_volume(self, mock_sleep): initial_size = 1 expected_size = 2 mock_volume = fake_volume.FakeVolume(status='available', size=initial_size) mock_extending_vol = fake_volume.FakeVolume(status='extending', size=initial_size) mock_extended_vol = fake_volume.FakeVolume(status='available', size=expected_size) self.mock_object(self._driver.volume_api, 'get', mock.Mock(side_effect=[mock_extending_vol, mock_extended_vol])) result = self._driver._wait_for_available_volume( mock_volume, 5, "error", "timeout", expected_size=expected_size) expected_get_count = 2 self.assertEqual(mock_extended_vol, result) self._driver.volume_api.get.assert_has_calls( [mock.call(self._driver.admin_context, mock_volume['id'])] * expected_get_count) mock_sleep.assert_has_calls([mock.call(1)] * expected_get_count) @ddt.data(mock.Mock(return_value={'status': 'creating', 'id': 'fake'}), mock.Mock(return_value={'status': 'error', 'id': 'fake'})) def test_wait_for_available_volume_invalid(self, volume_get_mock): fake_volume = {'status': 'creating', 'id': 'fake'} self.mock_object(self._driver.volume_api, 'get', volume_get_mock) self.mock_object(time, 'time', mock.Mock(side_effect=[1.0, 1.33, 1.67, 2.0])) self.assertRaises( exception.ManilaException, self._driver._wait_for_available_volume, fake_volume, 1, "error", "timeout" ) def test_deallocate_container(self): fake_vol = fake_volume.FakeVolume() self.mock_object(self._driver, '_get_volume', mock.Mock(return_value=fake_vol)) self.mock_object(self._driver.volume_api, 'delete') self.mock_object(self._driver.volume_api, 'get', mock.Mock( side_effect=exception.VolumeNotFound(volume_id=fake_vol['id']))) self._driver._deallocate_container(self._context, self.share) self._driver._get_volume.assert_called_once_with( self._context, self.share['id']) self._driver.volume_api.delete.assert_called_once_with( self._context, fake_vol['id']) self._driver.volume_api.get.assert_called_once_with( self._context, fake_vol['id']) def test_deallocate_container_with_volume_not_found(self): fake_vol = fake_volume.FakeVolume() self.mock_object(self._driver, '_get_volume', mock.Mock(side_effect=exception.VolumeNotFound( volume_id=fake_vol['id']))) self.mock_object(self._driver.volume_api, 'delete') self._driver._deallocate_container(self._context, self.share) self._driver._get_volume.assert_called_once_with( self._context, self.share['id']) self.assertFalse(self._driver.volume_api.delete.called) def 
test_create_share_from_snapshot(self): vol1 = 'fake_vol1' vol2 = 'fake_vol2' self._helper_nfs.create_export.return_value = 'fakelocation' expected_el = [{ 'is_admin_only': False, 'path': 'fakelocation', 'metadata': {'export_location_metadata_example': 'example'}, }] self.mock_object(self._driver, '_allocate_container', mock.Mock(return_value=vol1)) self.mock_object(self._driver, '_attach_volume', mock.Mock(return_value=vol2)) self.mock_object(self._driver, '_mount_device') result = self._driver.create_share_from_snapshot( self._context, self.share, self.snapshot, share_server=self.server) self.assertEqual(expected_el, result) self._driver._allocate_container.assert_called_once_with( self._driver.admin_context, self.share, self.snapshot) self._driver._attach_volume.assert_called_once_with( self._driver.admin_context, self.share, self.server['backend_details']['instance_id'], vol1) self._driver._mount_device.assert_called_once_with( self.share, self.server['backend_details'], vol2) self._helper_nfs.create_export.assert_called_once_with( self.server['backend_details'], self.share['name']) def test_create_share_from_snapshot_invalid_helper(self): self._driver._helpers = {'CIFS': self._helper_cifs} self.assertRaises(exception.InvalidShare, self._driver.create_share_from_snapshot, self._context, self.share, self.snapshot, share_server=self.server) def test_delete_share_no_share_servers_handling(self): self.mock_object(self._driver, '_deallocate_container') self.mock_object( self._driver.service_instance_manager, 'get_common_server', mock.Mock(return_value=self.server)) self.mock_object( self._driver.service_instance_manager, 'ensure_service_instance', mock.Mock(return_value=False)) CONF.set_default('driver_handles_share_servers', False) self._driver.delete_share(self._context, self.share) self._driver.service_instance_manager.get_common_server.\ assert_called_once_with() self._driver._deallocate_container.assert_called_once_with( self._driver.admin_context, self.share) self._driver.service_instance_manager.ensure_service_instance.\ assert_called_once_with( self._context, self.server['backend_details']) def test_delete_share(self): self.mock_object(self._driver, '_unmount_device') self.mock_object(self._driver, '_detach_volume') self.mock_object(self._driver, '_deallocate_container') self._driver.delete_share( self._context, self.share, share_server=self.server) self._helper_nfs.remove_export.assert_called_once_with( self.server['backend_details'], self.share['name']) self._driver._unmount_device.assert_called_once_with( self.share, self.server['backend_details']) self._driver._detach_volume.assert_called_once_with( self._driver.admin_context, self.share, self.server['backend_details']) self._driver._deallocate_container.assert_called_once_with( self._driver.admin_context, self.share) self._driver.service_instance_manager.ensure_service_instance.\ assert_called_once_with( self._context, self.server['backend_details']) def test_delete_share_without_share_server(self): self.mock_object(self._driver, '_unmount_device') self.mock_object(self._driver, '_detach_volume') self.mock_object(self._driver, '_deallocate_container') self._driver.delete_share( self._context, self.share, share_server=None) self.assertFalse(self._helper_nfs.remove_export.called) self.assertFalse(self._driver._unmount_device.called) self.assertFalse(self._driver._detach_volume.called) self._driver._deallocate_container.assert_called_once_with( self._driver.admin_context, self.share) def 
test_delete_share_without_server_backend_details(self): self.mock_object(self._driver, '_unmount_device') self.mock_object(self._driver, '_detach_volume') self.mock_object(self._driver, '_deallocate_container') fake_share_server = { 'instance_id': 'fake_instance_id', 'ip': 'fake_ip', 'username': 'fake_username', 'password': 'fake_password', 'pk_path': 'fake_pk_path', 'backend_details': {} } self._driver.delete_share( self._context, self.share, share_server=fake_share_server) self.assertFalse(self._helper_nfs.remove_export.called) self.assertFalse(self._driver._unmount_device.called) self.assertFalse(self._driver._detach_volume.called) self._driver._deallocate_container.assert_called_once_with( self._driver.admin_context, self.share) def test_delete_share_without_server_availability(self): self.mock_object(self._driver, '_unmount_device') self.mock_object(self._driver, '_detach_volume') self.mock_object(self._driver, '_deallocate_container') self.mock_object( self._driver.service_instance_manager, 'ensure_service_instance', mock.Mock(return_value=False)) self._driver.delete_share( self._context, self.share, share_server=self.server) self.assertFalse(self._helper_nfs.remove_export.called) self.assertFalse(self._driver._unmount_device.called) self.assertFalse(self._driver._detach_volume.called) self._driver._deallocate_container.assert_called_once_with( self._driver.admin_context, self.share) self._driver.service_instance_manager.ensure_service_instance.\ assert_called_once_with( self._context, self.server['backend_details']) def test_delete_share_invalid_helper(self): self._driver._helpers = {'CIFS': self._helper_cifs} self.assertRaises(exception.InvalidShare, self._driver.delete_share, self._context, self.share, share_server=self.server) def test_create_snapshot(self): fake_vol = fake_volume.FakeVolume() fake_vol_snap = fake_volume.FakeVolumeSnapshot(share_id=fake_vol['id']) self.mock_object(self._driver, '_get_volume', mock.Mock(return_value=fake_vol)) self.mock_object(self._driver.volume_api, 'create_snapshot_force', mock.Mock(return_value=fake_vol_snap)) self._driver.create_snapshot(self._context, fake_vol_snap, share_server=self.server) self._driver._get_volume.assert_called_once_with( self._driver.admin_context, fake_vol_snap['share_id']) self._driver.volume_api.create_snapshot_force.assert_called_once_with( self._context, fake_vol['id'], CONF.volume_snapshot_name_template % fake_vol_snap['id'], '' ) def test_delete_snapshot(self): fake_vol_snap = fake_volume.FakeVolumeSnapshot() fake_vol_snap2 = {'id': 'fake_vol_snap2'} self.mock_object(self._driver, '_get_volume_snapshot', mock.Mock(return_value=fake_vol_snap2)) self.mock_object(self._driver.volume_api, 'delete_snapshot') self.mock_object( self._driver.volume_api, 'get_snapshot', mock.Mock(side_effect=exception.VolumeSnapshotNotFound( snapshot_id=fake_vol_snap['id']))) self._driver.delete_snapshot(self._context, fake_vol_snap, share_server=self.server) self._driver._get_volume_snapshot.assert_called_once_with( self._driver.admin_context, fake_vol_snap['id']) self._driver.volume_api.delete_snapshot.assert_called_once_with( self._driver.admin_context, fake_vol_snap2['id']) self._driver.volume_api.get_snapshot.assert_called_once_with( self._driver.admin_context, fake_vol_snap2['id']) def test_ensure_share(self): vol1 = 'fake_vol1' vol2 = 'fake_vol2' self._helper_nfs.create_export.return_value = 'fakelocation' self.mock_object(self._driver, '_get_volume', mock.Mock(return_value=vol1)) self.mock_object(self._driver, '_attach_volume', 
mock.Mock(return_value=vol2)) self.mock_object(self._driver, '_mount_device') self._driver.ensure_share( self._context, self.share, share_server=self.server) self._driver._get_volume.assert_called_once_with( self._context, self.share['id']) self._driver._attach_volume.assert_called_once_with( self._context, self.share, self.server['backend_details']['instance_id'], vol1) self._driver._mount_device.assert_called_once_with( self.share, self.server['backend_details'], vol2) self._helper_nfs.create_export.assert_called_once_with( self.server['backend_details'], self.share['name'], recreate=True) def test_ensure_share_volume_is_absent(self): self.mock_object( self._driver, '_get_volume', mock.Mock(return_value=None)) self.mock_object(self._driver, '_attach_volume') self._driver.ensure_share( self._context, self.share, share_server=self.server) self._driver._get_volume.assert_called_once_with( self._context, self.share['id']) self.assertFalse(self._driver._attach_volume.called) def test_ensure_share_invalid_helper(self): self._driver._helpers = {'CIFS': self._helper_cifs} self.assertRaises(exception.InvalidShare, self._driver.ensure_share, self._context, self.share, share_server=self.server) @ddt.data(const.ACCESS_LEVEL_RW, const.ACCESS_LEVEL_RO) def test_update_access(self, access_level): # fakes access_rules = [get_fake_access_rule('1.1.1.1', access_level), get_fake_access_rule('2.2.2.2', access_level)] add_rules = [get_fake_access_rule('2.2.2.2', access_level), ] delete_rules = [get_fake_access_rule('3.3.3.3', access_level), ] # run self._driver.update_access(self._context, self.share, access_rules, add_rules=add_rules, delete_rules=delete_rules, share_server=self.server) # asserts self._driver._helpers[self.share['share_proto']].\ update_access.assert_called_once_with( self.server['backend_details'], self.share['name'], access_rules, add_rules=add_rules, delete_rules=delete_rules) @ddt.data(fake_share.fake_share(), fake_share.fake_share(share_proto='NFSBOGUS'), fake_share.fake_share(share_proto='CIFSBOGUS')) def test__get_helper_with_wrong_proto(self, share): self.assertRaises(exception.InvalidShare, self._driver._get_helper, share) def test__setup_server(self): sim = self._driver.instance_manager net_info = { 'server_id': 'fake', 'neutron_net_id': 'fake-net-id', 'neutron_subnet_id': 'fake-subnet-id', } self._driver.setup_server(net_info) sim.set_up_service_instance.assert_called_once_with( self._context, net_info) def test__setup_server_revert(self): def raise_exception(*args, **kwargs): raise exception.ServiceInstanceException net_info = {'server_id': 'fake', 'neutron_net_id': 'fake-net-id', 'neutron_subnet_id': 'fake-subnet-id'} self.mock_object(self._driver.service_instance_manager, 'set_up_service_instance', mock.Mock(side_effect=raise_exception)) self.assertRaises(exception.ServiceInstanceException, self._driver.setup_server, net_info) def test__teardown_server(self): server_details = { 'instance_id': 'fake_instance_id', 'subnet_id': 'fake_subnet_id', 'router_id': 'fake_router_id', } self._driver.teardown_server(server_details) self._driver.service_instance_manager.delete_service_instance.\ assert_called_once_with( self._driver.admin_context, server_details) def test_ssh_exec_connection_not_exist(self): ssh_conn_timeout = 30 CONF.set_default('ssh_conn_timeout', ssh_conn_timeout) ssh_output = 'fake_ssh_output' cmd = ['fake', 'command'] ssh = mock.Mock() ssh.get_transport = mock.Mock() ssh.get_transport().is_active = mock.Mock(return_value=True) ssh_pool = mock.Mock() ssh_pool.create = 
mock.Mock(return_value=ssh) self.mock_object(utils, 'SSHPool', mock.Mock(return_value=ssh_pool)) self.mock_object(processutils, 'ssh_execute', mock.Mock(return_value=ssh_output)) self._driver.ssh_connections = {} result = self._driver._ssh_exec(self.server, cmd) utils.SSHPool.assert_called_once_with( self.server['ip'], 22, ssh_conn_timeout, self.server['username'], self.server['password'], self.server['pk_path'], max_size=1) ssh_pool.create.assert_called_once_with() processutils.ssh_execute.assert_called_once_with( ssh, 'fake command', check_exit_code=True) ssh.get_transport().is_active.assert_called_once_with() self.assertEqual( self._driver.ssh_connections, {self.server['instance_id']: (ssh_pool, ssh)} ) self.assertEqual(ssh_output, result) def test_ssh_exec_connection_exist(self): ssh_output = 'fake_ssh_output' cmd = ['fake', 'command'] ssh = mock.Mock() ssh.get_transport = mock.Mock() ssh.get_transport().is_active = mock.Mock(side_effect=lambda: True) ssh_pool = mock.Mock() self.mock_object(processutils, 'ssh_execute', mock.Mock(return_value=ssh_output)) self._driver.ssh_connections = { self.server['instance_id']: (ssh_pool, ssh) } result = self._driver._ssh_exec(self.server, cmd) processutils.ssh_execute.assert_called_once_with( ssh, 'fake command', check_exit_code=True) ssh.get_transport().is_active.assert_called_once_with() self.assertEqual( self._driver.ssh_connections, {self.server['instance_id']: (ssh_pool, ssh)} ) self.assertEqual(ssh_output, result) def test_ssh_exec_connection_recreation(self): ssh_output = 'fake_ssh_output' cmd = ['fake', 'command'] ssh = mock.Mock() ssh.get_transport = mock.Mock() ssh.get_transport().is_active = mock.Mock(side_effect=lambda: False) ssh_pool = mock.Mock() ssh_pool.create = mock.Mock(side_effect=lambda: ssh) ssh_pool.remove = mock.Mock() self.mock_object(processutils, 'ssh_execute', mock.Mock(return_value=ssh_output)) self._driver.ssh_connections = { self.server['instance_id']: (ssh_pool, ssh) } result = self._driver._ssh_exec(self.server, cmd) processutils.ssh_execute.assert_called_once_with( ssh, 'fake command', check_exit_code=True) ssh.get_transport().is_active.assert_called_once_with() ssh_pool.create.assert_called_once_with() ssh_pool.remove.assert_called_once_with(ssh) self.assertEqual( self._driver.ssh_connections, {self.server['instance_id']: (ssh_pool, ssh)} ) self.assertEqual(ssh_output, result) def test_get_share_stats_refresh_false(self): self._driver._stats = {'fake_key': 'fake_value'} result = self._driver.get_share_stats(False) self.assertEqual(self._driver._stats, result) def test_get_share_stats_refresh_true(self): fake_stats = {'fake_key': 'fake_value'} self._driver._stats = fake_stats expected_keys = [ 'qos', 'driver_version', 'share_backend_name', 'free_capacity_gb', 'total_capacity_gb', 'driver_handles_share_servers', 'reserved_percentage', 'vendor_name', 'storage_protocol', ] result = self._driver.get_share_stats(True) self.assertNotEqual(fake_stats, result) for key in expected_keys: self.assertIn(key, result) self.assertTrue(result['driver_handles_share_servers']) self.assertEqual('Open Source', result['vendor_name']) def _setup_manage_mocks(self, get_share_type_extra_specs='False', is_device_mounted=True, server_details=None): CONF.set_default('driver_handles_share_servers', False) self.mock_object(share_types, 'get_share_type_extra_specs', mock.Mock(return_value=get_share_type_extra_specs)) self.mock_object(self._driver, '_is_device_mounted', mock.Mock(return_value=is_device_mounted)) self.mock_object(self._driver, 
'service_instance_manager') server = {'backend_details': server_details} self.mock_object(self._driver.service_instance_manager, 'get_common_server', mock.Mock(return_value=server)) def test_manage_invalid_protocol(self): share = {'share_proto': 'fake_proto'} self._setup_manage_mocks() self.assertRaises(exception.InvalidShare, self._driver.manage_existing, share, {}) def test_manage_not_mounted_share(self): share = get_fake_manage_share() fake_path = '/foo/bar' self._setup_manage_mocks(is_device_mounted=False) self.mock_object( self._driver._helpers[share['share_proto']], 'get_share_path_by_export_location', mock.Mock(return_value=fake_path)) self.assertRaises(exception.ManageInvalidShare, self._driver.manage_existing, share, {}) self.assertEqual( 1, self._driver.service_instance_manager.get_common_server.call_count) self._driver._is_device_mounted.assert_called_once_with( fake_path, None) self._driver._helpers[share['share_proto']].\ get_share_path_by_export_location.assert_called_once_with( None, share['export_locations'][0]['path']) def test_manage_share_not_attached_to_cinder_volume_invalid_size(self): share = get_fake_manage_share() server_details = {} fake_path = '/foo/bar' self._setup_manage_mocks(server_details=server_details) self.mock_object(self._driver, '_get_volume', mock.Mock(return_value=None)) error = exception.ManageInvalidShare(reason="fake") self.mock_object( self._driver, '_get_mounted_share_size', mock.Mock(side_effect=error)) self.mock_object( self._driver._helpers[share['share_proto']], 'get_share_path_by_export_location', mock.Mock(return_value=fake_path)) self.assertRaises(exception.ManageInvalidShare, self._driver.manage_existing, share, {}) self._driver._get_mounted_share_size.assert_called_once_with( fake_path, server_details) self._driver._helpers[share['share_proto']].\ get_share_path_by_export_location.assert_called_once_with( server_details, share['export_locations'][0]['path']) def test_manage_share_not_attached_to_cinder_volume(self): share = get_fake_manage_share() share_size = "fake" fake_path = '/foo/bar' fake_exports = ['foo', 'bar'] server_details = {} self._setup_manage_mocks(server_details=server_details) self.mock_object(self._driver, '_get_volume') self.mock_object(self._driver, '_get_mounted_share_size', mock.Mock(return_value=share_size)) self.mock_object( self._driver._helpers[share['share_proto']], 'get_share_path_by_export_location', mock.Mock(return_value=fake_path)) self.mock_object( self._driver._helpers[share['share_proto']], 'get_exports_for_share', mock.Mock(return_value=fake_exports)) result = self._driver.manage_existing(share, {}) self.assertEqual( {'size': share_size, 'export_locations': fake_exports}, result) self._driver._helpers[share['share_proto']].get_exports_for_share.\ assert_called_once_with( server_details, share['export_locations'][0]['path']) self._driver._helpers[share['share_proto']].\ get_share_path_by_export_location.assert_called_once_with( server_details, share['export_locations'][0]['path']) self._driver._get_mounted_share_size.assert_called_once_with( fake_path, server_details) self.assertFalse(self._driver._get_volume.called) def test_manage_share_attached_to_cinder_volume_not_found(self): share = get_fake_manage_share() server_details = {} driver_options = {'volume_id': 'fake'} self._setup_manage_mocks(server_details=server_details) self.mock_object( self._driver.volume_api, 'get', mock.Mock(side_effect=exception.VolumeNotFound(volume_id="fake")) ) self.assertRaises(exception.ManageInvalidShare, 
self._driver.manage_existing, share, driver_options) self._driver.volume_api.get.assert_called_once_with( mock.ANY, driver_options['volume_id']) def test_manage_share_attached_to_cinder_volume_not_mounted_to_srv(self): share = get_fake_manage_share() server_details = {'instance_id': 'fake'} driver_options = {'volume_id': 'fake'} volume = {'id': 'fake'} self._setup_manage_mocks(server_details=server_details) self.mock_object(self._driver.volume_api, 'get', mock.Mock(return_value=volume)) self.mock_object(self._driver.compute_api, 'instance_volumes_list', mock.Mock(return_value=[])) self.assertRaises(exception.ManageInvalidShare, self._driver.manage_existing, share, driver_options) self._driver.volume_api.get.assert_called_once_with( mock.ANY, driver_options['volume_id']) self._driver.compute_api.instance_volumes_list.assert_called_once_with( mock.ANY, server_details['instance_id']) def test_manage_share_attached_to_cinder_volume(self): share = get_fake_manage_share() fake_size = 'foobar' fake_exports = ['foo', 'bar'] server_details = {'instance_id': 'fake'} driver_options = {'volume_id': 'fake'} volume = {'id': 'fake', 'name': 'fake_volume_1', 'size': fake_size} self._setup_manage_mocks(server_details=server_details) self.mock_object(self._driver.volume_api, 'get', mock.Mock(return_value=volume)) self._driver.volume_api.update = mock.Mock() fake_volume = mock.Mock() fake_volume.id = 'fake' self.mock_object(self._driver.compute_api, 'instance_volumes_list', mock.Mock(return_value=[fake_volume])) self.mock_object( self._driver._helpers[share['share_proto']], 'get_exports_for_share', mock.Mock(return_value=fake_exports)) result = self._driver.manage_existing(share, driver_options) self.assertEqual( {'size': fake_size, 'export_locations': fake_exports}, result) self._driver._helpers[share['share_proto']].get_exports_for_share.\ assert_called_once_with( server_details, share['export_locations'][0]['path']) expected_volume_update = { 'name': self._driver._get_volume_name(share['id']) } self._driver.volume_api.update.assert_called_once_with( mock.ANY, volume['id'], expected_volume_update) self.fake_private_storage.update.assert_called_once_with( share['id'], {'volume_id': volume['id']} ) def test_get_mounted_share_size(self): output = ("Filesystem blocks Used Available Capacity Mounted on\n" "/dev/fake 1G 1G 1G 4% /shares/share-fake") self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=(output, ''))) actual_result = self._driver._get_mounted_share_size('/fake/path', {}) self.assertEqual(1, actual_result) @ddt.data("fake\nfake\n", "fake", "fake\n") def test_get_mounted_share_size_invalid_output(self, output): self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=(output, ''))) self.assertRaises(exception.ManageInvalidShare, self._driver._get_mounted_share_size, '/fake/path', {}) def test_get_consumed_space(self): mount_path = "fake_path" server_details = {} index = 2 valid_result = 1 self.mock_object(self._driver, '_get_mount_stats_by_index', mock.Mock(return_value=valid_result * 1024)) actual_result = self._driver._get_consumed_space( mount_path, server_details) self.assertEqual(valid_result, actual_result) self._driver._get_mount_stats_by_index.assert_called_once_with( mount_path, server_details, index, block_size='M' ) def test_get_consumed_space_invalid(self): self.mock_object( self._driver, '_get_mount_stats_by_index', mock.Mock(side_effect=exception.ManilaException("fake")) ) self.assertRaises( exception.InvalidShare, self._driver._get_consumed_space, "fake", 
"fake" ) def test_extend_share(self): fake_volume = "fake" fake_share = { 'id': 'fake', 'share_proto': 'NFS', 'name': 'test_share', } new_size = 123 srv_details = self.server['backend_details'] self.mock_object( self._driver.service_instance_manager, 'get_common_server', mock.Mock(return_value=self.server) ) self.mock_object(self._driver, '_unmount_device') self.mock_object(self._driver, '_detach_volume') self.mock_object(self._driver, '_extend_volume') self.mock_object(self._driver, '_attach_volume') self.mock_object(self._driver, '_mount_device') self.mock_object(self._driver, '_resize_filesystem') self.mock_object( self._driver, '_get_volume', mock.Mock(return_value=fake_volume) ) CONF.set_default('driver_handles_share_servers', False) self._driver.extend_share(fake_share, new_size) self.assertTrue( self._driver.service_instance_manager.get_common_server.called) self._driver._unmount_device.assert_called_once_with( fake_share, srv_details) self._driver._detach_volume.assert_called_once_with( mock.ANY, fake_share, srv_details) self._driver._get_volume.assert_called_once_with( mock.ANY, fake_share['id']) self._driver._extend_volume.assert_called_once_with( mock.ANY, fake_volume, new_size) self._driver._attach_volume.assert_called_once_with( mock.ANY, fake_share, srv_details['instance_id'], mock.ANY) self._helper_nfs.disable_access_for_maintenance.\ assert_called_once_with(srv_details, 'test_share') self._helper_nfs.restore_access_after_maintenance.\ assert_called_once_with(srv_details, 'test_share') self.assertTrue(self._driver._resize_filesystem.called) def test_extend_volume(self): fake_volume = {'id': 'fake'} new_size = 123 self.mock_object(self._driver.volume_api, 'extend') self.mock_object(self._driver, '_wait_for_available_volume') self._driver._extend_volume(self._context, fake_volume, new_size) self._driver.volume_api.extend.assert_called_once_with( self._context, fake_volume['id'], new_size ) self._driver._wait_for_available_volume.assert_called_once_with( fake_volume, mock.ANY, msg_timeout=mock.ANY, msg_error=mock.ANY, expected_size=new_size ) def test_resize_filesystem(self): fake_server_details = {'fake': 'fake'} fake_volume = {'mountpoint': '/dev/fake'} self.mock_object(self._driver, '_ssh_exec') self._driver._resize_filesystem( fake_server_details, fake_volume, new_size=123) self._driver._ssh_exec.assert_any_call( fake_server_details, ['sudo', 'fsck', '-pf', '/dev/fake']) self._driver._ssh_exec.assert_any_call( fake_server_details, ['sudo', 'resize2fs', '/dev/fake', "%sG" % 123] ) self.assertEqual(2, self._driver._ssh_exec.call_count) @ddt.data( { 'source': processutils.ProcessExecutionError( stderr="resize2fs: New size smaller than minimum (123456)"), 'target': exception.Invalid }, { 'source': processutils.ProcessExecutionError(stderr="fake_error"), 'target': exception.ManilaException } ) @ddt.unpack def test_resize_filesystem_invalid_new_size(self, source, target): fake_server_details = {'fake': 'fake'} fake_volume = {'mountpoint': '/dev/fake'} ssh_mock = mock.Mock(side_effect=["fake", source]) self.mock_object(self._driver, '_ssh_exec', ssh_mock) self.assertRaises( target, self._driver._resize_filesystem, fake_server_details, fake_volume, new_size=123 ) def test_shrink_share_invalid_size(self): fake_share = {'id': 'fake', 'export_locations': [{'path': 'test'}]} new_size = 123 self.mock_object( self._driver.service_instance_manager, 'get_common_server', mock.Mock(return_value=self.server) ) self.mock_object(self._driver, '_get_helper') self.mock_object(self._driver, 
                         '_get_consumed_space',
                         mock.Mock(return_value=200))
        CONF.set_default('driver_handles_share_servers', False)

        self.assertRaises(
            exception.ShareShrinkingPossibleDataLoss,
            self._driver.shrink_share,
            fake_share,
            new_size
        )

        self._driver._get_helper.assert_called_once_with(fake_share)
        self._driver._get_consumed_space.assert_called_once_with(
            mock.ANY, self.server['backend_details'])

    def _setup_shrink_mocks(self):
        share = {'id': 'fake', 'export_locations': [{'path': 'test'}],
                 'name': 'fake'}
        volume = {'id': 'fake'}
        new_size = 123
        server_details = self.server['backend_details']
        self.mock_object(
            self._driver.service_instance_manager,
            'get_common_server',
            mock.Mock(return_value=self.server)
        )
        helper = mock.Mock()
        self.mock_object(self._driver, '_get_helper',
                         mock.Mock(return_value=helper))
        self.mock_object(self._driver, '_get_consumed_space',
                         mock.Mock(return_value=100))
        self.mock_object(self._driver, '_get_volume',
                         mock.Mock(return_value=volume))
        self.mock_object(self._driver, '_unmount_device')
        self.mock_object(self._driver, '_mount_device')
        CONF.set_default('driver_handles_share_servers', False)

        return share, volume, new_size, server_details, helper

    @ddt.data({'source': exception.Invalid("fake"),
               'target': exception.ShareShrinkingPossibleDataLoss},
              {'source': exception.ManilaException("fake"),
               'target': exception.Invalid})
    @ddt.unpack
    def test_shrink_share_error_on_resize_fs(self, source, target):
        share, vol, size, server_details, _ = self._setup_shrink_mocks()
        resize_mock = mock.Mock(side_effect=source)
        self.mock_object(self._driver, '_resize_filesystem', resize_mock)

        self.assertRaises(target, self._driver.shrink_share, share, size)

        resize_mock.assert_called_once_with(server_details, vol,
                                            new_size=size)

    def test_shrink_share(self):
        share, vol, size, server_details, helper = self._setup_shrink_mocks()
        self.mock_object(self._driver, '_resize_filesystem')

        self._driver.shrink_share(share, size)

        self._driver._get_helper.assert_called_once_with(share)
        self._driver._get_consumed_space.assert_called_once_with(
            mock.ANY, server_details)
        self._driver._get_volume.assert_called_once_with(mock.ANY,
                                                         share['id'])
        self._driver._unmount_device.assert_called_once_with(share,
                                                             server_details)
        self._driver._resize_filesystem.assert_called_once_with(
            server_details, vol, new_size=size)
        self._driver._mount_device.assert_called_once_with(
            share, server_details, vol)
        self.assertTrue(helper.disable_access_for_maintenance.called)
        self.assertTrue(helper.restore_access_after_maintenance.called)

    @ddt.data({'share_servers': [], 'result': None},
              {'share_servers': None, 'result': None},
              {'share_servers': ['fake'], 'result': 'fake'},
              {'share_servers': ['fake', 'test'], 'result': 'fake'})
    @ddt.unpack
    def tests_choose_share_server_compatible_with_share(self, share_servers,
                                                        result):
        fake_share = "fake"

        actual_result = (
            self._driver.choose_share_server_compatible_with_share(
                self._context, share_servers, fake_share
            )
        )

        self.assertEqual(result, actual_result)

    @ddt.data({'consistency_group': {'share_server_id': 'fake'},
               'result': {'id': 'fake'}},
              {'consistency_group': None, 'result': {'id': 'fake'}},
              {'consistency_group': {'share_server_id': 'test'},
               'result': {'id': 'test'}})
    @ddt.unpack
    def tests_choose_share_server_compatible_with_share_and_cg(
            self, consistency_group, result):
        share_servers = [{'id': 'fake'}, {'id': 'test'}]
        fake_share = "fake"

        actual_result = (
            self._driver.choose_share_server_compatible_with_share(
                self._context, share_servers, fake_share,
                consistency_group=consistency_group
            )
        )

        self.assertEqual(result, actual_result)

    def test_create_consistency_group(self):
FAKE_SNAP_DICT = get_fake_snap_dict() result = self._driver.create_consistency_group( self._context, FAKE_SNAP_DICT, share_server=self.server) self.assertEqual(1, self.mock_debug_log.call_count) self.assertEqual(1, self.mock_warning_log.call_count) self.assertIsNone(result) def test_delete_consistency_group(self): FAKE_SNAP_DICT = get_fake_snap_dict() result = self._driver.delete_consistency_group( self._context, FAKE_SNAP_DICT, share_server=self.server) self.assertEqual(1, self.mock_debug_log.call_count) self.assertIsNone(result) def test_create_cgsnapshot_no_cg_members(self): FAKE_SNAP_DICT = dict(get_fake_snap_dict(), cgsnapshot_members=[]) mock_snapshot_creation = self.mock_object(generic.GenericShareDriver, 'create_snapshot') result = self._driver.create_cgsnapshot( self._context, FAKE_SNAP_DICT, share_server=self.server) self.assertEqual(1, self.mock_debug_log.call_count) self.assertEqual(2, self.mock_warning_log.call_count) self.assertFalse(mock_snapshot_creation.called) self.assertEqual((None, None), result) @ddt.data( { 'delete_snap_side_effect': None, 'expected_error_log_call_count': 0, }, { 'delete_snap_side_effect': exception.ManilaException, 'expected_error_log_call_count': 1, } ) @ddt.unpack def test_create_cgsnapshot_manila_exception_on_create_and_delete( self, delete_snap_side_effect, expected_error_log_call_count): FAKE_SNAP_DICT = get_fake_snap_dict() # Append another fake share FAKE_SHARE = dict(FAKE_SNAP_DICT['cgsnapshot_members'][0]) FAKE_SNAP_DICT['cgsnapshot_members'].append(FAKE_SHARE) self.mock_object(generic.GenericShareDriver, 'create_snapshot', mock.Mock(side_effect=[ None, exception.ManilaException, ])) self.mock_object(generic.GenericShareDriver, 'delete_snapshot', mock.Mock(side_effect=delete_snap_side_effect)) self.assertRaises(exception.ManilaException, self._driver.create_cgsnapshot, self._context, FAKE_SNAP_DICT, share_server=self.server) self.assertEqual(2, self.mock_debug_log.call_count) self.assertEqual(1, self.mock_warning_log.call_count) self.assertEqual(1, self.mock_exception_log.call_count) self.assertEqual(expected_error_log_call_count, self.mock_error_log.call_count) def test_create_cgsnapshot(self): FAKE_SNAP_DICT = get_fake_snap_dict() FAKE_SHARE_SNAPSHOT = { 'share_id': 'e14b5174-e534-4f35-bc4f-fe81c1575d6f', 'id': '03e2f06e-14f2-45a5-9631-0949d1937bd8', } mock_snapshot_creation = self.mock_object(generic.GenericShareDriver, 'create_snapshot') result = self._driver.create_cgsnapshot( self._context, FAKE_SNAP_DICT, share_server=self.server) mock_snapshot_creation.assert_called_once_with(self._context, FAKE_SHARE_SNAPSHOT, self.server) self.assertEqual(2, self.mock_debug_log.call_count) self.assertEqual(1, self.mock_warning_log.call_count) self.assertFalse(self.mock_error_log.called) self.assertEqual((None, None), result) def test_delete_cgsnapshot_manila_exception(self): FAKE_SNAP_DICT = get_fake_snap_dict() self.mock_object(generic.GenericShareDriver, 'delete_snapshot', mock.Mock(side_effect=exception.ManilaException)) self.assertRaises(exception.ManilaException, self._driver.delete_cgsnapshot, self._context, FAKE_SNAP_DICT, share_server=self.server) self.assertEqual(1, self.mock_error_log.call_count) def test_delete_cgsnapshot(self): FAKE_SNAP_DICT = get_fake_snap_dict() FAKE_SHARE_SNAPSHOT = { 'share_id': 'e14b5174-e534-4f35-bc4f-fe81c1575d6f', 'id': '03e2f06e-14f2-45a5-9631-0949d1937bd8', } mock_snapshot_creation = self.mock_object(generic.GenericShareDriver, 'delete_snapshot') result = self._driver.delete_cgsnapshot( self._context, 
FAKE_SNAP_DICT, share_server=self.server) mock_snapshot_creation.assert_called_once_with(self._context, FAKE_SHARE_SNAPSHOT, self.server) self.assertEqual(2, self.mock_debug_log.call_count) self.assertEqual((None, None), result) def test_create_consistency_group_from_cgsnapshot_no_members(self): FAKE_CG_DICT = get_fake_cg_dict() FAKE_CGSNAP_DICT = dict(get_fake_snap_dict(), cgsnapshot_members=[]) mock_share_creation = self.mock_object(generic.GenericShareDriver, 'create_share_from_snapshot') result = self._driver.create_consistency_group_from_cgsnapshot( self._context, FAKE_CG_DICT, FAKE_CGSNAP_DICT, share_server=self.server) self.assertFalse(self.mock_debug_log.called) self.assertFalse(mock_share_creation.called) self.assertEqual((None, None), result) def test_create_consistency_group_from_cgsnapshot(self): FAKE_CG_DICT = get_fake_cg_dict() FAKE_CGSNAP_DICT = get_fake_snap_dict() FAKE_COLLATED_INFO = get_fake_collated_cg_snap_info() FAKE_SHARE_UPDATE_LIST = [ { 'id': '02a32f06e-14f2-45a5-9631-7483f1937bd8', 'export_locations': 'xyzzy', } ] self.mock_object(generic.GenericShareDriver, '_collate_cg_snapshot_info', mock.Mock(return_value=FAKE_COLLATED_INFO)) mock_share_creation = self.mock_object(generic.GenericShareDriver, 'create_share_from_snapshot', mock.Mock(return_value='xyzzy')) result = self._driver.create_consistency_group_from_cgsnapshot( self._context, FAKE_CG_DICT, FAKE_CGSNAP_DICT, share_server=self.server) self.assertEqual((None, FAKE_SHARE_UPDATE_LIST), result) self.assertEqual(1, self.mock_debug_log.call_count) mock_share_creation.assert_called_once_with( self._context, FAKE_COLLATED_INFO[0]['share'], FAKE_COLLATED_INFO[0]['snapshot'], share_server=self.server ) def test_collate_cg_snapshot_info_invalid_cg(self): FAKE_CG_DICT = get_fake_cg_dict() FAKE_CGSNAP_DICT = dict(get_fake_snap_dict(), cgsnapshot_members=[]) self.assertRaises(exception.InvalidConsistencyGroup, self._driver._collate_cg_snapshot_info, FAKE_CG_DICT, FAKE_CGSNAP_DICT) def test_collate_cg_snapshot(self): FAKE_CG_DICT = get_fake_cg_dict() FAKE_CGSNAP_DICT = get_fake_snap_dict() FAKE_COLLATED_INFO = get_fake_collated_cg_snap_info() result = self._driver._collate_cg_snapshot_info( FAKE_CG_DICT, FAKE_CGSNAP_DICT) self.assertEqual(FAKE_COLLATED_INFO, result) def test_manage_snapshot_not_found(self): snapshot_instance = {'id': 'snap_instance_id', 'provider_location': 'vol_snap_id'} driver_options = {} self.mock_object( self._driver.volume_api, 'get_snapshot', mock.Mock(side_effect=exception.VolumeSnapshotNotFound( snapshot_id='vol_snap_id'))) self.assertRaises(exception.ManageInvalidShareSnapshot, self._driver.manage_existing_snapshot, snapshot_instance, driver_options) self._driver.volume_api.get_snapshot.assert_called_once_with( self._context, 'vol_snap_id') def test_manage_snapshot_valid(self): snapshot_instance = {'id': 'snap_instance_id', 'provider_location': 'vol_snap_id'} volume_snapshot = {'id': 'vol_snap_id', 'size': 1} self.mock_object(self._driver.volume_api, 'get_snapshot', mock.Mock(return_value=volume_snapshot)) ret_manage = self._driver.manage_existing_snapshot( snapshot_instance, {}) self.assertEqual({'provider_location': 'vol_snap_id', 'size': 1}, ret_manage) self._driver.volume_api.get_snapshot.assert_called_once_with( self._context, 'vol_snap_id') def test_unmanage_snapshot(self): snapshot_instance = {'id': 'snap_instance_id', 'provider_location': 'vol_snap_id'} self.mock_object(self._driver.private_storage, 'delete') self._driver.unmanage_snapshot(snapshot_instance) 
self._driver.private_storage.delete.assert_called_once_with( 'snap_instance_id') @generic.ensure_server def fake(driver_instance, context, share_server=None): return share_server @ddt.ddt class GenericDriverEnsureServerTestCase(test.TestCase): def setUp(self): super(GenericDriverEnsureServerTestCase, self).setUp() self._context = context.get_admin_context() self.server = {'id': 'fake_id', 'backend_details': {'foo': 'bar'}} self.dhss_false = type( 'Fake', (object,), {'driver_handles_share_servers': False}) self.dhss_true = type( 'Fake', (object,), {'driver_handles_share_servers': True}) def test_share_servers_are_not_handled_server_not_provided(self): self.dhss_false.service_instance_manager = mock.Mock() self.dhss_false.service_instance_manager.get_common_server = ( mock.Mock(return_value=self.server)) self.dhss_false.service_instance_manager.ensure_service_instance = ( mock.Mock(return_value=True)) actual = fake(self.dhss_false, self._context) self.assertEqual(self.server, actual) self.dhss_false.service_instance_manager.\ get_common_server.assert_called_once_with() self.dhss_false.service_instance_manager.ensure_service_instance.\ assert_called_once_with( self._context, self.server['backend_details']) @ddt.data({'id': 'without_details'}, {'id': 'with_details', 'backend_details': {'foo': 'bar'}}) def test_share_servers_are_not_handled_server_provided(self, server): self.assertRaises( exception.ManilaException, fake, self.dhss_false, self._context, share_server=server) def test_share_servers_are_handled_server_provided(self): self.dhss_true.service_instance_manager = mock.Mock() self.dhss_true.service_instance_manager.ensure_service_instance = ( mock.Mock(return_value=True)) actual = fake(self.dhss_true, self._context, share_server=self.server) self.assertEqual(self.server, actual) self.dhss_true.service_instance_manager.ensure_service_instance.\ assert_called_once_with( self._context, self.server['backend_details']) def test_share_servers_are_handled_invalid_server_provided(self): server = {'id': 'without_details'} self.assertRaises( exception.ManilaException, fake, self.dhss_true, self._context, share_server=server) def test_share_servers_are_handled_server_not_provided(self): self.assertRaises( exception.ManilaException, fake, self.dhss_true, self._context) manila-2.0.0/manila/tests/share/drivers/test_ganesha.py0000664000567000056710000002633212701407107024322 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
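# The GaneshaNASHelperTestCase below drives ganesha.GaneshaNASHelper entirely
# through mocks: a fake execute callable, a stubbed ganesha_manager, and
# patched os.listdir/open calls, so that template loading (_load_conf_dir,
# init_helper) and export handling (allow_access/deny_access) can be verified
# without a running NFS-Ganesha service.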
import copy
import errno
import os

import mock
from oslo_config import cfg

from manila import exception
from manila.share import configuration as config
from manila.share.drivers import ganesha
from manila import test
from manila.tests import fake_share

CONF = cfg.CONF

fake_basepath = '/fakepath'

fake_export_name = 'fakename--fakeaccid'

fake_output_template = {
    'EXPORT': {
        'Export_Id': 101,
        'Path': '/fakepath/fakename',
        'Pseudo': '/fakepath/fakename--fakeaccid',
        'Tag': 'fakeaccid',
        'CLIENT': {
            'Clients': '10.0.0.1'
        },
        'FSAL': 'fakefsal'
    }
}


class GaneshaNASHelperTestCase(test.TestCase):
    """Tests GaneshaNASHelper."""

    def setUp(self):
        super(GaneshaNASHelperTestCase, self).setUp()

        CONF.set_default('ganesha_config_path', '/fakedir0/fakeconfig')
        CONF.set_default('ganesha_db_path', '/fakedir1/fake.db')
        CONF.set_default('ganesha_export_dir', '/fakedir0/export.d')
        CONF.set_default('ganesha_export_template_dir',
                         '/fakedir2/faketempl.d')
        CONF.set_default('ganesha_service_name', 'ganesha.fakeservice')
        self._execute = mock.Mock(return_value=('', ''))
        self.fake_conf = config.Configuration(None)
        self.fake_conf_dir_path = '/fakedir0/exports.d'
        self._helper = ganesha.GaneshaNASHelper(
            self._execute, self.fake_conf, tag='faketag')
        self._helper.ganesha = mock.Mock()
        self._helper.export_template = {'key': 'value'}
        self.share = fake_share.fake_share()
        self.access = fake_share.fake_access()

    def test_load_conf_dir(self):
        fake_template1 = {'key': 'value1'}
        fake_template2 = {'key': 'value2'}
        fake_ls_dir = ['fakefile0.conf', 'fakefile1.json', 'fakefile2.txt']
        mock_ganesha_utils_patch = mock.Mock()

        def fake_patch_run(tmpl1, tmpl2):
            mock_ganesha_utils_patch(
                copy.deepcopy(tmpl1), copy.deepcopy(tmpl2))
            tmpl1.update(tmpl2)

        self.mock_object(ganesha.os, 'listdir',
                         mock.Mock(return_value=fake_ls_dir))
        self.mock_object(ganesha.LOG, 'info')
        self.mock_object(ganesha.ganesha_manager, 'parseconf',
                         mock.Mock(side_effect=[fake_template1,
                                                fake_template2]))
        self.mock_object(ganesha.ganesha_utils, 'patch',
                         mock.Mock(side_effect=fake_patch_run))
        with mock.patch('six.moves.builtins.open',
                        mock.mock_open()) as mockopen:
            mockopen().read.side_effect = ['fakeconf0', 'fakeconf1']
            ret = self._helper._load_conf_dir(self.fake_conf_dir_path)
            ganesha.os.listdir.assert_called_once_with(
                self.fake_conf_dir_path)
            ganesha.LOG.info.assert_called_once_with(
                mock.ANY, self.fake_conf_dir_path)
            mockopen.assert_has_calls([
                mock.call('/fakedir0/exports.d/fakefile0.conf'),
                mock.call('/fakedir0/exports.d/fakefile1.json')],
                any_order=True)
            ganesha.ganesha_manager.parseconf.assert_has_calls([
                mock.call('fakeconf0'), mock.call('fakeconf1')])
            mock_ganesha_utils_patch.assert_has_calls([
                mock.call({}, fake_template1),
                mock.call(fake_template1, fake_template2)])
            self.assertEqual(fake_template2, ret)

    def test_load_conf_dir_no_conf_dir_must_exist_false(self):
        self.mock_object(
            ganesha.os, 'listdir',
            mock.Mock(side_effect=OSError(errno.ENOENT,
                                          os.strerror(errno.ENOENT))))
        self.mock_object(ganesha.LOG, 'info')
        self.mock_object(ganesha.ganesha_manager, 'parseconf')
        self.mock_object(ganesha.ganesha_utils, 'patch')
        with mock.patch('six.moves.builtins.open',
                        mock.mock_open(read_data='fakeconf')) as mockopen:
            ret = self._helper._load_conf_dir(self.fake_conf_dir_path,
                                              must_exist=False)
            ganesha.os.listdir.assert_called_once_with(
                self.fake_conf_dir_path)
            ganesha.LOG.info.assert_called_once_with(
                mock.ANY, self.fake_conf_dir_path)
            self.assertFalse(mockopen.called)
            self.assertFalse(ganesha.ganesha_manager.parseconf.called)
            self.assertFalse(ganesha.ganesha_utils.patch.called)
self.assertEqual({}, ret) def test_load_conf_dir_error_no_conf_dir_must_exist_true(self): self.mock_object( ganesha.os, 'listdir', mock.Mock(side_effect=OSError(errno.ENOENT, os.strerror(errno.ENOENT)))) self.assertRaises(OSError, self._helper._load_conf_dir, self.fake_conf_dir_path) ganesha.os.listdir.assert_called_once_with(self.fake_conf_dir_path) def test_load_conf_dir_error_conf_dir_present_must_exist_false(self): self.mock_object( ganesha.os, 'listdir', mock.Mock(side_effect=OSError(errno.EACCES, os.strerror(errno.EACCES)))) self.assertRaises(OSError, self._helper._load_conf_dir, self.fake_conf_dir_path, must_exist=False) ganesha.os.listdir.assert_called_once_with(self.fake_conf_dir_path) def test_load_conf_dir_error(self): self.mock_object( ganesha.os, 'listdir', mock.Mock(side_effect=RuntimeError('fake error'))) self.assertRaises(RuntimeError, self._helper._load_conf_dir, self.fake_conf_dir_path) ganesha.os.listdir.assert_called_once_with(self.fake_conf_dir_path) def test_init_helper(self): mock_template = mock.Mock() mock_ganesha_manager = mock.Mock() self.mock_object(ganesha.ganesha_manager, 'GaneshaManager', mock.Mock(return_value=mock_ganesha_manager)) self.mock_object(self._helper, '_load_conf_dir', mock.Mock(return_value=mock_template)) self.mock_object(self._helper, '_default_config_hook') ret = self._helper.init_helper() ganesha.ganesha_manager.GaneshaManager.assert_called_once_with( self._execute, 'faketag', ganesha_config_path='/fakedir0/fakeconfig', ganesha_export_dir='/fakedir0/export.d', ganesha_db_path='/fakedir1/fake.db', ganesha_service_name='ganesha.fakeservice') self._helper._load_conf_dir.assert_called_once_with( '/fakedir2/faketempl.d', must_exist=False) self.assertFalse(self._helper._default_config_hook.called) self.assertEqual(mock_ganesha_manager, self._helper.ganesha) self.assertEqual(mock_template, self._helper.export_template) self.assertIsNone(ret) def test_init_helper_conf_dir_empty(self): mock_template = mock.Mock() mock_ganesha_manager = mock.Mock() self.mock_object(ganesha.ganesha_manager, 'GaneshaManager', mock.Mock(return_value=mock_ganesha_manager)) self.mock_object(self._helper, '_load_conf_dir', mock.Mock(return_value={})) self.mock_object(self._helper, '_default_config_hook', mock.Mock(return_value=mock_template)) ret = self._helper.init_helper() ganesha.ganesha_manager.GaneshaManager.assert_called_once_with( self._execute, 'faketag', ganesha_config_path='/fakedir0/fakeconfig', ganesha_export_dir='/fakedir0/export.d', ganesha_db_path='/fakedir1/fake.db', ganesha_service_name='ganesha.fakeservice') self._helper._load_conf_dir.assert_called_once_with( '/fakedir2/faketempl.d', must_exist=False) self._helper._default_config_hook.assert_called_once_with() self.assertEqual(mock_ganesha_manager, self._helper.ganesha) self.assertEqual(mock_template, self._helper.export_template) self.assertIsNone(ret) def test_default_config_hook(self): fake_template = {'key': 'value'} self.mock_object(ganesha.ganesha_utils, 'path_from', mock.Mock(return_value='/fakedir3/fakeconfdir')) self.mock_object(self._helper, '_load_conf_dir', mock.Mock(return_value=fake_template)) ret = self._helper._default_config_hook() ganesha.ganesha_utils.path_from.assert_called_once_with( ganesha.__file__, 'conf') self._helper._load_conf_dir.assert_called_once_with( '/fakedir3/fakeconfdir') self.assertEqual(fake_template, ret) def test_fsal_hook(self): ret = self._helper._fsal_hook('/fakepath', self.share, self.access) self.assertEqual({}, ret) def test_allow_access(self): 
mock_ganesha_utils_patch = mock.Mock() def fake_patch_run(tmpl1, tmpl2, tmpl3): mock_ganesha_utils_patch(copy.deepcopy(tmpl1), tmpl2, tmpl3) tmpl1.update(tmpl3) self.mock_object(self._helper.ganesha, 'get_export_id', mock.Mock(return_value=101)) self.mock_object(self._helper, '_fsal_hook', mock.Mock(return_value='fakefsal')) self.mock_object(ganesha.ganesha_utils, 'patch', mock.Mock(side_effect=fake_patch_run)) ret = self._helper.allow_access(fake_basepath, self.share, self.access) self._helper.ganesha.get_export_id.assert_called_once_with() self._helper._fsal_hook.assert_called_once_with( fake_basepath, self.share, self.access) mock_ganesha_utils_patch.assert_called_once_with( {}, self._helper.export_template, fake_output_template) self._helper._fsal_hook.assert_called_once_with( fake_basepath, self.share, self.access) self._helper.ganesha.add_export.assert_called_once_with( fake_export_name, fake_output_template) self.assertIsNone(ret) def test_allow_access_error_invalid_share(self): access = fake_share.fake_access(access_type='notip') self.assertRaises(exception.InvalidShareAccess, self._helper.allow_access, '/fakepath', self.share, access) def test_deny_access(self): ret = self._helper.deny_access('/fakepath', self.share, self.access) self._helper.ganesha.remove_export.assert_called_once_with( 'fakename--fakeaccid') self.assertIsNone(ret) manila-2.0.0/manila/tests/share/drivers/ibm/0000775000567000056710000000000012701407265022051 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/ibm/test_ganesha_utils.py0000664000567000056710000002625512701407107026315 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
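# The GaneshaUtilsTestCase below covers the IBM GPFS ganesha_utils module:
# export lookup by path, next-export-id calculation, access-list formatting,
# and publishing/reloading the Ganesha configuration locally and on remote
# servers. utils.execute and the socket hostname lookups are mocked, so no
# scp, ssh or service restarts actually run.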
"""Unit tests for the Ganesha Utils module.""" import socket import time import mock from oslo_config import cfg from manila import exception import manila.share.drivers.ibm.ganesha_utils as ganesha_utils from manila import test from manila import utils CONF = cfg.CONF def fake_pre_lines(**kwargs): pre_lines = [ '###################################################', '# Export entries', '###################################################', '', '', '# First export entry', ] return pre_lines def fake_exports(**kwargs): exports = { '100': { 'anonymous_root_uid': '-2', 'export_id': '100', 'filesystem_id': '192.168', 'fsal': '"GPFS"', 'maxread': '65536', 'maxwrite': '65536', 'nfs_protocols': '"3,4"', 'path': '"/fs0/share-1234"', 'prefread': '65536', 'prefwrite': '65536', 'pseudo': '"/fs0/share-1234"', 'root_access': '"*"', 'rw_access': '""', 'sectype': '"sys"', 'tag': '"fs100"', 'transport_protocols': '"UDP,TCP"', }, '101': { 'anonymous_root_uid': '-2', 'export_id': '101', 'filesystem_id': '192.168', 'fsal': '"GPFS"', 'maxread': '65536', 'maxwrite': '65536', 'nfs_protocols': '"3,4"', 'path': '"/fs0/share-5678"', 'prefread': '65536', 'prefwrite': '65536', 'pseudo': '"/fs0/share-5678"', 'root_access': '"*"', 'rw_access': '"172.24.4.4"', 'sectype': '"sys"', 'tag': '"fs101"', 'transport_protocols': '"UDP,TCP"', }, } return exports class GaneshaUtilsTestCase(test.TestCase): """Tests Ganesha Utils.""" def setUp(self): super(GaneshaUtilsTestCase, self).setUp() self.fake_path = "/fs0/share-1234" self.fake_pre_lines = fake_pre_lines() self.fake_exports = fake_exports() self.fake_configpath = "/etc/ganesha/ganesha.exports.conf" self.local_ip = ["192.11.22.1"] self.remote_ips = ["192.11.22.2", "192.11.22.3"] self.servers = self.local_ip + self.remote_ips self.sshlogin = "fake_login" self.sshkey = "fake_sshkey" self.STARTING_EXPORT_ID = 100 self.mock_object(socket, 'gethostname', mock.Mock(return_value="testserver")) self.mock_object(socket, 'gethostbyname_ex', mock.Mock( return_value=('localhost', ['localhost.localdomain', 'testserver'], ['127.0.0.1'] + self.local_ip) )) def test_get_export_by_path(self): fake_export = {'export_id': '100'} self.mock_object(ganesha_utils, '_get_export_by_path', mock.Mock(return_value=fake_export)) export = ganesha_utils.get_export_by_path(self.fake_exports, self.fake_path) self.assertEqual(export, fake_export) ganesha_utils._get_export_by_path.assert_called_once_with( self.fake_exports, self.fake_path ) def test_export_exists(self): fake_export = {'export_id': '100'} self.mock_object(ganesha_utils, '_get_export_by_path', mock.Mock(return_value=fake_export)) result = ganesha_utils.export_exists(self.fake_exports, self.fake_path) self.assertTrue(result) ganesha_utils._get_export_by_path.assert_called_once_with( self.fake_exports, self.fake_path ) def test__get_export_by_path_export_exists(self): expected_export = { 'anonymous_root_uid': '-2', 'export_id': '100', 'filesystem_id': '192.168', 'fsal': '"GPFS"', 'maxread': '65536', 'maxwrite': '65536', 'nfs_protocols': '"3,4"', 'path': '"/fs0/share-1234"', 'prefread': '65536', 'prefwrite': '65536', 'pseudo': '"/fs0/share-1234"', 'root_access': '"*"', 'rw_access': '""', 'sectype': '"sys"', 'tag': '"fs100"', 'transport_protocols': '"UDP,TCP"', } export = ganesha_utils._get_export_by_path(self.fake_exports, self.fake_path) self.assertEqual(export, expected_export) def test__get_export_by_path_export_does_not_exists(self): share_path = '/fs0/share-1111' export = ganesha_utils._get_export_by_path(self.fake_exports, share_path) 
self.assertIsNone(export) def test_get_next_id(self): expected_id = 102 result = ganesha_utils.get_next_id(self.fake_exports) self.assertEqual(result, expected_id) def test_convert_ipstring_to_ipn_exception(self): ipstring = 'fake ip string' self.assertRaises(exception.GPFSGaneshaException, ganesha_utils._convert_ipstring_to_ipn, ipstring) @mock.patch('six.moves.builtins.map') def test_get_next_id_first_export(self, mock_map): expected_id = self.STARTING_EXPORT_ID mock_map.side_effect = ValueError result = ganesha_utils.get_next_id(self.fake_exports) self.assertEqual(result, expected_id) def test_format_access_list(self): access_string = "9.123.12.1,9.123.12.2,9.122" result = ganesha_utils.format_access_list(access_string, None) self.assertEqual(result, "9.122.0.0,9.123.12.1,9.123.12.2") def test_format_access_list_deny_access(self): access_string = "9.123.12.1,9.123,12.2" deny_access = "9.123,12.2" result = ganesha_utils.format_access_list(access_string, deny_access=deny_access) self.assertEqual(result, "9.123.12.1") def test_publish_ganesha_config(self): configpath = self.fake_configpath methods = ('_publish_local_config', '_publish_remote_config') for method in methods: self.mock_object(ganesha_utils, method) ganesha_utils.publish_ganesha_config(self.servers, self.sshlogin, self.sshkey, configpath, self.fake_pre_lines, self.fake_exports) ganesha_utils._publish_local_config.assert_called_once_with( configpath, self.fake_pre_lines, self.fake_exports ) for remote_ip in self.remote_ips: ganesha_utils._publish_remote_config.assert_any_call( remote_ip, self.sshlogin, self.sshkey, configpath ) def test_reload_ganesha_config(self): self.mock_object(utils, 'execute', mock.Mock(return_value=True)) service = 'ganesha.nfsd' ganesha_utils.reload_ganesha_config(self.servers, self.sshlogin) reload_cmd = ['service', service, 'restart'] utils.execute.assert_any_call(*reload_cmd, run_as_root=True) for remote_ip in self.remote_ips: reload_cmd = ['service', service, 'restart'] remote_login = self.sshlogin + '@' + remote_ip reload_cmd = ['ssh', remote_login] + reload_cmd utils.execute.assert_any_call( *reload_cmd, run_as_root=False ) def test_reload_ganesha_config_exception(self): self.mock_object( utils, 'execute', mock.Mock(side_effect=exception.ProcessExecutionError)) self.assertRaises(exception.GPFSGaneshaException, ganesha_utils.reload_ganesha_config, self.servers, self.sshlogin) @mock.patch('six.moves.builtins.open') def test__publish_local_config(self, mock_open): self.mock_object(utils, 'execute', mock.Mock(return_value=True)) fake_timestamp = 1415506949.75 self.mock_object(time, 'time', mock.Mock(return_value=fake_timestamp)) configpath = self.fake_configpath tmp_path = '%s.tmp.%s' % (configpath, fake_timestamp) ganesha_utils._publish_local_config(configpath, self.fake_pre_lines, self.fake_exports) cpcmd = ['install', '-m', '666', configpath, tmp_path] utils.execute.assert_any_call(*cpcmd, run_as_root=True) mvcmd = ['mv', tmp_path, configpath] utils.execute.assert_any_call(*mvcmd, run_as_root=True) self.assertTrue(time.time.called) @mock.patch('six.moves.builtins.open') def test__publish_local_config_exception(self, mock_open): self.mock_object( utils, 'execute', mock.Mock(side_effect=exception.ProcessExecutionError)) fake_timestamp = 1415506949.75 self.mock_object(time, 'time', mock.Mock(return_value=fake_timestamp)) configpath = self.fake_configpath tmp_path = '%s.tmp.%s' % (configpath, fake_timestamp) self.assertRaises(exception.GPFSGaneshaException, ganesha_utils._publish_local_config, 
configpath, self.fake_pre_lines, self.fake_exports) cpcmd = ['install', '-m', '666', configpath, tmp_path] utils.execute.assert_called_once_with(*cpcmd, run_as_root=True) self.assertTrue(time.time.called) def test__publish_remote_config(self): utils.execute = mock.Mock(return_value=True) server = self.remote_ips[1] dest = '%s@%s:%s' % (self.sshlogin, server, self.fake_configpath) scpcmd = ['scp', '-i', self.sshkey, self.fake_configpath, dest] ganesha_utils._publish_remote_config(server, self.sshlogin, self.sshkey, self.fake_configpath) utils.execute.assert_called_once_with(*scpcmd, run_as_root=False) def test__publish_remote_config_exception(self): self.mock_object( utils, 'execute', mock.Mock(side_effect=exception.ProcessExecutionError)) server = self.remote_ips[1] dest = '%s@%s:%s' % (self.sshlogin, server, self.fake_configpath) scpcmd = ['scp', '-i', self.sshkey, self.fake_configpath, dest] self.assertRaises(exception.GPFSGaneshaException, ganesha_utils._publish_remote_config, server, self.sshlogin, self.sshkey, self.fake_configpath) utils.execute.assert_called_once_with(*scpcmd, run_as_root=False) manila-2.0.0/manila/tests/share/drivers/ibm/__init__.py0000664000567000056710000000000012701407107024143 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/ibm/test_gpfs.py0000664000567000056710000012416612701407107024426 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
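# NOTE(illustrative): the tests that follow exercise GPFSShareDriver, which
# drives IBM GPFS by running GPFS CLI commands (mmcrfileset, mmlinkfileset,
# mmsetquota, mmcrsnapshot, ...) through a pluggable _gpfs_execute callable,
# either locally via manila.utils.execute or remotely over SSH, and delegates
# NFS export management to the KNFS/GNFS helpers. A sketch of the common test
# pattern, assuming the fixture names defined in setUp() below:
#
#     self._driver._gpfs_execute = mock.Mock(return_value=('', ''))
#     self._driver._create_share(self.share)
#     self._driver._gpfs_execute.assert_any_call(
#         'mmcrfileset', self.fakedev, self.share['name'],
#         '--inode-space', 'new')
#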
"""Unit tests for the IBM GPFS driver module.""" import re import socket import ddt import mock from oslo_config import cfg from manila import context from manila import exception import manila.share.configuration as config import manila.share.drivers.ibm.ganesha_utils as ganesha_utils import manila.share.drivers.ibm.gpfs as gpfs from manila import test from manila.tests import fake_share from manila import utils CONF = cfg.CONF @ddt.ddt class GPFSShareDriverTestCase(test.TestCase): """Tests GPFSShareDriver.""" def setUp(self): super(GPFSShareDriverTestCase, self).setUp() self._context = context.get_admin_context() self._gpfs_execute = mock.Mock(return_value=('', '')) self._helper_fake = mock.Mock() CONF.set_default('driver_handles_share_servers', False) self.fake_conf = config.Configuration(None) self._driver = gpfs.GPFSShareDriver(execute=self._gpfs_execute, configuration=self.fake_conf) self._knfs_helper = gpfs.KNFSHelper(self._gpfs_execute, self.fake_conf) self._gnfs_helper = gpfs.GNFSHelper(self._gpfs_execute, self.fake_conf) self.fakedev = "/dev/gpfs0" self.fakefspath = "/gpfs0" self.fakesharepath = "/gpfs0/share-fakeid" self.fakesnapshotpath = "/gpfs0/.snapshots/snapshot-fakesnapshotid" self.mock_object(gpfs.os.path, 'exists', mock.Mock(return_value=True)) self._driver._helpers = { 'KNFS': self._helper_fake } self.share = fake_share.fake_share(share_proto='NFS') self.server = { 'backend_details': { 'ip': '1.2.3.4', 'instance_id': 'fake' } } self.access = fake_share.fake_access() self.snapshot = fake_share.fake_snapshot() self.local_ip = "192.11.22.1" self.remote_ip = "192.11.22.2" gpfs_nfs_server_list = [self.local_ip, self.remote_ip] self._knfs_helper.configuration.gpfs_nfs_server_list = \ gpfs_nfs_server_list self._gnfs_helper.configuration.gpfs_nfs_server_list = \ gpfs_nfs_server_list self._gnfs_helper.configuration.ganesha_config_path = \ "fake_ganesha_config_path" self.sshlogin = "fake_login" self.sshkey = "fake_sshkey" self.gservice = "fake_ganesha_service" self._gnfs_helper.configuration.gpfs_ssh_login = self.sshlogin self._gnfs_helper.configuration.gpfs_ssh_private_key = self.sshkey self._gnfs_helper.configuration.ganesha_service_name = self.gservice self.mock_object(socket, 'gethostname', mock.Mock(return_value="testserver")) self.mock_object(socket, 'gethostbyname_ex', mock.Mock( return_value=('localhost', ['localhost.localdomain', 'testserver'], ['127.0.0.1', self.local_ip]) )) def test__run_ssh(self): cmd_list = ['fake', 'cmd'] expected_cmd = 'fake cmd' ssh_pool = mock.Mock() ssh = mock.Mock() self.mock_object(utils, 'SSHPool', mock.Mock(return_value=ssh_pool)) ssh_pool.item = mock.Mock(return_value=ssh) setattr(ssh, '__enter__', mock.Mock()) setattr(ssh, '__exit__', mock.Mock()) self.mock_object(self._driver, '_gpfs_ssh_execute') self._driver._run_ssh(self.local_ip, cmd_list) self._driver._gpfs_ssh_execute.assert_called_once_with( mock.ANY, expected_cmd, check_exit_code=True) def test__run_ssh_exception(self): cmd_list = ['fake', 'cmd'] ssh_pool = mock.Mock() ssh = mock.Mock() self.mock_object(utils, 'SSHPool', mock.Mock(return_value=ssh_pool)) ssh_pool.item = mock.Mock(return_value=ssh) self.mock_object(self._driver, '_gpfs_ssh_execute') self.assertRaises(exception.GPFSException, self._driver._run_ssh, self.local_ip, cmd_list) def test__gpfs_ssh_execute(self): cmd = 'fake cmd' expected_out = 'cmd successful' expected_err = 'cmd error' ssh = mock.Mock() stdin_stream = mock.Mock() stdout_stream = mock.Mock() stderr_stream = mock.Mock() ssh.exec_command = 
mock.Mock(return_value=(stdin_stream, stdout_stream, stderr_stream)) stdout_stream.channel.recv_exit_status = mock.Mock(return_value=-1) stdout_stream.read = mock.Mock(return_value=expected_out) stderr_stream.read = mock.Mock(return_value=expected_err) stdin_stream.close = mock.Mock() actual_out, actual_err = self._driver._gpfs_ssh_execute(ssh, cmd) self.assertEqual(actual_out, expected_out) self.assertEqual(actual_err, expected_err) def test__gpfs_ssh_execute_exception(self): cmd = 'fake cmd' ssh = mock.Mock() stdin_stream = mock.Mock() stdout_stream = mock.Mock() stderr_stream = mock.Mock() ssh.exec_command = mock.Mock(return_value=(stdin_stream, stdout_stream, stderr_stream)) stdout_stream.channel.recv_exit_status = mock.Mock(return_value=1) stdout_stream.read = mock.Mock() stderr_stream.read = mock.Mock() stdin_stream.close = mock.Mock() self.assertRaises(exception.ProcessExecutionError, self._driver._gpfs_ssh_execute, ssh, cmd) def test_get_share_stats_refresh_false(self): self._driver._stats = {'fake_key': 'fake_value'} result = self._driver.get_share_stats(False) self.assertEqual(self._driver._stats, result) def test_get_share_stats_refresh_true(self): self.mock_object( self._driver, '_get_available_capacity', mock.Mock(return_value=(11111.0, 12345.0))) result = self._driver.get_share_stats(True) expected_keys = [ 'qos', 'driver_version', 'share_backend_name', 'free_capacity_gb', 'total_capacity_gb', 'driver_handles_share_servers', 'reserved_percentage', 'vendor_name', 'storage_protocol', ] for key in expected_keys: self.assertIn(key, result) self.assertFalse(result['driver_handles_share_servers']) self.assertEqual('IBM', result['vendor_name']) self._driver._get_available_capacity.assert_called_once_with( self._driver.configuration.gpfs_mount_point_base) def test_do_setup(self): self.mock_object(self._driver, '_setup_helpers') self._driver.do_setup(self._context) self._driver._setup_helpers.assert_called_once_with() def test_setup_helpers(self): self._driver._helpers = {} CONF.set_default('gpfs_share_helpers', ['KNFS=fakenfs']) self.mock_object(gpfs.importutils, 'import_class', mock.Mock(return_value=self._helper_fake)) self._driver._setup_helpers() gpfs.importutils.import_class.assert_has_calls( [mock.call('fakenfs')] ) self.assertEqual(len(self._driver._helpers), 1) @ddt.data(fake_share.fake_share(), fake_share.fake_share(share_proto='NFSBOGUS')) def test__get_helper_with_wrong_proto(self, share): self.assertRaises(exception.InvalidShare, self._driver._get_helper, share) def test__local_path(self): sharename = 'fakesharename' self._driver.configuration.gpfs_mount_point_base =\ self.fakefspath local_path = self._driver._local_path(sharename) self.assertEqual(self.fakefspath + '/' + sharename, local_path) def test__get_share_path(self): self._driver.configuration.gpfs_mount_point_base =\ self.fakefspath share_path = self._driver._get_share_path(self.share) self.assertEqual(self.fakefspath + '/' + self.share['name'], share_path) def test__get_snapshot_path(self): self._driver.configuration.gpfs_mount_point_base =\ self.fakefspath snapshot_path = self._driver._get_snapshot_path(self.snapshot) self.assertEqual(self.fakefspath + '/' + self.snapshot['share_name'] + '/.snapshots/' + self.snapshot['name'], snapshot_path) def test_check_for_setup_error_for_gpfs_state(self): self.mock_object(self._driver, '_check_gpfs_state', mock.Mock(return_value=False)) self.assertRaises(exception.GPFSException, self._driver.check_for_setup_error) def test_check_for_setup_error_for_export_ip(self): 
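# With GPFS reported active, an unset gpfs_share_export_ip option should make
# check_for_setup_error() raise InvalidParameterValue.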
self.mock_object(self._driver, '_check_gpfs_state', mock.Mock(return_value=True)) self._driver.configuration.gpfs_share_export_ip = None self.assertRaises(exception.InvalidParameterValue, self._driver.check_for_setup_error) def test_check_for_setup_error_for_gpfs_mount_point_base(self): self.mock_object(self._driver, '_check_gpfs_state', mock.Mock(return_value=True)) self._driver.configuration.gpfs_share_export_ip = self.local_ip self._driver.configuration.gpfs_mount_point_base = 'test' self.assertRaises(exception.GPFSException, self._driver.check_for_setup_error) def test_check_for_setup_error_for_directory_check(self): self.mock_object(self._driver, '_check_gpfs_state', mock.Mock(return_value=True)) self._driver.configuration.gpfs_share_export_ip = self.local_ip self._driver.configuration.gpfs_mount_point_base = self.fakefspath self.mock_object(self._driver, '_is_dir', mock.Mock(return_value=False)) self.assertRaises(exception.GPFSException, self._driver.check_for_setup_error) def test_check_for_setup_error_for_gpfs_path_check(self): self.mock_object(self._driver, '_check_gpfs_state', mock.Mock(return_value=True)) self._driver.configuration.gpfs_share_export_ip = self.local_ip self._driver.configuration.gpfs_mount_point_base = self.fakefspath self.mock_object(self._driver, '_is_dir', mock.Mock(return_value=True)) self.mock_object(self._driver, '_is_gpfs_path', mock.Mock(return_value=False)) self.assertRaises(exception.GPFSException, self._driver.check_for_setup_error) def test_check_for_setup_error_for_nfs_server_type(self): self.mock_object(self._driver, '_check_gpfs_state', mock.Mock(return_value=True)) self._driver.configuration.gpfs_share_export_ip = self.local_ip self._driver.configuration.gpfs_mount_point_base = self.fakefspath self.mock_object(self._driver, '_is_dir', mock.Mock(return_value=True)) self.mock_object(self._driver, '_is_gpfs_path', mock.Mock(return_value=True)) self._driver.configuration.gpfs_nfs_server_type = 'test' self.assertRaises(exception.InvalidParameterValue, self._driver.check_for_setup_error) def test_check_for_setup_error_for_nfs_server_list(self): self.mock_object(self._driver, '_check_gpfs_state', mock.Mock(return_value=True)) self._driver.configuration.gpfs_share_export_ip = self.local_ip self._driver.configuration.gpfs_mount_point_base = self.fakefspath self.mock_object(self._driver, '_is_dir', mock.Mock(return_value=True)) self.mock_object(self._driver, '_is_gpfs_path', mock.Mock(return_value=True)) self._driver.configuration.gpfs_nfs_server_type = 'KNFS' self._driver.configuration.gpfs_nfs_server_list = None self.assertRaises(exception.InvalidParameterValue, self._driver.check_for_setup_error) def test__get_available_capacity(self): path = self.fakefspath mock_out = "Filesystem 1-blocks Used Available Capacity Mounted on\n\ /dev/gpfs0 100 30 70 30% /gpfs0" self.mock_object(self._driver, '_gpfs_execute', mock.Mock(return_value=(mock_out, ''))) available, size = self._driver._get_available_capacity(path) self.assertEqual(70, available) self.assertEqual(100, size) def test_create_share(self): self._helper_fake.create_export.return_value = 'fakelocation' methods = ('_create_share', '_get_share_path') for method in methods: self.mock_object(self._driver, method) result = self._driver.create_share(self._context, self.share, share_server=self.server) self._driver._create_share.assert_called_once_with(self.share) self._driver._get_share_path.assert_called_once_with(self.share) self.assertEqual(result, 'fakelocation') def test_create_share_from_snapshot(self): 
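# create_share_from_snapshot() is expected to resolve the new share's path,
# delegate the data copy to _create_share_from_snapshot(), and return the
# export location produced by the protocol helper.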
self._helper_fake.create_export.return_value = 'fakelocation' self._driver._get_share_path = mock.Mock(return_value=self. fakesharepath) self._driver._create_share_from_snapshot = mock.Mock() result = self._driver.create_share_from_snapshot(self._context, self.share, self.snapshot, share_server=None) self._driver._get_share_path.assert_called_once_with(self.share) self._driver._create_share_from_snapshot.assert_called_once_with( self.share, self.snapshot, self.fakesharepath ) self.assertEqual(result, 'fakelocation') def test_create_snapshot(self): self._driver._create_share_snapshot = mock.Mock() self._driver.create_snapshot(self._context, self.snapshot, share_server=None) self._driver._create_share_snapshot.assert_called_once_with( self.snapshot ) def test_delete_share(self): self._driver._get_share_path = mock.Mock( return_value=self.fakesharepath ) self._driver._delete_share = mock.Mock() self._driver.delete_share(self._context, self.share, share_server=None) self._driver._get_share_path.assert_called_once_with(self.share) self._driver._delete_share.assert_called_once_with(self.share) self._helper_fake.remove_export.assert_called_once_with( self.fakesharepath, self.share ) def test_delete_snapshot(self): self._driver._delete_share_snapshot = mock.Mock() self._driver.delete_snapshot(self._context, self.snapshot, share_server=None) self._driver._delete_share_snapshot.assert_called_once_with( self.snapshot ) def test__delete_share_snapshot(self): self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev) self._driver._gpfs_execute = mock.Mock(return_value=0) self._driver._delete_share_snapshot(self.snapshot) self._driver._gpfs_execute.assert_called_once_with( 'mmdelsnapshot', self.fakedev, self.snapshot['name'], '-j', self.snapshot['share_name'] ) self._driver._get_gpfs_device.assert_called_once_with() def test__delete_share_snapshot_exception(self): self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev) self._driver._gpfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError ) self.assertRaises(exception.GPFSException, self._driver._delete_share_snapshot, self.snapshot) self._driver._get_gpfs_device.assert_called_once_with() self._driver._gpfs_execute.assert_called_once_with( 'mmdelsnapshot', self.fakedev, self.snapshot['name'], '-j', self.snapshot['share_name'] ) def test_extend_share(self): self._driver._extend_share = mock.Mock() self._driver.extend_share(self.share, 10) self._driver._extend_share.assert_called_once_with(self.share, 10) def test__extend_share(self): self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev) self._driver._gpfs_execute = mock.Mock(return_value=True) self._driver._extend_share(self.share, 10) self._driver._gpfs_execute.assert_called_once_with('mmsetquota', '-j', self.share['name'], '-h', '10G', self.fakedev) self._driver._get_gpfs_device.assert_called_once_with() def test__extend_share_exception(self): self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev) self._driver._gpfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError ) self.assertRaises(exception.GPFSException, self._driver._extend_share, self.share, 10) self._driver._gpfs_execute.assert_called_once_with('mmsetquota', '-j', self.share['name'], '-h', '10G', self.fakedev) self._driver._get_gpfs_device.assert_called_once_with() def test_allow_access(self): self._driver._get_share_path = mock.Mock( return_value=self.fakesharepath ) self._helper_fake.allow_access = mock.Mock() self._driver.allow_access(self._context, self.share, 
self.access, share_server=None) self._helper_fake.allow_access.assert_called_once_with( self.fakesharepath, self.share, self.access['access_type'], self.access['access_to'] ) self._driver._get_share_path.assert_called_once_with(self.share) def test_deny_access(self): self._driver._get_share_path = mock.Mock(return_value=self. fakesharepath) self._helper_fake.deny_access = mock.Mock() self._driver.deny_access(self._context, self.share, self.access, share_server=None) self._helper_fake.deny_access.assert_called_once_with( self.fakesharepath, self.share, self.access['access_type'], self.access['access_to'] ) self._driver._get_share_path.assert_called_once_with(self.share) def test__check_gpfs_state_active(self): fakeout = "mmgetstate::state:\nmmgetstate::active:" self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, '')) result = self._driver._check_gpfs_state() self._driver._gpfs_execute.assert_called_once_with('mmgetstate', '-Y') self.assertEqual(result, True) def test__check_gpfs_state_down(self): fakeout = "mmgetstate::state:\nmmgetstate::down:" self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, '')) result = self._driver._check_gpfs_state() self._driver._gpfs_execute.assert_called_once_with('mmgetstate', '-Y') self.assertEqual(result, False) def test__check_gpfs_state_wrong_output_exception(self): fakeout = "mmgetstate fake out" self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, '')) self.assertRaises(exception.GPFSException, self._driver._check_gpfs_state) self._driver._gpfs_execute.assert_called_once_with('mmgetstate', '-Y') def test__check_gpfs_state_exception(self): self._driver._gpfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError ) self.assertRaises(exception.GPFSException, self._driver._check_gpfs_state) self._driver._gpfs_execute.assert_called_once_with('mmgetstate', '-Y') def test__is_dir_success(self): fakeoutput = "directory" self._driver._gpfs_execute = mock.Mock(return_value=(fakeoutput, '')) result = self._driver._is_dir(self.fakefspath) self._driver._gpfs_execute.assert_called_once_with( 'stat', '--format=%F', self.fakefspath, run_as_root=False ) self.assertEqual(result, True) def test__is_dir_failure(self): fakeoutput = "regulalr file" self._driver._gpfs_execute = mock.Mock(return_value=(fakeoutput, '')) result = self._driver._is_dir(self.fakefspath) self._driver._gpfs_execute.assert_called_once_with( 'stat', '--format=%F', self.fakefspath, run_as_root=False ) self.assertEqual(result, False) def test__is_dir_exception(self): self._driver._gpfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError ) self.assertRaises(exception.GPFSException, self._driver._is_dir, self.fakefspath) self._driver._gpfs_execute.assert_called_once_with( 'stat', '--format=%F', self.fakefspath, run_as_root=False ) def test__is_gpfs_path_ok(self): self._driver._gpfs_execute = mock.Mock(return_value=0) result = self._driver._is_gpfs_path(self.fakefspath) self._driver._gpfs_execute.assert_called_once_with('mmlsattr', self.fakefspath) self.assertEqual(result, True) def test__is_gpfs_path_exception(self): self._driver._gpfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError ) self.assertRaises(exception.GPFSException, self._driver._is_gpfs_path, self.fakefspath) self._driver._gpfs_execute.assert_called_once_with('mmlsattr', self.fakefspath) def test__get_gpfs_device(self): fakeout = "Filesystem\n" + self.fakedev orig_val = self._driver.configuration.gpfs_mount_point_base self._driver.configuration.gpfs_mount_point_base = 
self.fakefspath self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, '')) result = self._driver._get_gpfs_device() self._driver._gpfs_execute.assert_called_once_with('df', self.fakefspath) self.assertEqual(result, self.fakedev) self._driver.configuration.gpfs_mount_point_base = orig_val def test__get_gpfs_device_exception(self): self._driver._gpfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.GPFSException, self._driver._get_gpfs_device) def test__create_share(self): sizestr = '%sG' % self.share['size'] self._driver._gpfs_execute = mock.Mock(return_value=True) self._driver._local_path = mock.Mock(return_value=self.fakesharepath) self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev) self._driver._create_share(self.share) self._driver._gpfs_execute.assert_any_call('mmcrfileset', self.fakedev, self.share['name'], '--inode-space', 'new') self._driver._gpfs_execute.assert_any_call('mmlinkfileset', self.fakedev, self.share['name'], '-J', self.fakesharepath) self._driver._gpfs_execute.assert_any_call('mmsetquota', '-j', self.share['name'], '-h', sizestr, self.fakedev) self._driver._gpfs_execute.assert_any_call('chmod', '777', self.fakesharepath) self._driver._local_path.assert_called_once_with(self.share['name']) self._driver._get_gpfs_device.assert_called_once_with() def test__create_share_exception(self): self._driver._local_path = mock.Mock(return_value=self.fakesharepath) self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev) self._driver._gpfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError ) self.assertRaises(exception.GPFSException, self._driver._create_share, self.share) self._driver._get_gpfs_device.assert_called_once_with() self._driver._local_path.assert_called_once_with(self.share['name']) self._driver._gpfs_execute.assert_called_once_with('mmcrfileset', self.fakedev, self.share['name'], '--inode-space', 'new') def test__delete_share(self): self._driver._gpfs_execute = mock.Mock(return_value=True) self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev) self._driver._delete_share(self.share) self._driver._gpfs_execute.assert_any_call( 'mmunlinkfileset', self.fakedev, self.share['name'], '-f') self._driver._gpfs_execute.assert_any_call( 'mmdelfileset', self.fakedev, self.share['name'], '-f') self._driver._get_gpfs_device.assert_called_once_with() def test__delete_share_exception(self): self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev) self._driver._gpfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError ) self.assertRaises(exception.GPFSException, self._driver._delete_share, self.share) self._driver._get_gpfs_device.assert_called_once_with() self._driver._gpfs_execute.assert_called_once_with( 'mmunlinkfileset', self.fakedev, self.share['name'], '-f') def test__create_share_snapshot(self): self._driver._gpfs_execute = mock.Mock(return_value=True) self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev) self._driver._create_share_snapshot(self.snapshot) self._driver._gpfs_execute.assert_called_once_with( 'mmcrsnapshot', self.fakedev, self.snapshot['name'], '-j', self.snapshot['share_name'] ) self._driver._get_gpfs_device.assert_called_once_with() def test__create_share_snapshot_exception(self): self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev) self._driver._gpfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError ) self.assertRaises(exception.GPFSException, self._driver._create_share_snapshot, 
self.snapshot) self._driver._get_gpfs_device.assert_called_once_with() self._driver._gpfs_execute.assert_called_once_with( 'mmcrsnapshot', self.fakedev, self.snapshot['name'], '-j', self.snapshot['share_name'] ) def test__create_share_from_snapshot(self): self._driver._gpfs_execute = mock.Mock(return_value=True) self._driver._create_share = mock.Mock(return_value=True) self._driver._get_snapshot_path = mock.Mock(return_value=self. fakesnapshotpath) self._driver._create_share_from_snapshot(self.share, self.snapshot, self.fakesharepath) self._driver._gpfs_execute.assert_called_once_with( 'rsync', '-rp', self.fakesnapshotpath + '/', self.fakesharepath ) self._driver._create_share.assert_called_once_with(self.share) self._driver._get_snapshot_path.assert_called_once_with(self.snapshot) def test__create_share_from_snapshot_exception(self): self._driver._create_share = mock.Mock(return_value=True) self._driver._get_snapshot_path = mock.Mock(return_value=self. fakesnapshotpath) self._driver._gpfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError ) self.assertRaises(exception.GPFSException, self._driver._create_share_from_snapshot, self.share, self.snapshot, self.fakesharepath) self._driver._create_share.assert_called_once_with(self.share) self._driver._get_snapshot_path.assert_called_once_with(self.snapshot) self._driver._gpfs_execute.assert_called_once_with( 'rsync', '-rp', self.fakesnapshotpath + '/', self.fakesharepath ) def test__gpfs_local_execute(self): self.mock_object(utils, 'execute', mock.Mock(return_value=True)) cmd = "testcmd" self._driver._gpfs_local_execute(cmd) utils.execute.assert_called_once_with(cmd, run_as_root=True) def test__gpfs_remote_execute(self): self._driver._run_ssh = mock.Mock(return_value=True) cmd = "testcmd" orig_value = self._driver.configuration.gpfs_share_export_ip self._driver.configuration.gpfs_share_export_ip = self.local_ip self._driver._gpfs_remote_execute(cmd, check_exit_code=True) self._driver._run_ssh.assert_called_once_with( self.local_ip, tuple([cmd]), True ) self._driver.configuration.gpfs_share_export_ip = orig_value def test_knfs_allow_access(self): self._knfs_helper._execute = mock.Mock( return_value=['/fs0 ', 0] ) self.mock_object(re, 'search', mock.Mock(return_value=None)) export_opts = None self._knfs_helper._get_export_options = mock.Mock( return_value=export_opts ) self._knfs_helper._publish_access = mock.Mock() access_type = self.access['access_type'] access = self.access['access_to'] local_path = self.fakesharepath self._knfs_helper.allow_access(local_path, self.share, access_type, access) self._knfs_helper._execute.assert_called_once_with('exportfs', run_as_root=True) self.assertTrue(re.search.called) self._knfs_helper._get_export_options.assert_any_call(self.share) cmd = ['exportfs', '-o', export_opts, ':'.join([access, local_path])] self._knfs_helper._publish_access.assert_called_once_with(*cmd) def test_knfs_allow_access_access_exists(self): out = ['/fs0 ', 0] self._knfs_helper._execute = mock.Mock(return_value=out) self.mock_object(re, 'search', mock.Mock(return_value="fake")) self._knfs_helper._get_export_options = mock.Mock() access_type = self.access['access_type'] access = self.access['access_to'] local_path = self.fakesharepath self.assertRaises(exception.ShareAccessExists, self._knfs_helper.allow_access, local_path, self.share, access_type, access) self._knfs_helper._execute.assert_any_call('exportfs', run_as_root=True) self.assertTrue(re.search.called) self.assertFalse(self._knfs_helper._get_export_options.called) 
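# NOTE(illustrative): the KNFS helper tests above and below assume access is
# granted and revoked with plain exportfs invocations, which _publish_access
# then replays over SSH to the other servers in gpfs_nfs_server_list.
# Sketch of the asserted command shapes only:
#
#     exportfs -o <export_options> <access_to>:<local_path>   # allow_access
#     exportfs -u <access_to>:<local_path>                    # deny_access
#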
def test_knfs_allow_access_invalid_access(self): access_type = 'invalid_access_type' self.assertRaises(exception.InvalidShareAccess, self._knfs_helper.allow_access, self.fakesharepath, self.share, access_type, self.access['access_to']) def test_knfs_allow_access_exception(self): self._knfs_helper._execute = mock.Mock( side_effect=exception.ProcessExecutionError ) access_type = self.access['access_type'] access = self.access['access_to'] local_path = self.fakesharepath self.assertRaises(exception.GPFSException, self._knfs_helper.allow_access, local_path, self.share, access_type, access) self._knfs_helper._execute.assert_called_once_with('exportfs', run_as_root=True) def test_knfs_deny_access(self): self._knfs_helper._publish_access = mock.Mock() access = self.access['access_to'] access_type = self.access['access_type'] local_path = self.fakesharepath self._knfs_helper.deny_access(local_path, self.share, access_type, access) cmd = ['exportfs', '-u', ':'.join([access, local_path])] self._knfs_helper._publish_access.assert_called_once_with(*cmd) def test_knfs_deny_access_exception(self): self._knfs_helper._publish_access = mock.Mock( side_effect=exception.ProcessExecutionError ) access = self.access['access_to'] access_type = self.access['access_type'] local_path = self.fakesharepath cmd = ['exportfs', '-u', ':'.join([access, local_path])] self.assertRaises(exception.GPFSException, self._knfs_helper.deny_access, local_path, self.share, access_type, access) self._knfs_helper._publish_access.assert_called_once_with(*cmd) def test_knfs__publish_access(self): self.mock_object(utils, 'execute') cmd = ['fakecmd'] self._knfs_helper._publish_access(*cmd) utils.execute.assert_any_call(*cmd, run_as_root=True, check_exit_code=True) remote_login = self.sshlogin + '@' + self.remote_ip cmd = ['ssh', remote_login] + list(cmd) utils.execute.assert_any_call(*cmd, run_as_root=False, check_exit_code=True) self.assertTrue(socket.gethostbyname_ex.called) self.assertTrue(socket.gethostname.called) def test_knfs__publish_access_exception(self): self.mock_object( utils, 'execute', mock.Mock(side_effect=exception.ProcessExecutionError)) cmd = ['fakecmd'] self.assertRaises(exception.ProcessExecutionError, self._knfs_helper._publish_access, *cmd) self.assertTrue(socket.gethostbyname_ex.called) self.assertTrue(socket.gethostname.called) utils.execute.assert_called_once_with(*cmd, run_as_root=True, check_exit_code=True) def test_gnfs_allow_access(self): self._gnfs_helper._ganesha_process_request = mock.Mock() access = self.access['access_to'] access_type = self.access['access_type'] local_path = self.fakesharepath self._gnfs_helper.allow_access(local_path, self.share, access_type, access) self._gnfs_helper._ganesha_process_request.assert_called_once_with( "allow_access", local_path, self.share, access_type, access ) def test_gnfs_allow_access_invalid_access(self): access_type = 'invalid_access_type' self.assertRaises(exception.InvalidShareAccess, self._gnfs_helper.allow_access, self.fakesharepath, self.share, access_type, self.access['access_to']) def test_gnfs_deny_access(self): self._gnfs_helper._ganesha_process_request = mock.Mock() access = self.access['access_to'] access_type = self.access['access_type'] local_path = self.fakesharepath self._gnfs_helper.deny_access(local_path, self.share, access_type, access) self._gnfs_helper._ganesha_process_request.assert_called_once_with( "deny_access", local_path, self.share, access_type, access, False ) def test_gnfs_remove_export(self): 
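# remove_export() on the GNFS helper is expected to delegate straight to
# _ganesha_process_request("remove_export", local_path, share).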
self._gnfs_helper._ganesha_process_request = mock.Mock() local_path = self.fakesharepath self._gnfs_helper.remove_export(local_path, self.share) self._gnfs_helper._ganesha_process_request.assert_called_once_with( "remove_export", local_path, self.share ) def test_gnfs__ganesha_process_request_allow_access(self): access = self.access['access_to'] access_type = self.access['access_type'] local_path = self.fakesharepath cfgpath = self._gnfs_helper.configuration.ganesha_config_path gservers = self._gnfs_helper.configuration.gpfs_nfs_server_list export_opts = [] pre_lines = [] exports = {} self._gnfs_helper._get_export_options = mock.Mock( return_value=export_opts ) self.mock_object(ganesha_utils, 'parse_ganesha_config', mock.Mock( return_value=(pre_lines, exports) )) self.mock_object(ganesha_utils, 'export_exists', mock.Mock( return_value=False )) self.mock_object(ganesha_utils, 'get_next_id', mock.Mock( return_value=101 )) self.mock_object(ganesha_utils, 'get_export_template', mock.Mock( return_value={} )) self.mock_object(ganesha_utils, 'publish_ganesha_config') self.mock_object(ganesha_utils, 'reload_ganesha_config') self._gnfs_helper._ganesha_process_request( "allow_access", local_path, self.share, access_type, access ) self._gnfs_helper._get_export_options.assert_called_once_with( self.share ) ganesha_utils.export_exists.assert_called_once_with(exports, local_path) ganesha_utils.parse_ganesha_config.assert_called_once_with(cfgpath) ganesha_utils.publish_ganesha_config.assert_called_once_with( gservers, self.sshlogin, self.sshkey, cfgpath, pre_lines, exports ) ganesha_utils.reload_ganesha_config.assert_called_once_with( gservers, self.sshlogin, self.gservice ) def test_gnfs__ganesha_process_request_deny_access(self): access = self.access['access_to'] access_type = self.access['access_type'] local_path = self.fakesharepath cfgpath = self._gnfs_helper.configuration.ganesha_config_path gservers = self._gnfs_helper.configuration.gpfs_nfs_server_list pre_lines = [] initial_access = "10.0.0.1,10.0.0.2" export = {"rw_access": initial_access} exports = {} self.mock_object(ganesha_utils, 'parse_ganesha_config', mock.Mock( return_value=(pre_lines, exports) )) self.mock_object(ganesha_utils, 'get_export_by_path', mock.Mock( return_value=export )) self.mock_object(ganesha_utils, 'format_access_list', mock.Mock( return_value="10.0.0.1" )) self.mock_object(ganesha_utils, 'publish_ganesha_config') self.mock_object(ganesha_utils, 'reload_ganesha_config') self._gnfs_helper._ganesha_process_request( "deny_access", local_path, self.share, access_type, access ) ganesha_utils.parse_ganesha_config.assert_called_once_with(cfgpath) ganesha_utils.get_export_by_path.assert_called_once_with(exports, local_path) ganesha_utils.format_access_list.assert_called_once_with( initial_access, deny_access=access ) ganesha_utils.publish_ganesha_config.assert_called_once_with( gservers, self.sshlogin, self.sshkey, cfgpath, pre_lines, exports ) ganesha_utils.reload_ganesha_config.assert_called_once_with( gservers, self.sshlogin, self.gservice ) def test_gnfs__ganesha_process_request_remove_export(self): local_path = self.fakesharepath cfgpath = self._gnfs_helper.configuration.ganesha_config_path pre_lines = [] exports = {} export = {} self.mock_object(ganesha_utils, 'parse_ganesha_config', mock.Mock( return_value=(pre_lines, exports) )) self.mock_object(ganesha_utils, 'get_export_by_path', mock.Mock( return_value=export )) self.mock_object(ganesha_utils, 'publish_ganesha_config') self.mock_object(ganesha_utils, 
'reload_ganesha_config') self._gnfs_helper._ganesha_process_request( "remove_export", local_path, self.share ) ganesha_utils.parse_ganesha_config.assert_called_once_with(cfgpath) ganesha_utils.get_export_by_path.assert_called_once_with(exports, local_path) self.assertFalse(ganesha_utils.publish_ganesha_config.called) self.assertFalse(ganesha_utils.reload_ganesha_config.called) manila-2.0.0/manila/tests/share/drivers/netapp/0000775000567000056710000000000012701407265022571 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/netapp/test_common.py0000664000567000056710000001447712701407107025502 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import six from manila import exception from manila.share.drivers.netapp import common as na_common from manila.share.drivers.netapp.dataontap.cluster_mode import drv_multi_svm from manila.share.drivers.netapp import utils as na_utils from manila import test from manila.tests.share.drivers.netapp import fakes as na_fakes class NetAppDriverFactoryTestCase(test.TestCase): def setUp(self): super(NetAppDriverFactoryTestCase, self).setUp() def test_new(self): self.mock_object(na_utils.OpenStackInfo, 'info', mock.Mock(return_value='fake_info')) mock_get_driver_mode = self.mock_object( na_common.NetAppDriver, '_get_driver_mode', mock.Mock(return_value='fake_mode')) mock_create_driver = self.mock_object(na_common.NetAppDriver, '_create_driver') config = na_fakes.create_configuration() config.netapp_storage_family = 'fake_family' config.driver_handles_share_servers = True kwargs = {'configuration': config} na_common.NetAppDriver(**kwargs) kwargs['app_version'] = 'fake_info' mock_get_driver_mode.assert_called_once_with('fake_family', True) mock_create_driver.assert_called_once_with('fake_family', 'fake_mode', *(), **kwargs) def test_new_missing_config(self): self.mock_object(na_utils.OpenStackInfo, 'info') self.mock_object(na_common.NetAppDriver, '_create_driver') self.assertRaises(exception.InvalidInput, na_common.NetAppDriver, **{}) def test_new_missing_family(self): self.mock_object(na_utils.OpenStackInfo, 'info') self.mock_object(na_common.NetAppDriver, '_create_driver') config = na_fakes.create_configuration() config.driver_handles_share_servers = True config.netapp_storage_family = None kwargs = {'configuration': config} self.assertRaises(exception.InvalidInput, na_common.NetAppDriver, **kwargs) def test_new_missing_mode(self): self.mock_object(na_utils.OpenStackInfo, 'info') self.mock_object(na_common.NetAppDriver, '_create_driver') config = na_fakes.create_configuration() config.driver_handles_share_servers = None config.netapp_storage_family = 'fake_family' kwargs = {'configuration': config} self.assertRaises(exception.InvalidInput, na_common.NetAppDriver, **kwargs) def test_get_driver_mode_missing_mode_good_default(self): result = na_common.NetAppDriver._get_driver_mode('ONTAP_CLUSTER', None) self.assertEqual(na_common.MULTI_SVM, result) def 
test_create_driver_missing_mode_no_default(self): self.assertRaises(exception.InvalidInput, na_common.NetAppDriver._get_driver_mode, 'fake_family', None) def test_get_driver_mode_multi_svm(self): result = na_common.NetAppDriver._get_driver_mode('ONTAP_CLUSTER', True) self.assertEqual(na_common.MULTI_SVM, result) def test_get_driver_mode_single_svm(self): result = na_common.NetAppDriver._get_driver_mode('ONTAP_CLUSTER', False) self.assertEqual(na_common.SINGLE_SVM, result) def test_create_driver(self): def get_full_class_name(obj): return obj.__module__ + '.' + obj.__class__.__name__ registry = na_common.NETAPP_UNIFIED_DRIVER_REGISTRY for family in six.iterkeys(registry): for mode, full_class_name in registry[family].items(): config = na_fakes.create_configuration() config.local_conf.set_override('driver_handles_share_servers', mode == na_common.MULTI_SVM) kwargs = { 'configuration': config, 'private_storage': mock.Mock(), 'app_version': 'fake_info' } driver = na_common.NetAppDriver._create_driver( family, mode, **kwargs) self.assertEqual(full_class_name, get_full_class_name(driver)) def test_create_driver_case_insensitive(self): config = na_fakes.create_configuration() config.local_conf.set_override('driver_handles_share_servers', True) kwargs = { 'configuration': config, 'private_storage': mock.Mock(), 'app_version': 'fake_info' } driver = na_common.NetAppDriver._create_driver('ONTAP_CLUSTER', na_common.MULTI_SVM, **kwargs) self.assertIsInstance(driver, drv_multi_svm.NetAppCmodeMultiSvmShareDriver) def test_create_driver_invalid_family(self): kwargs = { 'configuration': na_fakes.create_configuration(), 'app_version': 'fake_info', } self.assertRaises(exception.InvalidInput, na_common.NetAppDriver._create_driver, 'fake_family', na_common.MULTI_SVM, **kwargs) def test_create_driver_invalid_mode(self): kwargs = { 'configuration': na_fakes.create_configuration(), 'app_version': 'fake_info', } self.assertRaises(exception.InvalidInput, na_common.NetAppDriver._create_driver, 'ontap_cluster', 'fake_mode', **kwargs) manila-2.0.0/manila/tests/share/drivers/netapp/__init__.py0000664000567000056710000000000012701407107024663 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/netapp/test_utils.py0000664000567000056710000003735012701407107025345 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
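# NOTE(illustrative): the cases below cover the NetApp driver utility module:
# tracing toggled via na_utils.setup_tracing(), required-flag validation,
# list conversion, and the OpenStackInfo class that probes the installed
# package (version strings, rpm, dpkg, platform) for reporting. A typical
# flag-validation assertion, using fake attribute names, looks like:
#
#     configuration = type('Fake', (object,), {'flag1': 'value1'})
#     na_utils.check_flags(['flag1', 'flag2'], configuration)
#     # -> raises exception.InvalidInput because 'flag2' is missing
#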
""" Mock unit tests for the NetApp driver utility module """ import platform import mock from oslo_concurrency import processutils as putils from oslo_log import log from manila import exception from manila.share.drivers.netapp import utils as na_utils from manila import test from manila import version class NetAppDriverUtilsTestCase(test.TestCase): def setUp(self): super(NetAppDriverUtilsTestCase, self).setUp() # Mock loggers as themselves to allow logger arg validation mock_logger = log.getLogger('mock_logger') self.mock_object(na_utils.LOG, 'warning', mock.Mock(side_effect=mock_logger.warning)) self.mock_object(na_utils.LOG, 'debug', mock.Mock(side_effect=mock_logger.debug)) na_utils.setup_tracing(None) def test_setup_tracing(self): na_utils.setup_tracing(None) self.assertFalse(na_utils.TRACE_API) self.assertFalse(na_utils.TRACE_METHOD) self.assertEqual(0, na_utils.LOG.warning.call_count) na_utils.setup_tracing('method') self.assertFalse(na_utils.TRACE_API) self.assertTrue(na_utils.TRACE_METHOD) self.assertEqual(0, na_utils.LOG.warning.call_count) na_utils.setup_tracing('method,api') self.assertTrue(na_utils.TRACE_API) self.assertTrue(na_utils.TRACE_METHOD) self.assertEqual(0, na_utils.LOG.warning.call_count) def test_setup_tracing_invalid_key(self): na_utils.setup_tracing('method,fake') self.assertFalse(na_utils.TRACE_API) self.assertTrue(na_utils.TRACE_METHOD) self.assertEqual(1, na_utils.LOG.warning.call_count) @na_utils.trace def _trace_test_method(*args, **kwargs): return 'OK' def test_trace_no_tracing(self): result = self._trace_test_method() self.assertEqual('OK', result) self.assertEqual(0, na_utils.LOG.debug.call_count) na_utils.setup_tracing('method') def test_trace_method_tracing(self): na_utils.setup_tracing('method') result = self._trace_test_method() self.assertEqual('OK', result) self.assertEqual(2, na_utils.LOG.debug.call_count) def test_validate_driver_instantiation_proxy(self): kwargs = {'netapp_mode': 'proxy'} na_utils.validate_driver_instantiation(**kwargs) self.assertEqual(0, na_utils.LOG.warning.call_count) def test_validate_driver_instantiation_no_proxy(self): kwargs = {'netapp_mode': 'asdf'} na_utils.validate_driver_instantiation(**kwargs) self.assertEqual(1, na_utils.LOG.warning.call_count) def test_check_flags(self): configuration = type('Fake', (object,), {'flag1': 'value1', 'flag2': 'value2'}) self.assertIsNone(na_utils.check_flags(['flag1', 'flag2'], configuration)) def test_check_flags_missing_flag(self): configuration = type('Fake', (object,), {'flag1': 'value1', 'flag3': 'value3'}) self.assertRaises(exception.InvalidInput, na_utils.check_flags, ['flag1', 'flag2'], configuration) def test_convert_to_list(self): self.assertListEqual([], na_utils.convert_to_list(None)) self.assertListEqual(['test'], na_utils.convert_to_list('test')) self.assertListEqual(['a'], na_utils.convert_to_list(['a'])) self.assertListEqual(['a', 'b'], na_utils.convert_to_list(['a', 'b'])) self.assertListEqual([1, 2, 3], na_utils.convert_to_list((1, 2, 3))) self.assertListEqual([5], na_utils.convert_to_list(5)) self.assertListEqual( sorted(['key1', 'key2']), sorted(na_utils.convert_to_list({'key1': 'value1', 'key2': 'value2'}))) class OpenstackInfoTestCase(test.TestCase): UNKNOWN_VERSION = 'unknown version' UNKNOWN_RELEASE = 'unknown release' UNKNOWN_VENDOR = 'unknown vendor' UNKNOWN_PLATFORM = 'unknown platform' VERSION_STRING_RET_VAL = 'fake_version_1' RELEASE_STRING_RET_VAL = 'fake_release_1' PLATFORM_RET_VAL = 'fake_platform_1' VERSION_INFO_VERSION = 'fake_version_2' 
VERSION_INFO_RELEASE = 'fake_release_2' RPM_INFO_VERSION = 'fake_version_3' RPM_INFO_RELEASE = 'fake_release_3' RPM_INFO_VENDOR = 'fake vendor 3' PUTILS_RPM_RET_VAL = ('fake_version_3 fake_release_3 fake vendor 3', '') NO_PKG_FOUND = ('', 'whatever') PUTILS_DPKG_RET_VAL = ('epoch:upstream_version-debian_revision', '') DEB_RLS = 'upstream_version-debian_revision' DEB_VENDOR = 'debian_revision' def setUp(self): super(OpenstackInfoTestCase, self).setUp() def test_openstack_info_init(self): info = na_utils.OpenStackInfo() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(version.version_info, 'version_string', mock.Mock(return_value=VERSION_STRING_RET_VAL)) def test_update_version_from_version_string(self): info = na_utils.OpenStackInfo() info._update_version_from_version_string() self.assertEqual(self.VERSION_STRING_RET_VAL, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(version.version_info, 'version_string', mock.Mock(side_effect=Exception)) def test_exception_in_update_version_from_version_string(self): info = na_utils.OpenStackInfo() info._update_version_from_version_string() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(version.version_info, 'release_string', mock.Mock(return_value=RELEASE_STRING_RET_VAL)) def test_update_release_from_release_string(self): info = na_utils.OpenStackInfo() info._update_release_from_release_string() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.RELEASE_STRING_RET_VAL, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(version.version_info, 'release_string', mock.Mock(side_effect=Exception)) def test_exception_in_update_release_from_release_string(self): info = na_utils.OpenStackInfo() info._update_release_from_release_string() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(platform, 'platform', mock.Mock(return_value=PLATFORM_RET_VAL)) def test_update_platform(self): info = na_utils.OpenStackInfo() info._update_platform() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.PLATFORM_RET_VAL, info._platform) @mock.patch.object(platform, 'platform', mock.Mock(side_effect=Exception)) def test_exception_in_update_platform(self): info = na_utils.OpenStackInfo() info._update_platform() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version', mock.Mock(return_value=VERSION_INFO_VERSION)) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release', 
mock.Mock(return_value=VERSION_INFO_RELEASE)) def test_update_info_from_version_info(self): info = na_utils.OpenStackInfo() info._update_info_from_version_info() self.assertEqual(self.VERSION_INFO_VERSION, info._version) self.assertEqual(self.VERSION_INFO_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version', mock.Mock(return_value='')) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release', mock.Mock(return_value=None)) def test_no_info_from_version_info(self): info = na_utils.OpenStackInfo() info._update_info_from_version_info() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version', mock.Mock(return_value=VERSION_INFO_VERSION)) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release', mock.Mock(side_effect=Exception)) def test_exception_in_info_from_version_info(self): info = na_utils.OpenStackInfo() info._update_info_from_version_info() self.assertEqual(self.VERSION_INFO_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(putils, 'execute', mock.Mock(return_value=PUTILS_RPM_RET_VAL)) def test_update_info_from_rpm(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_rpm() self.assertEqual(self.RPM_INFO_VERSION, info._version) self.assertEqual(self.RPM_INFO_RELEASE, info._release) self.assertEqual(self.RPM_INFO_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertTrue(found_package) @mock.patch.object(putils, 'execute', mock.Mock(return_value=NO_PKG_FOUND)) def test_update_info_from_rpm_no_pkg_found(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_rpm() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertFalse(found_package) @mock.patch.object(putils, 'execute', mock.Mock(side_effect=Exception)) def test_exception_in_update_info_from_rpm(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_rpm() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertFalse(found_package) @mock.patch.object(putils, 'execute', mock.Mock(return_value=PUTILS_DPKG_RET_VAL)) def test_update_info_from_dpkg(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_dpkg() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.DEB_RLS, info._release) self.assertEqual(self.DEB_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertTrue(found_package) @mock.patch.object(putils, 'execute', mock.Mock(return_value=NO_PKG_FOUND)) def test_update_info_from_dpkg_no_pkg_found(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_dpkg() self.assertEqual(self.UNKNOWN_VERSION, info._version) 
self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertFalse(found_package) @mock.patch.object(putils, 'execute', mock.Mock(side_effect=Exception)) def test_exception_in_update_info_from_dpkg(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_dpkg() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertFalse(found_package) @mock.patch.object(na_utils.OpenStackInfo, '_update_version_from_version_string', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_release_from_release_string', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_platform', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_version_info', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_rpm', mock.Mock(return_value=True)) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_dpkg') def test_update_openstack_info_rpm_pkg_found(self, mock_updt_from_dpkg): info = na_utils.OpenStackInfo() info._update_openstack_info() self.assertFalse(mock_updt_from_dpkg.called) @mock.patch.object(na_utils.OpenStackInfo, '_update_version_from_version_string', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_release_from_release_string', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_platform', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_version_info', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_rpm', mock.Mock(return_value=False)) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_dpkg') def test_update_openstack_info_rpm_pkg_not_found(self, mock_updt_from_dpkg): info = na_utils.OpenStackInfo() info._update_openstack_info() self.assertTrue(mock_updt_from_dpkg.called) manila-2.0.0/manila/tests/share/drivers/netapp/dataontap/0000775000567000056710000000000012701407265024544 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/netapp/dataontap/protocols/0000775000567000056710000000000012701407265026570 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/netapp/dataontap/protocols/__init__.py0000664000567000056710000000000012701407107030662 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/netapp/dataontap/protocols/test_base.py0000664000567000056710000000306612701407107031113 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Mock unit tests for the NetApp driver protocols base class module. 
""" import ddt from manila.common import constants from manila.share.drivers.netapp.dataontap.protocols import nfs_cmode from manila import test @ddt.ddt class NetAppNASHelperBaseTestCase(test.TestCase): def test_set_client(self): # The base class is abstract, so we'll use a subclass to test # base class functionality. helper = nfs_cmode.NetAppCmodeNFSHelper() self.assertIsNone(helper._client) helper.set_client('fake_client') self.assertEqual('fake_client', helper._client) @ddt.data( {'level': constants.ACCESS_LEVEL_RW, 'readonly': False}, {'level': constants.ACCESS_LEVEL_RO, 'readonly': True}) @ddt.unpack def test_is_readonly(self, level, readonly): helper = nfs_cmode.NetAppCmodeNFSHelper() result = helper._is_readonly(level) self.assertEqual(readonly, result) manila-2.0.0/manila/tests/share/drivers/netapp/dataontap/protocols/test_cifs_cmode.py0000664000567000056710000002024012701407112032261 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Mock unit tests for the NetApp driver protocols CIFS class module. """ import copy import ddt import mock from oslo_log import log from manila.common import constants from manila import exception from manila.share.drivers.netapp.dataontap.protocols import cifs_cmode from manila import test from manila.tests.share.drivers.netapp.dataontap.protocols \ import fakes as fake @ddt.ddt class NetAppClusteredCIFSHelperTestCase(test.TestCase): def setUp(self): super(NetAppClusteredCIFSHelperTestCase, self).setUp() # Mock loggers as themselves to allow logger arg validation mock_logger = log.getLogger('mock_logger') self.mock_object(cifs_cmode.LOG, 'error', mock.Mock(side_effect=mock_logger.error)) self.mock_context = mock.Mock() self.mock_client = mock.Mock() self.helper = cifs_cmode.NetAppCmodeCIFSHelper() self.helper.set_client(self.mock_client) def test_create_share(self): result = self.helper.create_share(fake.CIFS_SHARE, fake.SHARE_NAME) export_addresses = [fake.SHARE_ADDRESS_1, fake.SHARE_ADDRESS_2] export_paths = [result(address) for address in export_addresses] expected_paths = [ r'\\%s\%s' % (fake.SHARE_ADDRESS_1, fake.SHARE_NAME), r'\\%s\%s' % (fake.SHARE_ADDRESS_2, fake.SHARE_NAME), ] self.assertEqual(expected_paths, export_paths) self.mock_client.create_cifs_share.assert_called_once_with( fake.SHARE_NAME) self.mock_client.remove_cifs_share_access.assert_called_once_with( fake.SHARE_NAME, 'Everyone') def test_delete_share(self): self.helper.delete_share(fake.CIFS_SHARE, fake.SHARE_NAME) self.mock_client.remove_cifs_share.assert_called_once_with( fake.SHARE_NAME) def test_update_access(self): mock_validate_access_rule = self.mock_object(self.helper, '_validate_access_rule') mock_get_access_rules = self.mock_object( self.helper, '_get_access_rules', mock.Mock(return_value=fake.EXISTING_CIFS_RULES)) mock_handle_added_rules = self.mock_object(self.helper, '_handle_added_rules') mock_handle_ro_to_rw_rules = self.mock_object(self.helper, '_handle_ro_to_rw_rules') 
mock_handle_rw_to_ro_rules = self.mock_object(self.helper, '_handle_rw_to_ro_rules') mock_handle_deleted_rules = self.mock_object(self.helper, '_handle_deleted_rules') self.helper.update_access(fake.CIFS_SHARE, fake.SHARE_NAME, [fake.USER_ACCESS]) new_rules = {'fake_user': constants.ACCESS_LEVEL_RW} mock_validate_access_rule.assert_called_once_with(fake.USER_ACCESS) mock_get_access_rules.assert_called_once_with(fake.CIFS_SHARE, fake.SHARE_NAME) mock_handle_added_rules.assert_called_once_with( fake.SHARE_NAME, fake.EXISTING_CIFS_RULES, new_rules) mock_handle_ro_to_rw_rules.assert_called_once_with( fake.SHARE_NAME, fake.EXISTING_CIFS_RULES, new_rules) mock_handle_rw_to_ro_rules.assert_called_once_with( fake.SHARE_NAME, fake.EXISTING_CIFS_RULES, new_rules) mock_handle_deleted_rules.assert_called_once_with( fake.SHARE_NAME, fake.EXISTING_CIFS_RULES, new_rules) def test_validate_access_rule(self): result = self.helper._validate_access_rule(fake.USER_ACCESS) self.assertIsNone(result) def test_validate_access_rule_invalid_type(self): rule = copy.copy(fake.USER_ACCESS) rule['access_type'] = 'ip' self.assertRaises(exception.InvalidShareAccess, self.helper._validate_access_rule, rule) def test_validate_access_rule_invalid_level(self): rule = copy.copy(fake.USER_ACCESS) rule['access_level'] = 'none' self.assertRaises(exception.InvalidShareAccessLevel, self.helper._validate_access_rule, rule) def test_handle_added_rules(self): self.helper._handle_added_rules(fake.SHARE_NAME, fake.EXISTING_CIFS_RULES, fake.NEW_CIFS_RULES) self.mock_client.add_cifs_share_access.assert_has_calls([ mock.call(fake.SHARE_NAME, 'user5', False), mock.call(fake.SHARE_NAME, 'user6', True), ], any_order=True) def test_handle_ro_to_rw_rules(self): self.helper._handle_ro_to_rw_rules(fake.SHARE_NAME, fake.EXISTING_CIFS_RULES, fake.NEW_CIFS_RULES) self.mock_client.modify_cifs_share_access.assert_has_calls([ mock.call(fake.SHARE_NAME, 'user2', False) ]) def test_handle_rw_to_ro_rules(self): self.helper._handle_rw_to_ro_rules(fake.SHARE_NAME, fake.EXISTING_CIFS_RULES, fake.NEW_CIFS_RULES) self.mock_client.modify_cifs_share_access.assert_has_calls([ mock.call(fake.SHARE_NAME, 'user3', True) ]) def test_handle_deleted_rules(self): self.helper._handle_deleted_rules(fake.SHARE_NAME, fake.EXISTING_CIFS_RULES, fake.NEW_CIFS_RULES) self.mock_client.remove_cifs_share_access.assert_has_calls([ mock.call(fake.SHARE_NAME, 'user4') ]) def test_get_access_rules(self): self.mock_client.get_cifs_share_access = ( mock.Mock(return_value='fake_rules')) result = self.helper._get_access_rules(fake.CIFS_SHARE, fake.SHARE_NAME) self.assertEqual('fake_rules', result) self.mock_client.get_cifs_share_access.assert_called_once_with( fake.SHARE_NAME) def test_get_target(self): target = self.helper.get_target(fake.CIFS_SHARE) self.assertEqual(fake.SHARE_ADDRESS_1, target) def test_get_target_missing_location(self): target = self.helper.get_target({'export_location': ''}) self.assertEqual('', target) def test_get_share_name_for_share(self): share_name = self.helper.get_share_name_for_share(fake.CIFS_SHARE) self.assertEqual(fake.SHARE_NAME, share_name) @ddt.data( { 'location': r'\\%s\%s' % (fake.SHARE_ADDRESS_1, fake.SHARE_NAME), 'ip': fake.SHARE_ADDRESS_1, 'share_name': fake.SHARE_NAME, }, { 'location': r'//%s/%s' % (fake.SHARE_ADDRESS_1, fake.SHARE_NAME), 'ip': fake.SHARE_ADDRESS_1, 'share_name': fake.SHARE_NAME, }, {'location': '', 'ip': '', 'share_name': ''}, {'location': 'invalid', 'ip': '', 'share_name': ''}, ) @ddt.unpack def test_get_export_location(self, 
location, ip, share_name): share = fake.CIFS_SHARE.copy() share['export_location'] = location result_ip, result_share_name = self.helper._get_export_location(share) self.assertEqual(ip, result_ip) self.assertEqual(share_name, result_share_name) manila-2.0.0/manila/tests/share/drivers/netapp/dataontap/protocols/test_nfs_cmode.py0000664000567000056710000002131712701407107032135 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Mock unit tests for the NetApp driver protocols NFS class module. """ import copy import uuid import ddt import mock import netaddr from manila import exception from manila.share.drivers.netapp.dataontap.protocols import nfs_cmode from manila import test from manila.tests.share.drivers.netapp.dataontap.protocols \ import fakes as fake @ddt.ddt class NetAppClusteredNFSHelperTestCase(test.TestCase): def setUp(self): super(NetAppClusteredNFSHelperTestCase, self).setUp() self.mock_context = mock.Mock() self.mock_client = mock.Mock() self.helper = nfs_cmode.NetAppCmodeNFSHelper() self.helper.set_client(self.mock_client) def test_create_share(self): mock_ensure_export_policy = self.mock_object(self.helper, '_ensure_export_policy') self.mock_client.get_volume_junction_path.return_value = ( fake.NFS_SHARE_PATH) result = self.helper.create_share(fake.NFS_SHARE, fake.SHARE_NAME) export_addresses = [fake.SHARE_ADDRESS_1, fake.SHARE_ADDRESS_2] export_paths = [result(address) for address in export_addresses] expected_paths = [ fake.SHARE_ADDRESS_1 + ":" + fake.NFS_SHARE_PATH, fake.SHARE_ADDRESS_2 + ":" + fake.NFS_SHARE_PATH, ] self.assertEqual(expected_paths, export_paths) (self.mock_client.clear_nfs_export_policy_for_volume. assert_called_once_with(fake.SHARE_NAME)) self.assertTrue(mock_ensure_export_policy.called) def test_delete_share(self): self.helper.delete_share(fake.NFS_SHARE, fake.SHARE_NAME) self.mock_client.clear_nfs_export_policy_for_volume.\ assert_called_once_with(fake.SHARE_NAME) self.mock_client.soft_delete_nfs_export_policy.assert_called_once_with( fake.EXPORT_POLICY_NAME) def test_update_access(self): self.mock_object(self.helper, '_ensure_export_policy') self.mock_object(self.helper, '_get_export_policy_name', mock.Mock(return_value='fake_export_policy')) self.mock_object(self.helper, '_get_temp_export_policy_name', mock.Mock(side_effect=['fake_new_export_policy', 'fake_old_export_policy'])) self.helper.update_access(fake.CIFS_SHARE, fake.SHARE_NAME, [fake.IP_ACCESS]) self.mock_client.create_nfs_export_policy.assert_called_once_with( 'fake_new_export_policy') self.mock_client.add_nfs_export_rule.assert_called_once_with( 'fake_new_export_policy', fake.CLIENT_ADDRESS_1, False) (self.mock_client.set_nfs_export_policy_for_volume. assert_called_once_with(fake.SHARE_NAME, 'fake_new_export_policy')) (self.mock_client.soft_delete_nfs_export_policy. 
assert_called_once_with('fake_old_export_policy')) self.mock_client.rename_nfs_export_policy.assert_has_calls([ mock.call('fake_export_policy', 'fake_old_export_policy'), mock.call('fake_new_export_policy', 'fake_export_policy'), ]) def test_validate_access_rule(self): result = self.helper._validate_access_rule(fake.IP_ACCESS) self.assertIsNone(result) def test_validate_access_rule_invalid_type(self): rule = copy.copy(fake.IP_ACCESS) rule['access_type'] = 'user' self.assertRaises(exception.InvalidShareAccess, self.helper._validate_access_rule, rule) def test_validate_access_rule_invalid_level(self): rule = copy.copy(fake.IP_ACCESS) rule['access_level'] = 'none' self.assertRaises(exception.InvalidShareAccessLevel, self.helper._validate_access_rule, rule) def test_get_sorted_access_rule_addresses(self): result = self.helper._get_sorted_access_rule_addresses( fake.NEW_NFS_RULES) expected = [ '10.10.20.10', '10.10.20.0/24', '10.10.10.10', '10.10.10.0/30', '10.10.10.0/24', ] self.assertEqual(expected, result) @ddt.data({'rule': '1.2.3.4', 'out': netaddr.IPAddress('1.2.3.4')}, {'rule': '1.2.3.4/32', 'out': netaddr.IPNetwork('1.2.3.4/32')}) @ddt.unpack def test_get_network_object_from_rule(self, rule, out): result = self.helper._get_network_object_from_rule(rule) self.assertEqual(out, result) def test_get_network_object_from_rule_invalid(self): self.assertRaises(netaddr.AddrFormatError, self.helper._get_network_object_from_rule, 'invalid') def test_get_target(self): target = self.helper.get_target(fake.NFS_SHARE) self.assertEqual(fake.SHARE_ADDRESS_1, target) def test_get_share_name_for_share(self): self.mock_client.get_volume_at_junction_path.return_value = ( fake.VOLUME) share_name = self.helper.get_share_name_for_share(fake.NFS_SHARE) self.assertEqual(fake.SHARE_NAME, share_name) self.mock_client.get_volume_at_junction_path.assert_called_once_with( fake.NFS_SHARE_PATH) def test_get_share_name_for_share_not_found(self): self.mock_client.get_volume_at_junction_path.return_value = None share_name = self.helper.get_share_name_for_share(fake.NFS_SHARE) self.assertIsNone(share_name) self.mock_client.get_volume_at_junction_path.assert_called_once_with( fake.NFS_SHARE_PATH) def test_get_target_missing_location(self): target = self.helper.get_target({'export_location': ''}) self.assertEqual('', target) def test_get_export_location(self): host_ip, export_path = self.helper._get_export_location( fake.NFS_SHARE) self.assertEqual(fake.SHARE_ADDRESS_1, host_ip) self.assertEqual('/' + fake.SHARE_NAME, export_path) def test_get_export_location_missing_location(self): fake_share = fake.NFS_SHARE.copy() fake_share['export_location'] = '' host_ip, export_path = self.helper._get_export_location(fake_share) self.assertEqual('', host_ip) self.assertEqual('', export_path) def test_get_temp_export_policy_name(self): self.mock_object(uuid, 'uuid1', mock.Mock(return_value='fake-uuid')) result = self.helper._get_temp_export_policy_name() self.assertEqual('temp_fake_uuid', result) def test_get_export_policy_name(self): result = self.helper._get_export_policy_name(fake.NFS_SHARE) self.assertEqual(fake.EXPORT_POLICY_NAME, result) def test_ensure_export_policy_equal(self): self.mock_client.get_nfs_export_policy_for_volume.return_value = ( fake.EXPORT_POLICY_NAME) self.helper._ensure_export_policy(fake.NFS_SHARE, fake.SHARE_NAME) self.assertFalse(self.mock_client.create_nfs_export_policy.called) self.assertFalse(self.mock_client.rename_nfs_export_policy.called) def test_ensure_export_policy_default(self): 
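        # A volume still attached to the 'default' export policy is expected
        # to get a new share-specific policy created and assigned; renaming
        # the existing policy should not be attempted.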
self.mock_client.get_nfs_export_policy_for_volume.return_value = ( 'default') self.helper._ensure_export_policy(fake.NFS_SHARE, fake.SHARE_NAME) self.mock_client.create_nfs_export_policy.assert_called_once_with( fake.EXPORT_POLICY_NAME) self.mock_client.set_nfs_export_policy_for_volume.\ assert_called_once_with(fake.SHARE_NAME, fake.EXPORT_POLICY_NAME) self.assertFalse(self.mock_client.rename_nfs_export_policy.called) def test_ensure_export_policy_rename(self): self.mock_client.get_nfs_export_policy_for_volume.return_value = 'fake' self.helper._ensure_export_policy(fake.NFS_SHARE, fake.SHARE_NAME) self.assertFalse(self.mock_client.create_nfs_export_policy.called) self.mock_client.rename_nfs_export_policy.assert_called_once_with( 'fake', fake.EXPORT_POLICY_NAME) manila-2.0.0/manila/tests/share/drivers/netapp/dataontap/protocols/fakes.py0000664000567000056710000000413412701407107030230 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.common import constants SHARE_NAME = 'fake_share' SHARE_ID = '9dba208c-9aa7-11e4-89d3-123b93f75cba' EXPORT_POLICY_NAME = 'policy_9dba208c_9aa7_11e4_89d3_123b93f75cba' SHARE_ADDRESS_1 = '10.10.10.10' SHARE_ADDRESS_2 = '10.10.10.20' CLIENT_ADDRESS_1 = '20.20.20.10' CLIENT_ADDRESS_2 = '20.20.20.20' CIFS_SHARE = { 'export_location': r'\\%s\%s' % (SHARE_ADDRESS_1, SHARE_NAME), 'id': SHARE_ID } NFS_SHARE_PATH = '/%s' % SHARE_NAME NFS_SHARE = { 'export_location': '%s:%s' % (SHARE_ADDRESS_1, NFS_SHARE_PATH), 'id': SHARE_ID } IP_ACCESS = { 'access_type': 'ip', 'access_to': CLIENT_ADDRESS_1, 'access_level': constants.ACCESS_LEVEL_RW, } USER_ACCESS = { 'access_type': 'user', 'access_to': 'fake_user', 'access_level': constants.ACCESS_LEVEL_RW, } VOLUME = { 'name': SHARE_NAME, } NEW_NFS_RULES = { '10.10.10.0/30': constants.ACCESS_LEVEL_RW, '10.10.10.0/24': constants.ACCESS_LEVEL_RO, '10.10.10.10': constants.ACCESS_LEVEL_RW, '10.10.20.0/24': constants.ACCESS_LEVEL_RW, '10.10.20.10': constants.ACCESS_LEVEL_RW, } EXISTING_CIFS_RULES = { 'user1': constants.ACCESS_LEVEL_RW, 'user2': constants.ACCESS_LEVEL_RO, 'user3': constants.ACCESS_LEVEL_RW, 'user4': constants.ACCESS_LEVEL_RO, } NEW_CIFS_RULES = { 'user1': constants.ACCESS_LEVEL_RW, 'user2': constants.ACCESS_LEVEL_RW, 'user3': constants.ACCESS_LEVEL_RO, 'user5': constants.ACCESS_LEVEL_RW, 'user6': constants.ACCESS_LEVEL_RO, } manila-2.0.0/manila/tests/share/drivers/netapp/dataontap/cluster_mode/0000775000567000056710000000000012701407265027231 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_data_motion.py0000664000567000056710000005375212701407107033147 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Alex Meade. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import time import mock from oslo_config import cfg from manila.share import configuration from manila.share import driver from manila.share.drivers.netapp.dataontap.client import api as netapp_api from manila.share.drivers.netapp.dataontap.client import client_cmode from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion from manila.share.drivers.netapp import options as na_opts from manila import test from manila.tests.share.drivers.netapp.dataontap import fakes as fake from manila.tests.share.drivers.netapp import fakes as na_fakes CONF = cfg.CONF class NetAppCDOTDataMotionTestCase(test.TestCase): def setUp(self): super(NetAppCDOTDataMotionTestCase, self).setUp() self.backend = 'backend1' self.mock_cmode_client = self.mock_object(client_cmode, "NetAppCmodeClient", mock.Mock()) self.config = configuration.Configuration(driver.share_opts, config_group=self.backend) self.config.append_config_values(na_opts.netapp_cluster_opts) self.config.append_config_values(na_opts.netapp_connection_opts) self.config.append_config_values(na_opts.netapp_basicauth_opts) self.config.append_config_values(na_opts.netapp_transport_opts) self.config.append_config_values(na_opts.netapp_support_opts) self.config.append_config_values(na_opts.netapp_provisioning_opts) self.config.append_config_values(na_opts.netapp_replication_opts) CONF.set_override("share_backend_name", self.backend, group=self.backend) CONF.set_override("netapp_transport_type", "https", group=self.backend) CONF.set_override("netapp_login", "fake_user", group=self.backend) CONF.set_override("netapp_password", "fake_password", group=self.backend) CONF.set_override("netapp_server_hostname", "fake_hostname", group=self.backend) CONF.set_override("netapp_server_port", 8866, group=self.backend) def test_get_client_for_backend(self): self.mock_object(data_motion, "get_backend_configuration", mock.Mock(return_value=self.config)) data_motion.get_client_for_backend(self.backend) self.mock_cmode_client.assert_called_once_with( hostname='fake_hostname', password='fake_password', username='fake_user', transport_type='https', port=8866, trace=mock.ANY, vserver=None) def test_get_client_for_backend_with_vserver(self): self.mock_object(data_motion, "get_backend_configuration", mock.Mock(return_value=self.config)) CONF.set_override("netapp_vserver", 'fake_vserver', group=self.backend) data_motion.get_client_for_backend(self.backend) self.mock_cmode_client.assert_called_once_with( hostname='fake_hostname', password='fake_password', username='fake_user', transport_type='https', port=8866, trace=mock.ANY, vserver='fake_vserver') def test_get_config_for_backend(self): self.mock_object(data_motion, "CONF") data_motion.CONF.list_all_sections.return_value = [self.backend] config = data_motion.get_backend_configuration(self.backend) self.assertEqual(self.backend, config.share_backend_name) def test_get_config_for_backend_share_backend_name_mismatch(self): self.mock_object(data_motion, "CONF") configuration.Configuration(driver.share_opts, config_group='my_happy_stanza') self.config.append_config_values(na_opts.netapp_cluster_opts) 
self.config.append_config_values(na_opts.netapp_connection_opts) self.config.append_config_values(na_opts.netapp_basicauth_opts) self.config.append_config_values(na_opts.netapp_transport_opts) self.config.append_config_values(na_opts.netapp_support_opts) self.config.append_config_values(na_opts.netapp_provisioning_opts) self.config.append_config_values(na_opts.netapp_replication_opts) CONF.set_override("share_backend_name", self.backend, group='my_happy_stanza') data_motion.CONF.list_all_sections.return_value = ['my_happy_stanza'] config = data_motion.get_backend_configuration(self.backend) self.assertEqual(self.backend, config.share_backend_name) def test_get_config_for_backend_not_configured(self): self.mock_object(data_motion, "CONF") data_motion.CONF.list_all_sections.return_value = [] config = data_motion.get_backend_configuration(self.backend) self.assertIsNone(config) class NetAppCDOTDataMotionSessionTestCase(test.TestCase): def setUp(self): super(NetAppCDOTDataMotionSessionTestCase, self).setUp() self.source_backend = 'backend1' self.dest_backend = 'backend2' config = configuration.Configuration(driver.share_opts, config_group=self.source_backend) config.append_config_values(na_opts.netapp_cluster_opts) config.append_config_values(na_opts.netapp_connection_opts) config.append_config_values(na_opts.netapp_basicauth_opts) config.append_config_values(na_opts.netapp_transport_opts) config.append_config_values(na_opts.netapp_support_opts) config.append_config_values(na_opts.netapp_provisioning_opts) config.append_config_values(na_opts.netapp_replication_opts) self.mock_object(data_motion, "get_backend_configuration", mock.Mock(return_value=config)) self.mock_cmode_client = self.mock_object(client_cmode, "NetAppCmodeClient", mock.Mock()) self.dm_session = data_motion.DataMotionSession() self.fake_src_share = copy.deepcopy(fake.SHARE) self.fake_src_share_server = copy.deepcopy(fake.SHARE_SERVER) self.source_vserver = 'source_vserver' self.fake_src_share_server['backend_details']['vserver_name'] = ( self.source_vserver ) self.fake_src_share['share_server'] = self.fake_src_share_server self.fake_src_share['id'] = 'c02d497a-236c-4852-812a-0d39373e312a' self.fake_src_vol_name = 'share_c02d497a_236c_4852_812a_0d39373e312a' self.fake_dest_share = copy.deepcopy(fake.SHARE) self.fake_dest_share_server = copy.deepcopy(fake.SHARE_SERVER) self.dest_vserver = 'dest_vserver' self.fake_dest_share_server['backend_details']['vserver_name'] = ( self.dest_vserver ) self.fake_dest_share['share_server'] = self.fake_dest_share_server self.fake_dest_share['id'] = '34fbaf57-745d-460f-8270-3378c2945e30' self.fake_dest_vol_name = 'share_34fbaf57_745d_460f_8270_3378c2945e30' self.mock_src_client = mock.Mock() self.mock_dest_client = mock.Mock() self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(side_effect=[self.mock_dest_client, self.mock_src_client])) def test_create_snapmirror(self): mock_dest_client = mock.Mock() self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(return_value=mock_dest_client)) self.dm_session.create_snapmirror(self.fake_src_share, self.fake_dest_share) mock_dest_client.create_snapmirror.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name, schedule='hourly' ) mock_dest_client.initialize_snapmirror.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name ) def test_delete_snapmirror(self): mock_src_client = mock.Mock() mock_dest_client = mock.Mock() self.mock_object(data_motion, 
'get_client_for_backend', mock.Mock(side_effect=[mock_dest_client, mock_src_client])) self.dm_session.delete_snapmirror(self.fake_src_share, self.fake_dest_share) mock_dest_client.abort_snapmirror.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name, clear_checkpoint=False ) mock_dest_client.delete_snapmirror.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name ) mock_src_client.release_snapmirror.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name ) def test_delete_snapmirror_does_not_exist(self): """Ensure delete succeeds when the snapmirror does not exist.""" mock_src_client = mock.Mock() mock_dest_client = mock.Mock() mock_dest_client.abort_snapmirror.side_effect = netapp_api.NaApiError( code=netapp_api.EAPIERROR ) self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(side_effect=[mock_dest_client, mock_src_client])) self.dm_session.delete_snapmirror(self.fake_src_share, self.fake_dest_share) mock_dest_client.abort_snapmirror.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name, clear_checkpoint=False ) mock_dest_client.delete_snapmirror.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name ) mock_src_client.release_snapmirror.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name ) def test_delete_snapmirror_error_deleting(self): """Ensure delete succeeds when the snapmirror does not exist.""" mock_src_client = mock.Mock() mock_dest_client = mock.Mock() mock_dest_client.delete_snapmirror.side_effect = netapp_api.NaApiError( code=netapp_api.ESOURCE_IS_DIFFERENT ) self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(side_effect=[mock_dest_client, mock_src_client])) self.dm_session.delete_snapmirror(self.fake_src_share, self.fake_dest_share) mock_dest_client.abort_snapmirror.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name, clear_checkpoint=False ) mock_dest_client.delete_snapmirror.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name ) mock_src_client.release_snapmirror.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name ) def test_delete_snapmirror_error_releasing(self): """Ensure delete succeeds when the snapmirror does not exist.""" mock_src_client = mock.Mock() mock_dest_client = mock.Mock() mock_src_client.release_snapmirror.side_effect = ( netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND)) self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(side_effect=[mock_dest_client, mock_src_client])) self.dm_session.delete_snapmirror(self.fake_src_share, self.fake_dest_share) mock_dest_client.abort_snapmirror.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name, clear_checkpoint=False ) mock_dest_client.delete_snapmirror.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name ) mock_src_client.release_snapmirror.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name ) def test_delete_snapmirror_without_release(self): mock_src_client = mock.Mock() mock_dest_client = mock.Mock() self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(side_effect=[mock_dest_client, mock_src_client])) self.dm_session.delete_snapmirror(self.fake_src_share, self.fake_dest_share, 
release=False) mock_dest_client.abort_snapmirror.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name, clear_checkpoint=False ) mock_dest_client.delete_snapmirror.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name ) self.assertFalse(mock_src_client.release_snapmirror.called) def test_delete_snapmirror_source_unreachable(self): mock_src_client = mock.Mock() mock_dest_client = mock.Mock() self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(side_effect=[mock_dest_client, Exception])) self.dm_session.delete_snapmirror(self.fake_src_share, self.fake_dest_share) mock_dest_client.abort_snapmirror.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name, clear_checkpoint=False ) mock_dest_client.delete_snapmirror.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name ) self.assertFalse(mock_src_client.release_snapmirror.called) def test_break_snapmirror(self): self.mock_object(self.dm_session, 'quiesce_then_abort') self.dm_session.break_snapmirror(self.fake_src_share, self.fake_dest_share) self.mock_dest_client.break_snapmirror.assert_called_once_with( self.source_vserver, self.fake_src_vol_name, self.dest_vserver, self.fake_dest_vol_name) self.dm_session.quiesce_then_abort.assert_called_once_with( self.fake_src_share, self.fake_dest_share) self.mock_dest_client.mount_volume.assert_called_once_with( self.fake_dest_vol_name) def test_break_snapmirror_wait_for_quiesced(self): self.mock_object(self.dm_session, 'quiesce_then_abort') self.dm_session.break_snapmirror(self.fake_src_share, self.fake_dest_share) self.dm_session.quiesce_then_abort.assert_called_once_with( self.fake_src_share, self.fake_dest_share) self.mock_dest_client.break_snapmirror.assert_called_once_with( self.source_vserver, self.fake_src_vol_name, self.dest_vserver, self.fake_dest_vol_name) self.mock_dest_client.mount_volume.assert_called_once_with( self.fake_dest_vol_name) def test_quiesce_then_abort_timeout(self): self.mock_object(time, 'sleep') mock_get_snapmirrors = mock.Mock( return_value=[{'relationship-status': "transferring"}]) self.mock_object(self.mock_dest_client, 'get_snapmirrors', mock_get_snapmirrors) mock_backend_config = na_fakes.create_configuration() mock_backend_config.netapp_snapmirror_quiesce_timeout = 10 self.mock_object(data_motion, 'get_backend_configuration', mock.Mock(return_value=mock_backend_config)) self.dm_session.quiesce_then_abort(self.fake_src_share, self.fake_dest_share) self.mock_dest_client.get_snapmirrors.assert_called_with( self.source_vserver, self.fake_src_vol_name, self.dest_vserver, self.fake_dest_vol_name, desired_attributes=['relationship-status', 'mirror-state'] ) self.assertEqual(2, self.mock_dest_client.get_snapmirrors.call_count) self.mock_dest_client.quiesce_snapmirror.assert_called_with( self.source_vserver, self.fake_src_vol_name, self.dest_vserver, self.fake_dest_vol_name) self.mock_dest_client.abort_snapmirror.assert_called_once_with( self.source_vserver, self.fake_src_vol_name, self.dest_vserver, self.fake_dest_vol_name, clear_checkpoint=False ) def test_quiesce_then_abort_wait_for_quiesced(self): self.mock_object(time, 'sleep') self.mock_object(self.mock_dest_client, 'get_snapmirrors', mock.Mock(side_effect=[ [{'relationship-status': "transferring"}], [{'relationship-status': "quiesced"}]])) self.dm_session.quiesce_then_abort(self.fake_src_share, self.fake_dest_share) 
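        # get_snapmirrors reports 'transferring' first and 'quiesced' second,
        # so the session should poll twice and issue exactly one quiesce
        # request before returning.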
self.mock_dest_client.get_snapmirrors.assert_called_with( self.source_vserver, self.fake_src_vol_name, self.dest_vserver, self.fake_dest_vol_name, desired_attributes=['relationship-status', 'mirror-state'] ) self.assertEqual(2, self.mock_dest_client.get_snapmirrors.call_count) self.mock_dest_client.quiesce_snapmirror.assert_called_once_with( self.source_vserver, self.fake_src_vol_name, self.dest_vserver, self.fake_dest_vol_name) def test_resync_snapmirror(self): self.dm_session.resync_snapmirror(self.fake_src_share, self.fake_dest_share) self.mock_dest_client.resync_snapmirror.assert_called_once_with( self.source_vserver, self.fake_src_vol_name, self.dest_vserver, self.fake_dest_vol_name) def test_change_snapmirror_source(self): fake_new_src_share = copy.deepcopy(fake.SHARE) fake_new_src_share['id'] = 'd02d497a-236c-4852-812a-0d39373e312a' fake_new_src_share_name = 'share_d02d497a_236c_4852_812a_0d39373e312a' mock_new_src_client = mock.Mock() self.mock_object(self.dm_session, 'delete_snapmirror') self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(side_effect=[self.mock_dest_client, self.mock_src_client, self.mock_dest_client, mock_new_src_client])) self.dm_session.change_snapmirror_source( self.fake_dest_share, self.fake_src_share, fake_new_src_share, [self.fake_dest_share, self.fake_src_share, fake_new_src_share]) self.assertFalse(self.mock_src_client.release_snapmirror.called) self.assertEqual(4, self.dm_session.delete_snapmirror.call_count) self.dm_session.delete_snapmirror.assert_called_with( mock.ANY, mock.ANY, release=False ) self.mock_dest_client.create_snapmirror.assert_called_once_with( mock.ANY, fake_new_src_share_name, mock.ANY, self.fake_dest_vol_name, schedule='hourly' ) self.mock_dest_client.resync_snapmirror.assert_called_once_with( mock.ANY, fake_new_src_share_name, mock.ANY, self.fake_dest_vol_name ) def test_get_snapmirrors(self): self.mock_object(self.mock_dest_client, 'get_snapmirrors') self.dm_session.get_snapmirrors(self.fake_src_share, self.fake_dest_share) self.mock_dest_client.get_snapmirrors.assert_called_with( self.source_vserver, self.fake_src_vol_name, self.dest_vserver, self.fake_dest_vol_name, desired_attributes=['relationship-status', 'mirror-state', 'source-vserver', 'source-volume', 'last-transfer-end-timestamp'] ) self.assertEqual(1, self.mock_dest_client.get_snapmirrors.call_count) def test_update_snapmirror(self): self.mock_object(self.mock_dest_client, 'get_snapmirrors') self.dm_session.update_snapmirror(self.fake_src_share, self.fake_dest_share) self.mock_dest_client.update_snapmirror.assert_called_once_with( self.source_vserver, self.fake_src_vol_name, self.dest_vserver, self.fake_dest_vol_name) def test_resume_snapmirror(self): self.mock_object(self.mock_dest_client, 'get_snapmirrors') self.dm_session.resume_snapmirror(self.fake_src_share, self.fake_dest_share) self.mock_dest_client.resume_snapmirror.assert_called_once_with( self.source_vserver, self.fake_src_vol_name, self.dest_vserver, self.fake_dest_vol_name) manila-2.0.0/manila/tests/share/drivers/netapp/dataontap/cluster_mode/__init__.py0000664000567000056710000000000012701407107031323 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_single_svm.py0000664000567000056710000001641112701407107033634 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit tests for the NetApp Data ONTAP cDOT single-SVM storage driver library. """ import mock from oslo_log import log from manila import exception from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base from manila.share.drivers.netapp.dataontap.cluster_mode import lib_single_svm from manila.share.drivers.netapp import utils as na_utils from manila import test import manila.tests.share.drivers.netapp.dataontap.fakes as fake class NetAppFileStorageLibraryTestCase(test.TestCase): def setUp(self): super(NetAppFileStorageLibraryTestCase, self).setUp() self.mock_object(na_utils, 'validate_driver_instantiation') # Mock loggers as themselves to allow logger arg validation mock_logger = log.getLogger('mock_logger') self.mock_object(lib_single_svm.LOG, 'info', mock.Mock(side_effect=mock_logger.info)) config = fake.get_config_cmode() config.netapp_vserver = fake.VSERVER1 kwargs = { 'configuration': config, 'private_storage': mock.Mock(), 'app_version': fake.APP_VERSION } self.library = lib_single_svm.NetAppCmodeSingleSVMFileStorageLibrary( fake.DRIVER_NAME, **kwargs) self.library._client = mock.Mock() self.client = self.library._client self.context = mock.Mock() def test_init(self): self.assertEqual(fake.VSERVER1, self.library._vserver) def test_check_for_setup_error(self): self.library._client.vserver_exists.return_value = True self.library._have_cluster_creds = True self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=fake.AGGREGATES)) mock_super = self.mock_object(lib_base.NetAppCmodeFileStorageLibrary, 'check_for_setup_error') self.library.check_for_setup_error() self.assertTrue(lib_single_svm.LOG.info.called) mock_super.assert_called_once_with() self.assertTrue(self.library._find_matching_aggregates.called) def test_check_for_setup_error_no_vserver(self): self.library._vserver = None self.assertRaises(exception.InvalidInput, self.library.check_for_setup_error) def test_check_for_setup_error_vserver_not_found(self): self.library._client.vserver_exists.return_value = False self.assertRaises(exception.VserverNotFound, self.library.check_for_setup_error) def test_check_for_setup_error_cluster_creds_vserver_match(self): self.library._client.vserver_exists.return_value = True self.library._have_cluster_creds = False self.library._client.list_vservers.return_value = [fake.VSERVER1] self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=fake.AGGREGATES)) mock_super = self.mock_object(lib_base.NetAppCmodeFileStorageLibrary, 'check_for_setup_error') self.library.check_for_setup_error() mock_super.assert_called_once_with() self.assertTrue(self.library._find_matching_aggregates.called) def test_check_for_setup_error_cluster_creds_vserver_mismatch(self): self.library._client.vserver_exists.return_value = True self.library._have_cluster_creds = False self.library._client.list_vservers.return_value = [fake.VSERVER2] self.assertRaises(exception.InvalidInput, self.library.check_for_setup_error) def 
test_check_for_setup_error_no_aggregates(self): self.library._client.vserver_exists.return_value = True self.library._have_cluster_creds = True self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=[])) self.assertRaises(exception.NetAppException, self.library.check_for_setup_error) self.assertTrue(self.library._find_matching_aggregates.called) def test_get_vserver(self): self.library._client.vserver_exists.return_value = True self.mock_object(self.library, '_get_api_client', mock.Mock(return_value='fake_client')) result_vserver, result_vserver_client = self.library._get_vserver() self.assertEqual(fake.VSERVER1, result_vserver) self.assertEqual('fake_client', result_vserver_client) def test_get_vserver_share_server_specified(self): self.assertRaises(exception.InvalidParameterValue, self.library._get_vserver, share_server=fake.SHARE_SERVER) def test_get_vserver_no_vserver(self): self.library._vserver = None self.assertRaises(exception.InvalidInput, self.library._get_vserver) def test_get_vserver_vserver_not_found(self): self.library._client.vserver_exists.return_value = False self.assertRaises(exception.VserverNotFound, self.library._get_vserver) def test_handle_housekeeping_tasks(self): mock_vserver_client = mock.Mock() self.mock_object(self.library, '_get_api_client', mock.Mock(return_value=mock_vserver_client)) mock_super = self.mock_object(lib_base.NetAppCmodeFileStorageLibrary, '_handle_housekeeping_tasks') self.library._handle_housekeeping_tasks() self.assertTrue( mock_vserver_client.prune_deleted_nfs_export_policies.called) self.assertTrue(mock_vserver_client.prune_deleted_snapshots.called) self.assertTrue(mock_super.called) def test_find_matching_aggregates(self): mock_vserver_client = mock.Mock() mock_vserver_client.list_vserver_aggregates.return_value = ( fake.AGGREGATES) self.mock_object(self.library, '_get_api_client', mock.Mock(return_value=mock_vserver_client)) self.library.configuration.netapp_aggregate_name_search_pattern = ( '.*_aggr_1') result = self.library._find_matching_aggregates() self.assertListEqual([fake.AGGREGATES[0]], result) def test_get_network_allocations_number(self): self.assertEqual(0, self.library.get_network_allocations_number()) def test_get_admin_network_allocations_number(self): result = self.library.get_admin_network_allocations_number() self.assertEqual(0, result) manila-2.0.0/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_base.py0000664000567000056710000043531312701407107032406 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit tests for the NetApp Data ONTAP cDOT base storage driver library. 
""" import copy import math import socket import time import uuid import ddt import mock from oslo_log import log from oslo_service import loopingcall from oslo_utils import timeutils from oslo_utils import units from manila.common import constants from manila import exception from manila.share.drivers.netapp.dataontap.client import api as netapp_api from manila.share.drivers.netapp.dataontap.client import client_cmode from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base from manila.share.drivers.netapp.dataontap.protocols import cifs_cmode from manila.share.drivers.netapp.dataontap.protocols import nfs_cmode from manila.share.drivers.netapp import utils as na_utils from manila.share import share_types from manila.share import utils as share_utils from manila import test from manila.tests.share.drivers.netapp.dataontap import fakes as fake @ddt.ddt class NetAppFileStorageLibraryTestCase(test.TestCase): def setUp(self): super(NetAppFileStorageLibraryTestCase, self).setUp() self.mock_object(na_utils, 'validate_driver_instantiation') self.mock_object(na_utils, 'setup_tracing') # Mock loggers as themselves to allow logger arg validation mock_logger = log.getLogger('mock_logger') self.mock_object(lib_base.LOG, 'info', mock.Mock(side_effect=mock_logger.info)) self.mock_object(lib_base.LOG, 'warning', mock.Mock(side_effect=mock_logger.warning)) self.mock_object(lib_base.LOG, 'error', mock.Mock(side_effect=mock_logger.error)) self.mock_object(lib_base.LOG, 'debug', mock.Mock(side_effect=mock_logger.debug)) kwargs = { 'configuration': fake.get_config_cmode(), 'private_storage': mock.Mock(), 'app_version': fake.APP_VERSION } self.library = lib_base.NetAppCmodeFileStorageLibrary(fake.DRIVER_NAME, **kwargs) self.library._client = mock.Mock() self.client = self.library._client self.context = mock.Mock() self.fake_replica = copy.deepcopy(fake.SHARE) self.fake_replica_2 = copy.deepcopy(fake.SHARE) self.fake_replica_2['id'] = fake.SHARE_ID2 self.fake_replica_2['replica_state'] = ( constants.REPLICA_STATE_OUT_OF_SYNC) self.mock_dm_session = mock.Mock() self.mock_object(data_motion, "DataMotionSession", mock.Mock(return_value=self.mock_dm_session)) self.mock_object(data_motion, 'get_client_for_backend') def test_init(self): self.assertEqual(fake.DRIVER_NAME, self.library.driver_name) self.assertEqual(1, na_utils.validate_driver_instantiation.call_count) self.assertEqual(1, na_utils.setup_tracing.call_count) self.assertListEqual([], self.library._licenses) self.assertDictEqual({}, self.library._clients) self.assertDictEqual({}, self.library._ssc_stats) self.assertIsNotNone(self.library._app_version) def test_do_setup(self): mock_get_api_client = self.mock_object(self.library, '_get_api_client') self.library.do_setup(self.context) mock_get_api_client.assert_called_once_with() self.library._client.check_for_cluster_credentials.\ assert_called_once_with() def test_check_for_setup_error(self): self.library._licenses = [] self.mock_object(self.library, '_get_licenses', mock.Mock(return_value=['fake_license'])) mock_start_periodic_tasks = self.mock_object(self.library, '_start_periodic_tasks') self.library.check_for_setup_error() self.assertEqual(['fake_license'], self.library._licenses) mock_start_periodic_tasks.assert_called_once_with() def test_get_vserver(self): self.assertRaises(NotImplementedError, self.library._get_vserver) def test_get_api_client(self): client_kwargs = fake.CLIENT_KWARGS.copy() # First call should 
proceed normally. mock_client_constructor = self.mock_object(client_cmode, 'NetAppCmodeClient') client1 = self.library._get_api_client() self.assertIsNotNone(client1) mock_client_constructor.assert_called_once_with(**client_kwargs) # Second call should yield the same object. mock_client_constructor = self.mock_object(client_cmode, 'NetAppCmodeClient') client2 = self.library._get_api_client() self.assertEqual(client1, client2) self.assertFalse(mock_client_constructor.called) def test_get_api_client_with_vserver(self): client_kwargs = fake.CLIENT_KWARGS.copy() client_kwargs['vserver'] = fake.VSERVER1 # First call should proceed normally. mock_client_constructor = self.mock_object(client_cmode, 'NetAppCmodeClient') client1 = self.library._get_api_client(vserver=fake.VSERVER1) self.assertIsNotNone(client1) mock_client_constructor.assert_called_once_with(**client_kwargs) # Second call should yield the same object. mock_client_constructor = self.mock_object(client_cmode, 'NetAppCmodeClient') client2 = self.library._get_api_client(vserver=fake.VSERVER1) self.assertEqual(client1, client2) self.assertFalse(mock_client_constructor.called) # A different vserver should work normally without caching. mock_client_constructor = self.mock_object(client_cmode, 'NetAppCmodeClient') client3 = self.library._get_api_client(vserver=fake.VSERVER2) self.assertNotEqual(client1, client3) client_kwargs['vserver'] = fake.VSERVER2 mock_client_constructor.assert_called_once_with(**client_kwargs) def test_get_licenses_both_protocols(self): self.library._have_cluster_creds = True self.mock_object(self.client, 'get_licenses', mock.Mock(return_value=fake.LICENSES)) result = self.library._get_licenses() self.assertSequenceEqual(fake.LICENSES, result) self.assertEqual(0, lib_base.LOG.error.call_count) self.assertEqual(1, lib_base.LOG.info.call_count) def test_get_licenses_one_protocol(self): self.library._have_cluster_creds = True licenses = list(fake.LICENSES) licenses.remove('nfs') self.mock_object(self.client, 'get_licenses', mock.Mock(return_value=licenses)) result = self.library._get_licenses() self.assertListEqual(licenses, result) self.assertEqual(0, lib_base.LOG.error.call_count) self.assertEqual(1, lib_base.LOG.info.call_count) def test_get_licenses_no_protocols(self): self.library._have_cluster_creds = True licenses = list(fake.LICENSES) licenses.remove('nfs') licenses.remove('cifs') self.mock_object(self.client, 'get_licenses', mock.Mock(return_value=licenses)) result = self.library._get_licenses() self.assertListEqual(licenses, result) self.assertEqual(1, lib_base.LOG.error.call_count) self.assertEqual(1, lib_base.LOG.info.call_count) def test_get_licenses_no_cluster_creds(self): self.library._have_cluster_creds = False result = self.library._get_licenses() self.assertListEqual([], result) self.assertEqual(1, lib_base.LOG.debug.call_count) def test_start_periodic_tasks(self): mock_update_ssc_info = self.mock_object(self.library, '_update_ssc_info') mock_handle_ems_logging = self.mock_object(self.library, '_handle_ems_logging') mock_handle_housekeeping_tasks = self.mock_object( self.library, '_handle_housekeeping_tasks') mock_ssc_periodic_task = mock.Mock() mock_ems_periodic_task = mock.Mock() mock_housekeeping_periodic_task = mock.Mock() mock_loopingcall = self.mock_object( loopingcall, 'FixedIntervalLoopingCall', mock.Mock(side_effect=[mock_ssc_periodic_task, mock_ems_periodic_task, mock_housekeeping_periodic_task])) self.library._start_periodic_tasks() self.assertTrue(mock_update_ssc_info.called) 
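        # Only the SSC update runs immediately; EMS logging and housekeeping
        # are wrapped in looping calls below and merely started.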
self.assertFalse(mock_handle_ems_logging.called) self.assertFalse(mock_housekeeping_periodic_task.called) mock_loopingcall.assert_has_calls( [mock.call(mock_update_ssc_info), mock.call(mock_handle_ems_logging), mock.call(mock_handle_housekeeping_tasks)]) self.assertTrue(mock_ssc_periodic_task.start.called) self.assertTrue(mock_ems_periodic_task.start.called) self.assertTrue(mock_housekeeping_periodic_task.start.called) def test_get_backend_share_name(self): result = self.library._get_backend_share_name(fake.SHARE_ID) expected = (fake.VOLUME_NAME_TEMPLATE % {'share_id': fake.SHARE_ID.replace('-', '_')}) self.assertEqual(expected, result) def test_get_backend_snapshot_name(self): result = self.library._get_backend_snapshot_name(fake.SNAPSHOT_ID) expected = 'share_snapshot_' + fake.SNAPSHOT_ID.replace('-', '_') self.assertEqual(expected, result) def test_get_backend_cg_snapshot_name(self): result = self.library._get_backend_cg_snapshot_name(fake.SNAPSHOT_ID) expected = 'share_cg_snapshot_' + fake.SNAPSHOT_ID.replace('-', '_') self.assertEqual(expected, result) def test_get_aggregate_space_cluster_creds(self): self.library._have_cluster_creds = True self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=fake.AGGREGATES)) self.mock_object(self.library._client, 'get_cluster_aggregate_capacities', mock.Mock(return_value=fake.AGGREGATE_CAPACITIES)) result = self.library._get_aggregate_space() self.library._client.get_cluster_aggregate_capacities.\ assert_called_once_with(fake.AGGREGATES) self.assertDictEqual(fake.AGGREGATE_CAPACITIES, result) def test_get_aggregate_space_no_cluster_creds(self): self.library._have_cluster_creds = False self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=fake.AGGREGATES)) self.mock_object(self.library._client, 'get_vserver_aggregate_capacities', mock.Mock(return_value=fake.AGGREGATE_CAPACITIES)) result = self.library._get_aggregate_space() self.library._client.get_vserver_aggregate_capacities.\ assert_called_once_with(fake.AGGREGATES) self.assertDictEqual(fake.AGGREGATE_CAPACITIES, result) def test_get_aggregate_node_cluster_creds(self): self.library._have_cluster_creds = True self.mock_object(self.library._client, 'get_node_for_aggregate', mock.Mock(return_value=fake.CLUSTER_NODE)) result = self.library._get_aggregate_node(fake.AGGREGATE) self.library._client.get_node_for_aggregate.\ assert_called_once_with(fake.AGGREGATE) self.assertEqual(fake.CLUSTER_NODE, result) def test_get_aggregate_node_no_cluster_creds(self): self.library._have_cluster_creds = False self.mock_object(self.library._client, 'get_node_for_aggregate') result = self.library._get_aggregate_node(fake.AGGREGATE) self.assertFalse(self.library._client.get_node_for_aggregate.called) self.assertIsNone(result) def test_get_share_stats(self): self.mock_object(self.library, '_get_pools', mock.Mock(return_value=fake.POOLS)) result = self.library.get_share_stats() expected = { 'share_backend_name': fake.BACKEND_NAME, 'driver_name': fake.DRIVER_NAME, 'vendor_name': 'NetApp', 'driver_version': '1.0', 'netapp_storage_family': 'ontap_cluster', 'storage_protocol': 'NFS_CIFS', 'total_capacity_gb': 0.0, 'free_capacity_gb': 0.0, 'consistency_group_support': 'host', 'pools': fake.POOLS, } self.assertDictEqual(expected, result) def test_get_share_stats_with_replication(self): self.library.configuration.replication_domain = "fake_domain" self.mock_object(self.library, '_get_pools', mock.Mock(return_value=fake.POOLS)) result = self.library.get_share_stats() 
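        # With a replication domain configured, the reported stats are
        # expected to advertise the replication type and domain as well.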
expected = { 'share_backend_name': fake.BACKEND_NAME, 'driver_name': fake.DRIVER_NAME, 'vendor_name': 'NetApp', 'driver_version': '1.0', 'netapp_storage_family': 'ontap_cluster', 'storage_protocol': 'NFS_CIFS', 'total_capacity_gb': 0.0, 'free_capacity_gb': 0.0, 'consistency_group_support': 'host', 'replication_type': 'dr', 'replication_domain': 'fake_domain', 'pools': fake.POOLS, } self.assertDictEqual(expected, result) def test_get_share_server_pools(self): self.mock_object(self.library, '_get_pools', mock.Mock(return_value=fake.POOLS)) result = self.library.get_share_server_pools(fake.SHARE_SERVER) self.assertListEqual(fake.POOLS, result) @ddt.data( { 'capacities': fake.AGGREGATE_CAPACITIES, 'pools': fake.POOLS, }, { 'capacities': fake.AGGREGATE_CAPACITIES_VSERVER_CREDS, 'pools': fake.POOLS_VSERVER_CREDS } ) @ddt.unpack def test_get_pools(self, capacities, pools): self.mock_object(self.library, '_get_aggregate_space', mock.Mock(return_value=capacities)) self.library._ssc_stats = fake.SSC_INFO result = self.library._get_pools() self.assertListEqual(pools, result) def test_handle_ems_logging(self): self.mock_object(self.library, '_build_ems_log_message', mock.Mock(return_value=fake.EMS_MESSAGE)) self.library._handle_ems_logging() self.library._client.send_ems_log_message.assert_called_with( fake.EMS_MESSAGE) def test_build_ems_log_message(self): self.mock_object(socket, 'getfqdn', mock.Mock(return_value=fake.HOST_NAME)) result = self.library._build_ems_log_message() fake_ems_log = { 'computer-name': fake.HOST_NAME, 'event-id': '0', 'event-source': 'Manila driver %s' % fake.DRIVER_NAME, 'app-version': fake.APP_VERSION, 'category': 'provisioning', 'event-description': 'OpenStack Manila connected to cluster node', 'log-level': '6', 'auto-support': 'false' } self.assertDictEqual(fake_ems_log, result) def test_find_matching_aggregates(self): self.assertRaises(NotImplementedError, self.library._find_matching_aggregates) @ddt.data(('NFS', nfs_cmode.NetAppCmodeNFSHelper), ('nfs', nfs_cmode.NetAppCmodeNFSHelper), ('CIFS', cifs_cmode.NetAppCmodeCIFSHelper), ('cifs', cifs_cmode.NetAppCmodeCIFSHelper)) @ddt.unpack def test_get_helper(self, protocol, helper_type): fake_share = fake.SHARE.copy() fake_share['share_proto'] = protocol mock_check_license_for_protocol = self.mock_object( self.library, '_check_license_for_protocol') result = self.library._get_helper(fake_share) mock_check_license_for_protocol.assert_called_once_with( protocol.lower()) self.assertTrue(type(result) == helper_type) def test_get_helper_invalid_protocol(self): fake_share = fake.SHARE.copy() fake_share['share_proto'] = 'iSCSI' self.mock_object(self.library, '_check_license_for_protocol') self.assertRaises(exception.NetAppException, self.library._get_helper, fake_share) def test_check_license_for_protocol_no_cluster_creds(self): self.library._have_cluster_creds = False result = self.library._check_license_for_protocol('fake_protocol') self.assertIsNone(result) def test_check_license_for_protocol_have_license(self): self.library._have_cluster_creds = True self.library._licenses = ['base', 'fake_protocol'] result = self.library._check_license_for_protocol('FAKE_PROTOCOL') self.assertIsNone(result) def test_check_license_for_protocol_newly_licensed_protocol(self): self.library._have_cluster_creds = True self.mock_object(self.library, '_get_licenses', mock.Mock(return_value=['base', 'nfs'])) self.library._licenses = ['base'] result = self.library._check_license_for_protocol('NFS') self.assertIsNone(result) 
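        # The cached license list did not include 'nfs', so the library is
        # expected to refresh licenses via _get_licenses before succeeding.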
self.assertTrue(self.library._get_licenses.called) def test_check_license_for_protocol_unlicensed_protocol(self): self.library._have_cluster_creds = True self.mock_object(self.library, '_get_licenses', mock.Mock(return_value=['base'])) self.library._licenses = ['base'] self.assertRaises(exception.NetAppException, self.library._check_license_for_protocol, 'NFS') def test_get_pool_has_pool(self): result = self.library.get_pool(fake.SHARE) self.assertEqual(fake.POOL_NAME, result) self.assertFalse(self.client.get_aggregate_for_volume.called) def test_get_pool_no_pool(self): fake_share = copy.deepcopy(fake.SHARE) fake_share['host'] = '%(host)s@%(backend)s' % { 'host': fake.HOST_NAME, 'backend': fake.BACKEND_NAME} self.client.get_aggregate_for_volume.return_value = fake.POOL_NAME result = self.library.get_pool(fake_share) self.assertEqual(fake.POOL_NAME, result) self.assertTrue(self.client.get_aggregate_for_volume.called) def test_get_pool_raises(self): fake_share = copy.deepcopy(fake.SHARE) fake_share['host'] = '%(host)s@%(backend)s' % { 'host': fake.HOST_NAME, 'backend': fake.BACKEND_NAME} self.client.get_aggregate_for_volume.side_effect = ( exception.NetAppException) self.assertRaises(exception.NetAppException, self.library.get_pool, fake_share) def test_create_share(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_allocate_container = self.mock_object(self.library, '_allocate_container') mock_create_export = self.mock_object( self.library, '_create_export', mock.Mock(return_value='fake_export_location')) result = self.library.create_share(self.context, fake.SHARE, share_server=fake.SHARE_SERVER) mock_allocate_container.assert_called_once_with(fake.SHARE, vserver_client) mock_create_export.assert_called_once_with(fake.SHARE, fake.SHARE_SERVER, fake.VSERVER1, vserver_client) self.assertEqual('fake_export_location', result) def test_create_share_from_snapshot(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_allocate_container_from_snapshot = self.mock_object( self.library, '_allocate_container_from_snapshot') mock_create_export = self.mock_object( self.library, '_create_export', mock.Mock(return_value='fake_export_location')) result = self.library.create_share_from_snapshot( self.context, fake.SHARE, fake.SNAPSHOT, share_server=fake.SHARE_SERVER) mock_allocate_container_from_snapshot.assert_called_once_with( fake.SHARE, fake.SNAPSHOT, vserver_client) mock_create_export.assert_called_once_with(fake.SHARE, fake.SHARE_SERVER, fake.VSERVER1, vserver_client) self.assertEqual('fake_export_location', result) def test_allocate_container(self): self.mock_object(self.library, '_get_backend_share_name', mock.Mock( return_value=fake.SHARE_NAME)) self.mock_object(share_utils, 'extract_host', mock.Mock( return_value=fake.POOL_NAME)) self.mock_object(share_types, 'get_extra_specs_from_share', mock.Mock(return_value=fake.EXTRA_SPEC)) mock_remap_standard_boolean_extra_specs = self.mock_object( self.library, '_remap_standard_boolean_extra_specs', mock.Mock(return_value=fake.EXTRA_SPEC)) self.mock_object(self.library, '_check_boolean_extra_specs_validity') self.mock_object(self.library, '_get_boolean_provisioning_options', mock.Mock(return_value=fake.PROVISIONING_OPTIONS)) vserver_client = mock.Mock() self.library._allocate_container(fake.EXTRA_SPEC_SHARE, vserver_client) vserver_client.create_volume.assert_called_once_with( 
fake.POOL_NAME, fake.SHARE_NAME, fake.SHARE['size'], thin_provisioned=True, snapshot_policy='default', language='en-US', dedup_enabled=True, compression_enabled=False, max_files=5000, snapshot_reserve=8) mock_remap_standard_boolean_extra_specs.assert_called_once_with( fake.EXTRA_SPEC) def test_remap_standard_boolean_extra_specs(self): extra_specs = copy.deepcopy(fake.OVERLAPPING_EXTRA_SPEC) result = self.library._remap_standard_boolean_extra_specs(extra_specs) self.assertDictEqual(fake.REMAPPED_OVERLAPPING_EXTRA_SPEC, result) def test_allocate_container_as_replica(self): self.mock_object(self.library, '_get_backend_share_name', mock.Mock( return_value=fake.SHARE_NAME)) self.mock_object(share_utils, 'extract_host', mock.Mock( return_value=fake.POOL_NAME)) self.mock_object(share_types, 'get_extra_specs_from_share', mock.Mock(return_value=fake.EXTRA_SPEC)) self.mock_object(self.library, '_check_boolean_extra_specs_validity') self.mock_object(self.library, '_get_boolean_provisioning_options', mock.Mock(return_value=fake.PROVISIONING_OPTIONS)) vserver_client = mock.Mock() self.library._allocate_container(fake.EXTRA_SPEC_SHARE, vserver_client, replica=True) vserver_client.create_volume.assert_called_once_with( fake.POOL_NAME, fake.SHARE_NAME, fake.SHARE['size'], thin_provisioned=True, snapshot_policy='default', language='en-US', dedup_enabled=True, compression_enabled=False, max_files=5000, snapshot_reserve=8, volume_type='dp') def test_allocate_container_no_pool_name(self): self.mock_object(self.library, '_get_backend_share_name', mock.Mock( return_value=fake.SHARE_NAME)) self.mock_object(share_utils, 'extract_host', mock.Mock( return_value=None)) self.mock_object(self.library, '_check_extra_specs_validity') self.mock_object(self.library, '_get_provisioning_options') vserver_client = mock.Mock() self.assertRaises(exception.InvalidHost, self.library._allocate_container, fake.SHARE, vserver_client) self.library._get_backend_share_name.assert_called_once_with( fake.SHARE['id']) share_utils.extract_host.assert_called_once_with(fake.SHARE['host'], level='pool') self.assertEqual(0, self.library._check_extra_specs_validity.call_count) self.assertEqual(0, self.library._get_provisioning_options.call_count) def test_check_extra_specs_validity(self): boolean_extra_spec_keys = list( self.library.BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP) mock_bool_check = self.mock_object( self.library, '_check_boolean_extra_specs_validity') mock_string_check = self.mock_object( self.library, '_check_string_extra_specs_validity') self.library._check_extra_specs_validity( fake.EXTRA_SPEC_SHARE, fake.EXTRA_SPEC) mock_bool_check.assert_called_once_with( fake.EXTRA_SPEC_SHARE, fake.EXTRA_SPEC, boolean_extra_spec_keys) mock_string_check.assert_called_once_with( fake.EXTRA_SPEC_SHARE, fake.EXTRA_SPEC) def test_check_extra_specs_validity_empty_spec(self): result = self.library._check_extra_specs_validity( fake.EXTRA_SPEC_SHARE, fake.EMPTY_EXTRA_SPEC) self.assertIsNone(result) def test_check_extra_specs_validity_invalid_value(self): self.assertRaises( exception.Invalid, self.library._check_extra_specs_validity, fake.EXTRA_SPEC_SHARE, fake.INVALID_EXTRA_SPEC) def test_check_string_extra_specs_validity(self): result = self.library._check_string_extra_specs_validity( fake.EXTRA_SPEC_SHARE, fake.EXTRA_SPEC) self.assertIsNone(result) def test_check_string_extra_specs_validity_empty_spec(self): result = self.library._check_string_extra_specs_validity( fake.EXTRA_SPEC_SHARE, fake.EMPTY_EXTRA_SPEC) self.assertIsNone(result) def 
test_check_string_extra_specs_validity_invalid_value(self): self.assertRaises( exception.NetAppException, self.library._check_string_extra_specs_validity, fake.EXTRA_SPEC_SHARE, fake.INVALID_MAX_FILE_EXTRA_SPEC) def test_check_boolean_extra_specs_validity_invalid_value(self): self.assertRaises( exception.Invalid, self.library._check_boolean_extra_specs_validity, fake.EXTRA_SPEC_SHARE, fake.INVALID_EXTRA_SPEC, list(self.library.BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP)) def test_check_extra_specs_validity_invalid_combination(self): self.assertRaises( exception.Invalid, self.library._check_boolean_extra_specs_validity, fake.EXTRA_SPEC_SHARE, fake.INVALID_EXTRA_SPEC_COMBO, list(self.library.BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP)) def test_get_provisioning_options(self): result = self.library._get_provisioning_options(fake.EXTRA_SPEC) self.assertEqual(fake.PROVISIONING_OPTIONS, result) def test_get_provisioning_options_missing_spec(self): result = self.library._get_provisioning_options( fake.SHORT_BOOLEAN_EXTRA_SPEC) self.assertEqual( fake.PROVISIONING_OPTIONS_BOOLEAN_THIN_PROVISIONED_TRUE, result) def test_get_provisioning_options_implicit_false(self): result = self.library._get_provisioning_options( fake.EMPTY_EXTRA_SPEC) expected = { 'language': None, 'max_files': None, 'snapshot_policy': None, 'thin_provisioned': False, 'compression_enabled': False, 'dedup_enabled': False, } self.assertEqual(expected, result) def test_get_boolean_provisioning_options(self): result = self.library._get_boolean_provisioning_options( fake.SHORT_BOOLEAN_EXTRA_SPEC, self.library.BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP) self.assertEqual(fake.PROVISIONING_OPTIONS_BOOLEAN, result) def test_get_boolean_provisioning_options_missing_spec(self): result = self.library._get_boolean_provisioning_options( fake.SHORT_BOOLEAN_EXTRA_SPEC, self.library.BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP) self.assertEqual(fake.PROVISIONING_OPTIONS_BOOLEAN, result) def test_get_boolean_provisioning_options_implicit_false(self): expected = { 'thin_provisioned': False, 'dedup_enabled': False, 'compression_enabled': False, } result = self.library._get_boolean_provisioning_options( fake.EMPTY_EXTRA_SPEC, self.library.BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP) self.assertEqual(expected, result) def test_get_string_provisioning_options(self): result = self.library._get_string_provisioning_options( fake.STRING_EXTRA_SPEC, self.library.STRING_QUALIFIED_EXTRA_SPECS_MAP) self.assertEqual(fake.PROVISIONING_OPTIONS_STRING, result) def test_get_string_provisioning_options_missing_spec(self): result = self.library._get_string_provisioning_options( fake.SHORT_STRING_EXTRA_SPEC, self.library.STRING_QUALIFIED_EXTRA_SPECS_MAP) self.assertEqual(fake.PROVISIONING_OPTIONS_STRING_MISSING_SPECS, result) def test_get_string_provisioning_options_implicit_false(self): result = self.library._get_string_provisioning_options( fake.EMPTY_EXTRA_SPEC, self.library.STRING_QUALIFIED_EXTRA_SPECS_MAP) self.assertEqual(fake.PROVISIONING_OPTIONS_STRING_DEFAULT, result) def test_check_if_max_files_is_valid_with_negative_integer(self): self.assertRaises(exception.NetAppException, self.library._check_if_max_files_is_valid, fake.SHARE, -1) def test_check_if_max_files_is_valid_with_string(self): self.assertRaises(ValueError, self.library._check_if_max_files_is_valid, fake.SHARE, 'abc') def test_allocate_container_no_pool(self): vserver_client = mock.Mock() fake_share = copy.deepcopy(fake.SHARE) fake_share['host'] = fake_share['host'].split('#')[0] self.assertRaises(exception.InvalidHost, 
self.library._allocate_container, fake_share, vserver_client) def test_check_aggregate_extra_specs_validity(self): self.library._have_cluster_creds = True self.library._ssc_stats = fake.SSC_INFO result = self.library._check_aggregate_extra_specs_validity( fake.AGGREGATES[0], fake.EXTRA_SPEC) self.assertIsNone(result) def test_check_aggregate_extra_specs_validity_no_match(self): self.library._have_cluster_creds = True self.library._ssc_stats = fake.SSC_INFO self.assertRaises(exception.NetAppException, self.library._check_aggregate_extra_specs_validity, fake.AGGREGATES[1], fake.EXTRA_SPEC) def test_allocate_container_from_snapshot(self): vserver_client = mock.Mock() self.library._allocate_container_from_snapshot(fake.SHARE, fake.SNAPSHOT, vserver_client) share_name = self.library._get_backend_share_name(fake.SHARE['id']) parent_share_name = self.library._get_backend_share_name( fake.SNAPSHOT['share_id']) parent_snapshot_name = self.library._get_backend_snapshot_name( fake.SNAPSHOT['id']) vserver_client.create_volume_clone.assert_called_once_with( share_name, parent_share_name, parent_snapshot_name) def test_share_exists(self): vserver_client = mock.Mock() vserver_client.volume_exists.return_value = True result = self.library._share_exists(fake.SHARE_NAME, vserver_client) self.assertTrue(result) def test_share_exists_not_found(self): vserver_client = mock.Mock() vserver_client.volume_exists.return_value = False result = self.library._share_exists(fake.SHARE_NAME, vserver_client) self.assertFalse(result) def test_delete_share(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_share_exists = self.mock_object(self.library, '_share_exists', mock.Mock(return_value=True)) mock_remove_export = self.mock_object(self.library, '_remove_export') mock_deallocate_container = self.mock_object(self.library, '_deallocate_container') self.library.delete_share(self.context, fake.SHARE, share_server=fake.SHARE_SERVER) share_name = self.library._get_backend_share_name(fake.SHARE['id']) mock_share_exists.assert_called_once_with(share_name, vserver_client) mock_remove_export.assert_called_once_with(fake.SHARE, vserver_client) mock_deallocate_container.assert_called_once_with(share_name, vserver_client) self.assertEqual(0, lib_base.LOG.info.call_count) @ddt.data(exception.InvalidInput(reason='fake_reason'), exception.VserverNotSpecified(), exception.VserverNotFound(vserver='fake_vserver')) def test_delete_share_no_share_server(self, get_vserver_exception): self.mock_object(self.library, '_get_vserver', mock.Mock(side_effect=get_vserver_exception)) mock_share_exists = self.mock_object(self.library, '_share_exists', mock.Mock(return_value=False)) mock_remove_export = self.mock_object(self.library, '_remove_export') mock_deallocate_container = self.mock_object(self.library, '_deallocate_container') self.library.delete_share(self.context, fake.SHARE, share_server=fake.SHARE_SERVER) self.assertFalse(mock_share_exists.called) self.assertFalse(mock_remove_export.called) self.assertFalse(mock_deallocate_container.called) self.assertEqual(1, lib_base.LOG.warning.call_count) def test_delete_share_not_found(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_share_exists = self.mock_object(self.library, '_share_exists', mock.Mock(return_value=False)) mock_remove_export = self.mock_object(self.library, '_remove_export') 
mock_deallocate_container = self.mock_object(self.library, '_deallocate_container') self.library.delete_share(self.context, fake.SHARE, share_server=fake.SHARE_SERVER) share_name = self.library._get_backend_share_name(fake.SHARE['id']) mock_share_exists.assert_called_once_with(share_name, vserver_client) self.assertFalse(mock_remove_export.called) self.assertFalse(mock_deallocate_container.called) self.assertEqual(1, lib_base.LOG.info.call_count) def test_deallocate_container(self): vserver_client = mock.Mock() self.library._deallocate_container(fake.SHARE_NAME, vserver_client) vserver_client.unmount_volume.assert_called_with(fake.SHARE_NAME, force=True) vserver_client.offline_volume.assert_called_with(fake.SHARE_NAME) vserver_client.delete_volume.assert_called_with(fake.SHARE_NAME) def test_create_export(self): protocol_helper = mock.Mock() callback = (lambda export_address, export_path='fake_export_path': ':'.join([export_address, export_path])) protocol_helper.create_share.return_value = callback self.mock_object(self.library, '_get_helper', mock.Mock(return_value=protocol_helper)) vserver_client = mock.Mock() vserver_client.get_network_interfaces.return_value = fake.LIFS fake_interface_addresses_with_metadata = copy.deepcopy( fake.INTERFACE_ADDRESSES_WITH_METADATA) mock_get_export_addresses_with_metadata = self.mock_object( self.library, '_get_export_addresses_with_metadata', mock.Mock(return_value=fake_interface_addresses_with_metadata)) result = self.library._create_export(fake.SHARE, fake.SHARE_SERVER, fake.VSERVER1, vserver_client) self.assertEqual(fake.NFS_EXPORTS, result) mock_get_export_addresses_with_metadata.assert_called_once_with( fake.SHARE, fake.SHARE_SERVER, fake.LIFS) protocol_helper.create_share.assert_called_once_with( fake.SHARE, fake.SHARE_NAME) def test_create_export_lifs_not_found(self): self.mock_object(self.library, '_get_helper') vserver_client = mock.Mock() vserver_client.get_network_interfaces.return_value = [] self.assertRaises(exception.NetAppException, self.library._create_export, fake.SHARE, fake.SHARE_SERVER, fake.VSERVER1, vserver_client) def test_get_export_addresses_with_metadata(self): mock_get_aggregate_node = self.mock_object( self.library, '_get_aggregate_node', mock.Mock(return_value=fake.CLUSTER_NODES[0])) mock_get_admin_addresses_for_share_server = self.mock_object( self.library, '_get_admin_addresses_for_share_server', mock.Mock(return_value=[fake.LIF_ADDRESSES[1]])) result = self.library._get_export_addresses_with_metadata( fake.SHARE, fake.SHARE_SERVER, fake.LIFS) self.assertEqual(fake.INTERFACE_ADDRESSES_WITH_METADATA, result) mock_get_aggregate_node.assert_called_once_with(fake.POOL_NAME) mock_get_admin_addresses_for_share_server.assert_called_once_with( fake.SHARE_SERVER) def test_get_export_addresses_with_metadata_node_unknown(self): mock_get_aggregate_node = self.mock_object( self.library, '_get_aggregate_node', mock.Mock(return_value=None)) mock_get_admin_addresses_for_share_server = self.mock_object( self.library, '_get_admin_addresses_for_share_server', mock.Mock(return_value=[fake.LIF_ADDRESSES[1]])) result = self.library._get_export_addresses_with_metadata( fake.SHARE, fake.SHARE_SERVER, fake.LIFS) expected = copy.deepcopy(fake.INTERFACE_ADDRESSES_WITH_METADATA) for key, value in expected.items(): value['preferred'] = None self.assertEqual(expected, result) mock_get_aggregate_node.assert_called_once_with(fake.POOL_NAME) mock_get_admin_addresses_for_share_server.assert_called_once_with( fake.SHARE_SERVER) def 
test_get_admin_addresses_for_share_server(self): result = self.library._get_admin_addresses_for_share_server( fake.SHARE_SERVER) self.assertEqual([fake.ADMIN_NETWORK_ALLOCATIONS[0]['ip_address']], result) def test_get_admin_addresses_for_share_server_no_share_server(self): result = self.library._get_admin_addresses_for_share_server(None) self.assertEqual([], result) @ddt.data(True, False) def test_sort_export_locations_by_preferred_paths(self, reverse): export_locations = copy.copy(fake.NFS_EXPORTS) if reverse: export_locations.reverse() result = self.library._sort_export_locations_by_preferred_paths( export_locations) self.assertEqual(fake.NFS_EXPORTS, result) def test_remove_export(self): protocol_helper = mock.Mock() protocol_helper.get_target.return_value = 'fake_target' self.mock_object(self.library, '_get_helper', mock.Mock(return_value=protocol_helper)) vserver_client = mock.Mock() self.library._remove_export(fake.SHARE, vserver_client) protocol_helper.set_client.assert_called_once_with(vserver_client) protocol_helper.get_target.assert_called_once_with(fake.SHARE) protocol_helper.delete_share.assert_called_once_with(fake.SHARE, fake.SHARE_NAME) def test_remove_export_target_not_found(self): protocol_helper = mock.Mock() protocol_helper.get_target.return_value = None self.mock_object(self.library, '_get_helper', mock.Mock(return_value=protocol_helper)) vserver_client = mock.Mock() self.library._remove_export(fake.SHARE, vserver_client) protocol_helper.set_client.assert_called_once_with(vserver_client) protocol_helper.get_target.assert_called_once_with(fake.SHARE) self.assertFalse(protocol_helper.delete_share.called) def test_create_snapshot(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) model_update = self.library.create_snapshot( self.context, fake.SNAPSHOT, share_server=fake.SHARE_SERVER) share_name = self.library._get_backend_share_name( fake.SNAPSHOT['share_id']) snapshot_name = self.library._get_backend_snapshot_name( fake.SNAPSHOT['id']) vserver_client.create_snapshot.assert_called_once_with(share_name, snapshot_name) self.assertEqual(snapshot_name, model_update['provider_location']) def test_delete_snapshot(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_delete_snapshot = self.mock_object(self.library, '_delete_snapshot') self.library.delete_snapshot(self.context, fake.SNAPSHOT, share_server=fake.SHARE_SERVER) share_name = self.library._get_backend_share_name( fake.SNAPSHOT['share_id']) snapshot_name = self.library._get_backend_snapshot_name( fake.SNAPSHOT['id']) mock_delete_snapshot.assert_called_once_with( vserver_client, share_name, snapshot_name) def test_delete_snapshot_with_provider_location(self): vserver_client = mock.Mock() vserver_client.get_snapshot.return_value = fake.CDOT_SNAPSHOT self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['provider_location'] = 'fake_provider_location' self.library.delete_snapshot(self.context, fake_snapshot, share_server=fake.SHARE_SERVER) share_name = self.library._get_backend_share_name( fake_snapshot['share_id']) vserver_client.delete_snapshot.assert_called_once_with( share_name, fake_snapshot['provider_location']) @ddt.data(exception.InvalidInput(reason='fake_reason'), exception.VserverNotSpecified(), 
exception.VserverNotFound(vserver='fake_vserver')) def test_delete_snapshot_no_share_server(self, get_vserver_exception): self.mock_object(self.library, '_get_vserver', mock.Mock(side_effect=get_vserver_exception)) mock_delete_snapshot = self.mock_object(self.library, '_delete_snapshot') self.library.delete_snapshot(self.context, fake.SNAPSHOT, share_server=fake.SHARE_SERVER) self.assertFalse(mock_delete_snapshot.called) def test_delete_snapshot_not_found(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_delete_snapshot = self.mock_object( self.library, '_delete_snapshot', mock.Mock(side_effect=exception.SnapshotResourceNotFound( name=fake.SNAPSHOT_NAME))) self.library.delete_snapshot(self.context, fake.SNAPSHOT, share_server=fake.SHARE_SERVER) share_name = self.library._get_backend_share_name( fake.SNAPSHOT['share_id']) snapshot_name = self.library._get_backend_snapshot_name( fake.SNAPSHOT['id']) mock_delete_snapshot.assert_called_once_with( vserver_client, share_name, snapshot_name) def test_delete_snapshot_not_unique(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_delete_snapshot = self.mock_object( self.library, '_delete_snapshot', mock.Mock(side_effect=exception.NetAppException())) self.assertRaises(exception.NetAppException, self.library.delete_snapshot, self.context, fake.SNAPSHOT, share_server=fake.SHARE_SERVER) share_name = self.library._get_backend_share_name( fake.SNAPSHOT['share_id']) snapshot_name = self.library._get_backend_snapshot_name( fake.SNAPSHOT['id']) mock_delete_snapshot.assert_called_once_with( vserver_client, share_name, snapshot_name) def test__delete_snapshot(self): vserver_client = mock.Mock() vserver_client.get_snapshot.return_value = fake.CDOT_SNAPSHOT self.library._delete_snapshot(vserver_client, fake.SHARE_NAME, fake.SNAPSHOT_NAME) vserver_client.delete_snapshot.assert_called_once_with( fake.SHARE_NAME, fake.SNAPSHOT_NAME) self.assertFalse(vserver_client.get_clone_children_for_snapshot.called) self.assertFalse(vserver_client.split_volume_clone.called) self.assertFalse(vserver_client.soft_delete_snapshot.called) def test__delete_snapshot_busy_volume_clone(self): vserver_client = mock.Mock() vserver_client.get_snapshot.return_value = ( fake.CDOT_SNAPSHOT_BUSY_VOLUME_CLONE) vserver_client.get_clone_children_for_snapshot.return_value = ( fake.CDOT_CLONE_CHILDREN) self.library._delete_snapshot(vserver_client, fake.SHARE_NAME, fake.SNAPSHOT_NAME) self.assertFalse(vserver_client.delete_snapshot.called) vserver_client.get_clone_children_for_snapshot.assert_called_once_with( fake.SHARE_NAME, fake.SNAPSHOT_NAME) vserver_client.split_volume_clone.assert_has_calls([ mock.call(fake.CDOT_CLONE_CHILD_1), mock.call(fake.CDOT_CLONE_CHILD_2), ]) vserver_client.soft_delete_snapshot.assert_called_once_with( fake.SHARE_NAME, fake.SNAPSHOT_NAME) def test__delete_snapshot_busy_snapmirror(self): vserver_client = mock.Mock() vserver_client.get_snapshot.return_value = ( fake.CDOT_SNAPSHOT_BUSY_SNAPMIRROR) self.assertRaises(exception.ShareSnapshotIsBusy, self.library._delete_snapshot, vserver_client, fake.SHARE_NAME, fake.SNAPSHOT_NAME) self.assertFalse(vserver_client.delete_snapshot.called) self.assertFalse(vserver_client.get_clone_children_for_snapshot.called) self.assertFalse(vserver_client.split_volume_clone.called) self.assertFalse(vserver_client.soft_delete_snapshot.called) def 
test_manage_existing(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_manage_container = self.mock_object( self.library, '_manage_container', mock.Mock(return_value=fake.SHARE_SIZE)) mock_create_export = self.mock_object( self.library, '_create_export', mock.Mock(return_value=fake.NFS_EXPORTS)) result = self.library.manage_existing(fake.SHARE, {}) expected = { 'size': fake.SHARE_SIZE, 'export_locations': fake.NFS_EXPORTS } mock_manage_container.assert_called_once_with(fake.SHARE, vserver_client) mock_create_export.assert_called_once_with(fake.SHARE, None, fake.VSERVER1, vserver_client) self.assertDictEqual(expected, result) def test_unmanage(self): result = self.library.unmanage(fake.SHARE) self.assertIsNone(result) def test_manage_container(self): vserver_client = mock.Mock() share_to_manage = copy.deepcopy(fake.SHARE) share_to_manage['export_location'] = fake.EXPORT_LOCATION mock_helper = mock.Mock() mock_helper.get_share_name_for_share.return_value = fake.FLEXVOL_NAME self.mock_object(self.library, '_get_helper', mock.Mock(return_value=mock_helper)) mock_get_volume_to_manage = self.mock_object( vserver_client, 'get_volume_to_manage', mock.Mock(return_value=fake.FLEXVOL_TO_MANAGE)) mock_validate_volume_for_manage = self.mock_object( self.library, '_validate_volume_for_manage') self.mock_object(share_types, 'get_extra_specs_from_share', mock.Mock(return_value=fake.EXTRA_SPEC)) mock_check_extra_specs_validity = self.mock_object( self.library, '_check_extra_specs_validity') mock_check_aggregate_extra_specs_validity = self.mock_object( self.library, '_check_aggregate_extra_specs_validity') result = self.library._manage_container(share_to_manage, vserver_client) mock_get_volume_to_manage.assert_called_once_with( fake.POOL_NAME, fake.FLEXVOL_NAME) mock_validate_volume_for_manage.assert_called_once_with( fake.FLEXVOL_TO_MANAGE, vserver_client) mock_check_extra_specs_validity.assert_called_once_with( share_to_manage, fake.EXTRA_SPEC) mock_check_aggregate_extra_specs_validity.assert_called_once_with( fake.POOL_NAME, fake.EXTRA_SPEC) vserver_client.unmount_volume.assert_called_once_with( fake.FLEXVOL_NAME) vserver_client.set_volume_name.assert_called_once_with( fake.FLEXVOL_NAME, fake.SHARE_NAME) vserver_client.mount_volume.assert_called_once_with( fake.SHARE_NAME) vserver_client.manage_volume.assert_called_once_with( fake.POOL_NAME, fake.SHARE_NAME, **self.library._get_provisioning_options(fake.EXTRA_SPEC)) original_data = { 'original_name': fake.FLEXVOL_TO_MANAGE['name'], 'original_junction_path': fake.FLEXVOL_TO_MANAGE['junction-path'], } self.library.private_storage.update.assert_called_once_with( fake.SHARE['id'], original_data) expected_size = int( math.ceil(float(fake.FLEXVOL_TO_MANAGE['size']) / units.Gi)) self.assertEqual(expected_size, result) def test_manage_container_invalid_export_location(self): vserver_client = mock.Mock() share_to_manage = copy.deepcopy(fake.SHARE) share_to_manage['export_location'] = fake.EXPORT_LOCATION mock_helper = mock.Mock() mock_helper.get_share_name_for_share.return_value = None self.mock_object(self.library, '_get_helper', mock.Mock(return_value=mock_helper)) self.assertRaises(exception.ManageInvalidShare, self.library._manage_container, share_to_manage, vserver_client) def test_manage_container_not_found(self): vserver_client = mock.Mock() share_to_manage = copy.deepcopy(fake.SHARE) share_to_manage['export_location'] = fake.EXPORT_LOCATION mock_helper = 
mock.Mock() mock_helper.get_share_name_for_share.return_value = fake.FLEXVOL_NAME self.mock_object(self.library, '_get_helper', mock.Mock(return_value=mock_helper)) self.mock_object(vserver_client, 'get_volume_to_manage', mock.Mock(return_value=None)) self.assertRaises(exception.ManageInvalidShare, self.library._manage_container, share_to_manage, vserver_client) def test_manage_container_invalid_extra_specs(self): vserver_client = mock.Mock() share_to_manage = copy.deepcopy(fake.SHARE) share_to_manage['export_location'] = fake.EXPORT_LOCATION mock_helper = mock.Mock() mock_helper.get_share_name_for_share.return_value = fake.FLEXVOL_NAME self.mock_object(self.library, '_get_helper', mock.Mock(return_value=mock_helper)) self.mock_object(vserver_client, 'get_volume_to_manage', mock.Mock(return_value=fake.FLEXVOL_TO_MANAGE)) self.mock_object(self.library, '_validate_volume_for_manage') self.mock_object(share_types, 'get_extra_specs_from_share', mock.Mock(return_value=fake.EXTRA_SPEC)) self.mock_object(self.library, '_check_extra_specs_validity', mock.Mock(side_effect=exception.NetAppException)) self.assertRaises(exception.ManageExistingShareTypeMismatch, self.library._manage_container, share_to_manage, vserver_client) def test_validate_volume_for_manage(self): vserver_client = mock.Mock() vserver_client.volume_has_luns = mock.Mock(return_value=False) vserver_client.volume_has_junctioned_volumes = mock.Mock( return_value=False) result = self.library._validate_volume_for_manage( fake.FLEXVOL_TO_MANAGE, vserver_client) self.assertIsNone(result) @ddt.data({ 'attribute': 'type', 'value': 'dp', }, { 'attribute': 'style', 'value': 'infinitevol', }) @ddt.unpack def test_validate_volume_for_manage_invalid_volume(self, attribute, value): flexvol_to_manage = copy.deepcopy(fake.FLEXVOL_TO_MANAGE) flexvol_to_manage[attribute] = value vserver_client = mock.Mock() vserver_client.volume_has_luns = mock.Mock(return_value=False) vserver_client.volume_has_junctioned_volumes = mock.Mock( return_value=False) self.assertRaises(exception.ManageInvalidShare, self.library._validate_volume_for_manage, flexvol_to_manage, vserver_client) def test_validate_volume_for_manage_luns_present(self): vserver_client = mock.Mock() vserver_client.volume_has_luns = mock.Mock(return_value=True) vserver_client.volume_has_junctioned_volumes = mock.Mock( return_value=False) self.assertRaises(exception.ManageInvalidShare, self.library._validate_volume_for_manage, fake.FLEXVOL_TO_MANAGE, vserver_client) def test_validate_volume_for_manage_junctioned_volumes_present(self): vserver_client = mock.Mock() vserver_client.volume_has_luns = mock.Mock(return_value=False) vserver_client.volume_has_junctioned_volumes = mock.Mock( return_value=True) self.assertRaises(exception.ManageInvalidShare, self.library._validate_volume_for_manage, fake.FLEXVOL_TO_MANAGE, vserver_client) def test_create_consistency_group(self): vserver_client = mock.Mock() mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) result = self.library.create_consistency_group( self.context, fake.EMPTY_CONSISTENCY_GROUP, share_server=fake.SHARE_SERVER) self.assertIsNone(result) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) @ddt.data(exception.InvalidInput(reason='fake_reason'), exception.VserverNotSpecified(), exception.VserverNotFound(vserver='fake_vserver')) def test_create_consistency_group_no_share_server(self, get_vserver_exception): mock_get_vserver = self.mock_object( 
self.library, '_get_vserver', mock.Mock(side_effect=get_vserver_exception)) self.assertRaises(type(get_vserver_exception), self.library.create_consistency_group, self.context, fake.EMPTY_CONSISTENCY_GROUP, share_server=fake.SHARE_SERVER) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) def test_create_consistency_group_from_cgsnapshot(self): vserver_client = mock.Mock() mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_allocate_container_from_snapshot = self.mock_object( self.library, '_allocate_container_from_snapshot') mock_create_export = self.mock_object( self.library, '_create_export', mock.Mock(side_effect=[['loc3'], ['loc4']])) result = self.library.create_consistency_group_from_cgsnapshot( self.context, fake.CONSISTENCY_GROUP_DEST, fake.CG_SNAPSHOT, share_server=fake.SHARE_SERVER) share_update_list = [ {'id': fake.SHARE_ID3, 'export_locations': ['loc3']}, {'id': fake.SHARE_ID4, 'export_locations': ['loc4']} ] expected = (None, share_update_list) self.assertEqual(expected, result) mock_allocate_container_from_snapshot.assert_has_calls([ mock.call(fake.COLLATED_CGSNAPSHOT_INFO[0]['share'], fake.COLLATED_CGSNAPSHOT_INFO[0]['snapshot'], vserver_client, mock.ANY), mock.call(fake.COLLATED_CGSNAPSHOT_INFO[1]['share'], fake.COLLATED_CGSNAPSHOT_INFO[1]['snapshot'], vserver_client, mock.ANY), ]) mock_create_export.assert_has_calls([ mock.call(fake.COLLATED_CGSNAPSHOT_INFO[0]['share'], fake.SHARE_SERVER, fake.VSERVER1, vserver_client), mock.call(fake.COLLATED_CGSNAPSHOT_INFO[1]['share'], fake.SHARE_SERVER, fake.VSERVER1, vserver_client), ]) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) def test_create_consistency_group_from_cgsnapshot_no_members(self): vserver_client = mock.Mock() mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_allocate_container_from_snapshot = self.mock_object( self.library, '_allocate_container_from_snapshot') mock_create_export = self.mock_object( self.library, '_create_export', mock.Mock(side_effect=[['loc3'], ['loc4']])) fake_cg_snapshot = copy.deepcopy(fake.CG_SNAPSHOT) fake_cg_snapshot['cgsnapshot_members'] = [] result = self.library.create_consistency_group_from_cgsnapshot( self.context, fake.CONSISTENCY_GROUP_DEST, fake_cg_snapshot, share_server=fake.SHARE_SERVER) self.assertEqual((None, None), result) self.assertFalse(mock_allocate_container_from_snapshot.called) self.assertFalse(mock_create_export.called) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) def test_collate_cg_snapshot_info(self): result = self.library._collate_cg_snapshot_info( fake.CONSISTENCY_GROUP_DEST, fake.CG_SNAPSHOT) self.assertEqual(fake.COLLATED_CGSNAPSHOT_INFO, result) def test_collate_cg_snapshot_info_invalid(self): fake_cg_snapshot = copy.deepcopy(fake.CG_SNAPSHOT) fake_cg_snapshot['cgsnapshot_members'] = [] self.assertRaises(exception.InvalidConsistencyGroup, self.library._collate_cg_snapshot_info, fake.CONSISTENCY_GROUP_DEST, fake_cg_snapshot) def test_delete_consistency_group(self): vserver_client = mock.Mock() mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) result = self.library.delete_consistency_group( self.context, fake.EMPTY_CONSISTENCY_GROUP, share_server=fake.SHARE_SERVER) self.assertIsNone(result) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) 
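    # NOTE: The *_no_share_server tests in this class (delete_share,
    # delete_snapshot, delete_consistency_group, delete_cgsnapshot) all
    # exercise the same fault-tolerant pattern in the library under test:
    # _get_vserver() may raise InvalidInput, VserverNotSpecified or
    # VserverNotFound, and delete-style operations are expected to log a
    # warning and return rather than propagate the error. A minimal sketch
    # of that pattern is shown here; it is a simplified illustration under
    # that assumption, not the verbatim driver code:
    #
    #     def delete_consistency_group(self, context, cg_dict,
    #                                  share_server=None):
    #         try:
    #             vserver, vserver_client = self._get_vserver(
    #                 share_server=share_server)
    #         except (exception.InvalidInput,
    #                 exception.VserverNotSpecified,
    #                 exception.VserverNotFound) as error:
    #             # Tolerate a missing or unresolvable share server so the
    #             # CG record can still be cleaned up; the tests below
    #             # assert that exactly one warning is logged.
    #             LOG.warning("Could not determine share server for "
    #                         "consistency group being deleted: %s. "
    #                         "Deletion of CG record will proceed anyway.",
    #                         error)
    #             return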
@ddt.data(exception.InvalidInput(reason='fake_reason'), exception.VserverNotSpecified(), exception.VserverNotFound(vserver='fake_vserver')) def test_delete_consistency_group_no_share_server(self, get_vserver_exception): mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(side_effect=get_vserver_exception)) result = self.library.delete_consistency_group( self.context, fake.EMPTY_CONSISTENCY_GROUP, share_server=fake.SHARE_SERVER) self.assertIsNone(result) self.assertEqual(1, lib_base.LOG.warning.call_count) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) def test_create_cgsnapshot(self): vserver_client = mock.Mock() mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) result = self.library.create_cgsnapshot( self.context, fake.CG_SNAPSHOT, share_server=fake.SHARE_SERVER) share_names = [ self.library._get_backend_share_name( fake.CG_SNAPSHOT_MEMBER_1['share_id']), self.library._get_backend_share_name( fake.CG_SNAPSHOT_MEMBER_2['share_id']) ] snapshot_name = self.library._get_backend_cg_snapshot_name( fake.CG_SNAPSHOT['id']) vserver_client.create_cg_snapshot.assert_called_once_with( share_names, snapshot_name) self.assertEqual((None, None), result) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) def test_create_cgsnapshot_no_members(self): vserver_client = mock.Mock() mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) fake_cg_snapshot = copy.deepcopy(fake.CG_SNAPSHOT) fake_cg_snapshot['cgsnapshot_members'] = [] result = self.library.create_cgsnapshot( self.context, fake_cg_snapshot, share_server=fake.SHARE_SERVER) self.assertFalse(vserver_client.create_cg_snapshot.called) self.assertEqual((None, None), result) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) def test_delete_cgsnapshot(self): vserver_client = mock.Mock() mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_delete_snapshot = self.mock_object(self.library, '_delete_snapshot') result = self.library.delete_cgsnapshot( self.context, fake.CG_SNAPSHOT, share_server=fake.SHARE_SERVER) share_names = [ self.library._get_backend_share_name( fake.CG_SNAPSHOT_MEMBER_1['share_id']), self.library._get_backend_share_name( fake.CG_SNAPSHOT_MEMBER_2['share_id']) ] snapshot_name = self.library._get_backend_cg_snapshot_name( fake.CG_SNAPSHOT['id']) mock_delete_snapshot.assert_has_calls([ mock.call(vserver_client, share_names[0], snapshot_name), mock.call(vserver_client, share_names[1], snapshot_name) ]) self.assertEqual((None, None), result) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) def test_delete_cgsnapshot_no_members(self): vserver_client = mock.Mock() mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_delete_snapshot = self.mock_object(self.library, '_delete_snapshot') fake_cg_snapshot = copy.deepcopy(fake.CG_SNAPSHOT) fake_cg_snapshot['cgsnapshot_members'] = [] result = self.library.delete_cgsnapshot( self.context, fake_cg_snapshot, share_server=fake.SHARE_SERVER) self.assertFalse(mock_delete_snapshot.called) self.assertEqual((None, None), result) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) def test_delete_cgsnapshot_snapshots_not_found(self): vserver_client = mock.Mock() mock_get_vserver 
= self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_delete_snapshot = self.mock_object( self.library, '_delete_snapshot', mock.Mock(side_effect=exception.SnapshotResourceNotFound( name='fake'))) result = self.library.delete_cgsnapshot( self.context, fake.CG_SNAPSHOT, share_server=fake.SHARE_SERVER) share_names = [ self.library._get_backend_share_name( fake.CG_SNAPSHOT_MEMBER_1['share_id']), self.library._get_backend_share_name( fake.CG_SNAPSHOT_MEMBER_2['share_id']) ] snapshot_name = self.library._get_backend_cg_snapshot_name( fake.CG_SNAPSHOT['id']) mock_delete_snapshot.assert_has_calls([ mock.call(vserver_client, share_names[0], snapshot_name), mock.call(vserver_client, share_names[1], snapshot_name) ]) self.assertEqual((None, None), result) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) @ddt.data(exception.InvalidInput(reason='fake_reason'), exception.VserverNotSpecified(), exception.VserverNotFound(vserver='fake_vserver')) def test_delete_cgsnapshot_no_share_server(self, get_vserver_exception): mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(side_effect=get_vserver_exception)) result = self.library.delete_cgsnapshot( self.context, fake.EMPTY_CONSISTENCY_GROUP, share_server=fake.SHARE_SERVER) self.assertEqual((None, None), result) self.assertEqual(1, lib_base.LOG.warning.call_count) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) def test_extend_share(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_set_volume_size = self.mock_object(vserver_client, 'set_volume_size') new_size = fake.SHARE['size'] * 2 self.library.extend_share(fake.SHARE, new_size) mock_set_volume_size.assert_called_once_with(fake.SHARE_NAME, new_size) def test_shrink_share(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_set_volume_size = self.mock_object(vserver_client, 'set_volume_size') new_size = fake.SHARE['size'] - 1 self.library.shrink_share(fake.SHARE, new_size) mock_set_volume_size.assert_called_once_with(fake.SHARE_NAME, new_size) def test_update_access(self): vserver_client = mock.Mock() mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) protocol_helper = mock.Mock() protocol_helper.update_access.return_value = None self.mock_object(self.library, '_get_helper', mock.Mock(return_value=protocol_helper)) mock_share_exists = self.mock_object(self.library, '_share_exists', mock.Mock(return_value=True)) self.library.update_access(self.context, fake.SHARE, [fake.SHARE_ACCESS], [], [], share_server=fake.SHARE_SERVER) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) share_name = self.library._get_backend_share_name(fake.SHARE['id']) mock_share_exists.assert_called_once_with(share_name, vserver_client) protocol_helper.set_client.assert_called_once_with(vserver_client) protocol_helper.update_access.assert_called_once_with( fake.SHARE, fake.SHARE_NAME, [fake.SHARE_ACCESS]) @ddt.data(exception.InvalidInput(reason='fake_reason'), exception.VserverNotSpecified(), exception.VserverNotFound(vserver='fake_vserver')) def test_update_access_no_share_server(self, get_vserver_exception): mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(side_effect=get_vserver_exception)) 
protocol_helper = mock.Mock() protocol_helper.update_access.return_value = None self.mock_object(self.library, '_get_helper', mock.Mock(return_value=protocol_helper)) mock_share_exists = self.mock_object(self.library, '_share_exists') self.library.update_access(self.context, fake.SHARE, [fake.SHARE_ACCESS], [], [], share_server=fake.SHARE_SERVER) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) self.assertFalse(mock_share_exists.called) self.assertFalse(protocol_helper.set_client.called) self.assertFalse(protocol_helper.update_access.called) def test_update_access_share_not_found(self): vserver_client = mock.Mock() mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) protocol_helper = mock.Mock() protocol_helper.update_access.return_value = None self.mock_object(self.library, '_get_helper', mock.Mock(return_value=protocol_helper)) mock_share_exists = self.mock_object(self.library, '_share_exists', mock.Mock(return_value=False)) self.assertRaises(exception.ShareResourceNotFound, self.library.update_access, self.context, fake.SHARE, [fake.SHARE_ACCESS], [], [], share_server=fake.SHARE_SERVER) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) share_name = self.library._get_backend_share_name(fake.SHARE['id']) mock_share_exists.assert_called_once_with(share_name, vserver_client) self.assertFalse(protocol_helper.set_client.called) self.assertFalse(protocol_helper.update_access.called) def test_update_access_to_active_replica(self): fake_share = copy.deepcopy(fake.SHARE) fake_share['replica_state'] = constants.REPLICA_STATE_ACTIVE vserver_client = mock.Mock() mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) protocol_helper = mock.Mock() protocol_helper.update_access.return_value = None self.mock_object(self.library, '_get_helper', mock.Mock(return_value=protocol_helper)) mock_share_exists = self.mock_object(self.library, '_share_exists', mock.Mock(return_value=True)) self.library.update_access(self.context, fake_share, [fake.SHARE_ACCESS], [], [], share_server=fake.SHARE_SERVER) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) share_name = self.library._get_backend_share_name(fake.SHARE['id']) mock_share_exists.assert_called_once_with(share_name, vserver_client) protocol_helper.set_client.assert_called_once_with(vserver_client) protocol_helper.update_access.assert_called_once_with( fake.SHARE, fake.SHARE_NAME, [fake.SHARE_ACCESS]) def test_update_access_to_in_sync_replica(self): fake_share = copy.deepcopy(fake.SHARE) fake_share['replica_state'] = constants.REPLICA_STATE_IN_SYNC self.library.update_access(self.context, fake_share, [fake.SHARE_ACCESS], [], [], share_server=fake.SHARE_SERVER) def test_setup_server(self): self.assertRaises(NotImplementedError, self.library.setup_server, fake.NETWORK_INFO) def test_teardown_server(self): self.assertRaises(NotImplementedError, self.library.teardown_server, fake.SHARE_SERVER['backend_details']) def test_get_network_allocations_number(self): self.assertRaises(NotImplementedError, self.library.get_network_allocations_number) def test_update_ssc_info(self): self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=fake.AGGREGATES)) mock_update_ssc_aggr_info = self.mock_object(self.library, '_update_ssc_aggr_info') self.library._update_ssc_info() expected = { fake.AGGREGATES[0]: {}, fake.AGGREGATES[1]: {} } 
        self.assertDictEqual(expected, self.library._ssc_stats)
        self.assertTrue(mock_update_ssc_aggr_info.called)

    def test_update_ssc_info_no_aggregates(self):
        self.mock_object(self.library, '_find_matching_aggregates',
                         mock.Mock(return_value=[]))
        mock_update_ssc_aggr_info = self.mock_object(self.library,
                                                     '_update_ssc_aggr_info')

        self.library._update_ssc_info()

        self.assertDictEqual({}, self.library._ssc_stats)
        self.assertFalse(mock_update_ssc_aggr_info.called)

    def test_update_ssc_aggr_info(self):
        self.library._have_cluster_creds = True
        self.mock_object(self.client, 'get_aggregate_raid_types',
                         mock.Mock(return_value=fake.SSC_RAID_TYPES))
        self.mock_object(self.client, 'get_aggregate_disk_types',
                         mock.Mock(return_value=fake.SSC_DISK_TYPES))
        ssc_stats = {
            fake.AGGREGATES[0]: {},
            fake.AGGREGATES[1]: {}
        }

        self.library._update_ssc_aggr_info(fake.AGGREGATES, ssc_stats)

        self.assertDictEqual(fake.SSC_INFO, ssc_stats)

    def test_update_ssc_aggr_info_not_found(self):
        self.library._have_cluster_creds = True
        self.mock_object(self.client, 'get_aggregate_raid_types',
                         mock.Mock(return_value={}))
        self.mock_object(self.client, 'get_aggregate_disk_types',
                         mock.Mock(return_value={}))
        ssc_stats = {}

        self.library._update_ssc_aggr_info(fake.AGGREGATES, ssc_stats)

        self.assertDictEqual({}, ssc_stats)

    def test_update_ssc_aggr_info_no_cluster_creds(self):
        self.library._have_cluster_creds = False
        ssc_stats = {}

        self.library._update_ssc_aggr_info(fake.AGGREGATES, ssc_stats)

        self.assertDictEqual({}, ssc_stats)
        self.assertFalse(self.library._client.get_aggregate_raid_types.called)

    def test_create_replica(self):
        self.mock_object(self.library, '_allocate_container')
        mock_dm_session = mock.Mock()
        self.mock_object(data_motion, "DataMotionSession",
                         mock.Mock(return_value=mock_dm_session))
        self.mock_object(data_motion, 'get_client_for_backend')
        self.mock_object(mock_dm_session, 'get_vserver_from_share',
                         mock.Mock(return_value=fake.VSERVER1))
        expected_model_update = {
            'export_locations': [],
            'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC,
            'access_rules_status': constants.STATUS_ACTIVE,
        }

        model_update = self.library.create_replica(
            None, [fake.SHARE], fake.SHARE, [], [],
            share_server=None)

        self.assertDictMatch(expected_model_update, model_update)
        mock_dm_session.create_snapmirror.assert_called_once_with(fake.SHARE,
                                                                  fake.SHARE)
        data_motion.get_client_for_backend.assert_called_once_with(
            fake.BACKEND_NAME, vserver_name=fake.VSERVER1)

    def test_create_replica_with_share_server(self):
        self.mock_object(self.library, '_allocate_container', mock.Mock())
        mock_dm_session = mock.Mock()
        self.mock_object(data_motion, "DataMotionSession",
                         mock.Mock(return_value=mock_dm_session))
        self.mock_object(data_motion, 'get_client_for_backend')
        self.mock_object(mock_dm_session, 'get_vserver_from_share',
                         mock.Mock(return_value=fake.VSERVER1))
        expected_model_update = {
            'export_locations': [],
            'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC,
            'access_rules_status': constants.STATUS_ACTIVE,
        }

        model_update = self.library.create_replica(
            None, [fake.SHARE], fake.SHARE, [], [],
            share_server=fake.SHARE_SERVER)

        self.assertDictMatch(expected_model_update, model_update)
        mock_dm_session.create_snapmirror.assert_called_once_with(fake.SHARE,
                                                                  fake.SHARE)
        data_motion.get_client_for_backend.assert_called_once_with(
            fake.BACKEND_NAME, vserver_name=fake.VSERVER1)

    def test_delete_replica(self):
        self.mock_object(self.library, '_deallocate_container', mock.Mock())
        self.mock_object(self.library, '_share_exists',
                         mock.Mock(return_value=False))
        mock_dm_session = mock.Mock()
self.mock_object(data_motion, "DataMotionSession", mock.Mock(return_value=mock_dm_session)) self.mock_object(data_motion, 'get_client_for_backend') self.mock_object(mock_dm_session, 'get_vserver_from_share', mock.Mock(return_value=fake.VSERVER1)) result = self.library.delete_replica(None, [fake.SHARE], fake.SHARE, [], share_server=None) self.assertEqual(None, result) mock_dm_session.delete_snapmirror.assert_called_with(fake.SHARE, fake.SHARE) self.assertEqual(2, mock_dm_session.delete_snapmirror.call_count) data_motion.get_client_for_backend.assert_called_with( fake.BACKEND_NAME, vserver_name=mock.ANY) self.assertEqual(1, data_motion.get_client_for_backend.call_count) def test_delete_replica_with_share_server(self): self.mock_object(self.library, '_deallocate_container', mock.Mock()) self.mock_object(self.library, '_share_exists', mock.Mock(return_value=False)) mock_dm_session = mock.Mock() self.mock_object(data_motion, "DataMotionSession", mock.Mock(return_value=mock_dm_session)) self.mock_object(data_motion, 'get_client_for_backend') self.mock_object(mock_dm_session, 'get_vserver_from_share', mock.Mock(return_value=fake.VSERVER1)) result = self.library.delete_replica(None, [fake.SHARE], fake.SHARE, [], share_server=fake.SHARE_SERVER) self.assertEqual(None, result) mock_dm_session.delete_snapmirror.assert_called_with(fake.SHARE, fake.SHARE) self.assertEqual(2, mock_dm_session.delete_snapmirror.call_count) data_motion.get_client_for_backend.assert_called_once_with( fake.BACKEND_NAME, vserver_name=fake.VSERVER1) def test_delete_replica_share_absent_on_backend(self): self.mock_object(self.library, '_deallocate_container', mock.Mock()) self.mock_object(self.library, '_share_exists', mock.Mock(return_value=False)) mock_dm_session = mock.Mock() self.mock_object(data_motion, "DataMotionSession", mock.Mock(return_value=mock_dm_session)) self.mock_object(data_motion, 'get_client_for_backend') self.mock_object(mock_dm_session, 'get_vserver_from_share', mock.Mock(return_value=fake.VSERVER1)) result = self.library.delete_replica(None, [fake.SHARE], fake.SHARE, [], share_server=None) self.assertEqual(None, result) self.assertFalse(self.library._deallocate_container.called) mock_dm_session.delete_snapmirror.assert_called_with(fake.SHARE, fake.SHARE) self.assertEqual(2, mock_dm_session.delete_snapmirror.call_count) data_motion.get_client_for_backend.assert_called_with( fake.BACKEND_NAME, vserver_name=mock.ANY) self.assertEqual(1, data_motion.get_client_for_backend.call_count) def test_update_replica_state_no_snapmirror_share_creating(self): vserver_client = mock.Mock() self.mock_object(vserver_client, 'volume_exists', mock.Mock(return_value=True)) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_dm_session.get_snapmirrors = mock.Mock(return_value=[]) replica = copy.deepcopy(fake.SHARE) replica['status'] = constants.STATUS_CREATING result = self.library.update_replica_state( None, [replica], replica, None, [], share_server=None) self.assertFalse(self.mock_dm_session.create_snapmirror.called) self.assertEqual(constants.STATUS_OUT_OF_SYNC, result) def test_update_replica_state_no_snapmirror_create_failed(self): vserver_client = mock.Mock() self.mock_object(vserver_client, 'volume_exists', mock.Mock(return_value=True)) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_dm_session.get_snapmirrors = mock.Mock(return_value=[]) self.mock_dm_session.create_snapmirror.side_effect = ( 
netapp_api.NaApiError(code=0)) replica = copy.deepcopy(fake.SHARE) replica['status'] = constants.REPLICA_STATE_OUT_OF_SYNC result = self.library.update_replica_state( None, [replica], replica, None, [], share_server=None) self.assertTrue(self.mock_dm_session.create_snapmirror.called) self.assertEqual(constants.STATUS_ERROR, result) @ddt.data(constants.STATUS_ERROR, constants.STATUS_AVAILABLE) def test_update_replica_state_no_snapmirror(self, status): vserver_client = mock.Mock() self.mock_object(vserver_client, 'volume_exists', mock.Mock(return_value=True)) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_dm_session.get_snapmirrors = mock.Mock(return_value=[]) replica = copy.deepcopy(fake.SHARE) replica['status'] = status result = self.library.update_replica_state( None, [replica], replica, None, [], share_server=None) self.assertEqual(1, self.mock_dm_session.create_snapmirror.call_count) self.assertEqual(constants.STATUS_OUT_OF_SYNC, result) def test_update_replica_state_broken_snapmirror(self): fake_snapmirror = { 'mirror-state': 'broken-off', 'relationship-status': 'idle', 'source-vserver': fake.VSERVER2, 'source-volume': 'fake_volume', 'last-transfer-end-timestamp': '%s' % float(time.time() - 10000) } vserver_client = mock.Mock() self.mock_object(vserver_client, 'volume_exists', mock.Mock(return_value=True)) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_dm_session.get_snapmirrors = mock.Mock( return_value=[fake_snapmirror]) result = self.library.update_replica_state(None, [fake.SHARE], fake.SHARE, None, [], share_server=None) vserver_client.resync_snapmirror.assert_called_once_with( fake.VSERVER2, 'fake_volume', fake.VSERVER1, fake.SHARE['name'] ) self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC, result) def test_update_replica_state_snapmirror_still_initializing(self): fake_snapmirror = { 'mirror-state': 'uninitialized', 'relationship-status': 'transferring', 'source-vserver': fake.VSERVER2, 'source-volume': 'fake_volume', 'last-transfer-end-timestamp': '%s' % float(time.time() - 10000) } vserver_client = mock.Mock() self.mock_object(vserver_client, 'volume_exists', mock.Mock(return_value=True)) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_dm_session.get_snapmirrors = mock.Mock( return_value=[fake_snapmirror]) result = self.library.update_replica_state(None, [fake.SHARE], fake.SHARE, None, [], share_server=None) self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC, result) def test_update_replica_state_fail_to_get_snapmirrors(self): vserver_client = mock.Mock() self.mock_object(vserver_client, 'volume_exists', mock.Mock(return_value=True)) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_dm_session.get_snapmirrors.side_effect = ( netapp_api.NaApiError(code=0)) result = self.library.update_replica_state(None, [fake.SHARE], fake.SHARE, None, [], share_server=None) self.assertTrue(self.mock_dm_session.get_snapmirrors.called) self.assertEqual(constants.STATUS_ERROR, result) def test_update_replica_state_broken_snapmirror_resync_error(self): fake_snapmirror = { 'mirror-state': 'broken-off', 'relationship-status': 'idle', 'source-vserver': fake.VSERVER2, 'source-volume': 'fake_volume', 'last-transfer-end-timestamp': '%s' % float(time.time() - 10000) } vserver_client = mock.Mock() self.mock_object(vserver_client, 
'volume_exists', mock.Mock(return_value=True)) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_dm_session.get_snapmirrors = mock.Mock( return_value=[fake_snapmirror]) vserver_client.resync_snapmirror.side_effect = netapp_api.NaApiError result = self.library.update_replica_state(None, [fake.SHARE], fake.SHARE, None, [], share_server=None) vserver_client.resync_snapmirror.assert_called_once_with( fake.VSERVER2, 'fake_volume', fake.VSERVER1, fake.SHARE['name'] ) self.assertEqual(constants.STATUS_ERROR, result) def test_update_replica_state_stale_snapmirror(self): fake_snapmirror = { 'mirror-state': 'snapmirrored', 'last-transfer-end-timestamp': '%s' % float( timeutils.utcnow_ts() - 10000) } vserver_client = mock.Mock() self.mock_object(vserver_client, 'volume_exists', mock.Mock(return_value=True)) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_dm_session.get_snapmirrors = mock.Mock( return_value=[fake_snapmirror]) result = self.library.update_replica_state(None, [fake.SHARE], fake.SHARE, None, [], share_server=None) self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC, result) def test_update_replica_state_in_sync(self): fake_snapmirror = { 'mirror-state': 'snapmirrored', 'relationship-status': 'idle', 'last-transfer-end-timestamp': '%s' % float(time.time()) } vserver_client = mock.Mock() self.mock_object(vserver_client, 'volume_exists', mock.Mock(return_value=True)) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_dm_session.get_snapmirrors = mock.Mock( return_value=[fake_snapmirror]) result = self.library.update_replica_state(None, [fake.SHARE], fake.SHARE, None, [], share_server=None) self.assertEqual(constants.REPLICA_STATE_IN_SYNC, result) def test_update_replica_state_backend_volume_absent(self): vserver_client = mock.Mock() self.mock_object(vserver_client, 'volume_exists', mock.Mock(return_value=False)) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.assertRaises(exception.ShareResourceNotFound, self.library.update_replica_state, None, [fake.SHARE], fake.SHARE, None, [], share_server=None) def test_update_replica_state_in_sync_with_snapshots(self): fake_snapmirror = { 'mirror-state': 'snapmirrored', 'relationship-status': 'idle', 'last-transfer-end-timestamp': '%s' % float(time.time()) } fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['share_id'] = fake.SHARE['id'] snapshots = [{'share_replica_snapshot': fake_snapshot}] vserver_client = mock.Mock() self.mock_object(vserver_client, 'snapshot_exists', mock.Mock( return_value=True)) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_dm_session.get_snapmirrors = mock.Mock( return_value=[fake_snapmirror]) result = self.library.update_replica_state(None, [fake.SHARE], fake.SHARE, None, snapshots, share_server=None) self.assertEqual(constants.REPLICA_STATE_IN_SYNC, result) def test_update_replica_state_missing_snapshot(self): fake_snapmirror = { 'mirror-state': 'snapmirrored', 'relationship-status': 'idle', 'last-transfer-end-timestamp': '%s' % float(time.time()) } fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['share_id'] = fake.SHARE['id'] snapshots = [{'share_replica_snapshot': fake_snapshot}] vserver_client = mock.Mock() self.mock_object(vserver_client, 'snapshot_exists', mock.Mock( 
return_value=False)) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_dm_session.get_snapmirrors = mock.Mock( return_value=[fake_snapmirror]) result = self.library.update_replica_state(None, [fake.SHARE], fake.SHARE, None, snapshots, share_server=None) self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC, result) def test_promote_replica(self): self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock.Mock()))) self.mock_object(self.library, '_get_helper', mock.Mock(return_value=mock.Mock())) self.mock_object(self.library, '_create_export', mock.Mock(return_value='fake_export_location')) replicas = self.library.promote_replica( None, [self.fake_replica, self.fake_replica_2], self.fake_replica_2, [], share_server=None) self.mock_dm_session.change_snapmirror_source.assert_called_once_with( self.fake_replica, self.fake_replica, self.fake_replica_2, mock.ANY ) self.assertEqual(2, len(replicas)) actual_replica_1 = list(filter( lambda x: x['id'] == self.fake_replica['id'], replicas))[0] self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC, actual_replica_1['replica_state']) actual_replica_2 = list(filter( lambda x: x['id'] == self.fake_replica_2['id'], replicas))[0] self.assertEqual(constants.REPLICA_STATE_ACTIVE, actual_replica_2['replica_state']) self.assertEqual('fake_export_location', actual_replica_2['export_locations']) self.assertEqual(constants.STATUS_ACTIVE, actual_replica_2['access_rules_status']) def test_promote_replica_destination_unreachable(self): self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock.Mock()))) self.mock_object(self.library, '_get_helper', mock.Mock(return_value=mock.Mock())) self.mock_object(self.library, '_create_export', mock.Mock(return_value='fake_export_location')) self.mock_object( self.library, '_convert_destination_replica_to_independent', mock.Mock(side_effect=exception.StorageCommunicationException)) replicas = self.library.promote_replica( None, [self.fake_replica, self.fake_replica_2], self.fake_replica_2, [], share_server=None) self.assertEqual(1, len(replicas)) actual_replica = replicas[0] self.assertEqual(constants.STATUS_ERROR, actual_replica['replica_state']) self.assertEqual(constants.STATUS_ERROR, actual_replica['status']) def test_promote_replica_more_than_two_replicas(self): fake_replica_3 = copy.deepcopy(self.fake_replica_2) fake_replica_3['id'] = fake.SHARE_ID3 fake_replica_3['replica_state'] = constants.REPLICA_STATE_OUT_OF_SYNC self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock.Mock()))) self.mock_object(self.library, '_get_helper', mock.Mock(return_value=mock.Mock())) self.mock_object(self.library, '_create_export', mock.Mock(return_value='fake_export_location')) replicas = self.library.promote_replica( None, [self.fake_replica, self.fake_replica_2, fake_replica_3], self.fake_replica_2, [], share_server=None) self.mock_dm_session.change_snapmirror_source.assert_has_calls([ mock.call(fake_replica_3, self.fake_replica, self.fake_replica_2, mock.ANY), mock.call(self.fake_replica, self.fake_replica, self.fake_replica_2, mock.ANY) ], any_order=True) self.assertEqual(3, len(replicas)) actual_replica_1 = list(filter( lambda x: x['id'] == self.fake_replica['id'], replicas))[0] self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC, actual_replica_1['replica_state']) actual_replica_2 = list(filter( lambda x: x['id'] == self.fake_replica_2['id'], replicas))[0] 
self.assertEqual(constants.REPLICA_STATE_ACTIVE, actual_replica_2['replica_state']) self.assertEqual('fake_export_location', actual_replica_2['export_locations']) actual_replica_3 = list(filter( lambda x: x['id'] == fake_replica_3['id'], replicas))[0] self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC, actual_replica_3['replica_state']) def test_promote_replica_with_access_rules(self): self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock.Mock()))) mock_helper = mock.Mock() self.mock_object(self.library, '_get_helper', mock.Mock(return_value=mock_helper)) self.mock_object(self.library, '_create_export', mock.Mock(return_value='fake_export_location')) replicas = self.library.promote_replica( None, [self.fake_replica, self.fake_replica_2], self.fake_replica_2, [fake.SHARE_ACCESS], share_server=None) self.mock_dm_session.change_snapmirror_source.assert_has_calls([ mock.call(self.fake_replica, self.fake_replica, self.fake_replica_2, mock.ANY) ], any_order=True) self.assertEqual(2, len(replicas)) share_name = self.library._get_backend_share_name( self.fake_replica_2['id']) mock_helper.update_access.assert_called_once_with(self.fake_replica_2, share_name, [fake.SHARE_ACCESS]) def test_convert_destination_replica_to_independent(self): self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock.Mock()))) self.mock_object(self.library, '_get_helper', mock.Mock(return_value=mock.Mock())) self.mock_object(self.library, '_create_export', mock.Mock(return_value='fake_export_location')) replica = self.library._convert_destination_replica_to_independent( None, self.mock_dm_session, self.fake_replica, self.fake_replica_2, [], share_server=None) self.mock_dm_session.update_snapmirror.assert_called_once_with( self.fake_replica, self.fake_replica_2) self.mock_dm_session.break_snapmirror.assert_called_once_with( self.fake_replica, self.fake_replica_2) self.assertEqual('fake_export_location', replica['export_locations']) self.assertEqual(constants.REPLICA_STATE_ACTIVE, replica['replica_state']) def test_convert_destination_replica_to_independent_update_failed(self): self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock.Mock()))) self.mock_object(self.library, '_get_helper', mock.Mock(return_value=mock.Mock())) self.mock_object(self.library, '_create_export', mock.Mock(return_value='fake_export_location')) self.mock_object( self.mock_dm_session, 'update_snapmirror', mock.Mock(side_effect=exception.StorageCommunicationException)) replica = self.library._convert_destination_replica_to_independent( None, self.mock_dm_session, self.fake_replica, self.fake_replica_2, [], share_server=None) self.mock_dm_session.update_snapmirror.assert_called_once_with( self.fake_replica, self.fake_replica_2) self.mock_dm_session.break_snapmirror.assert_called_once_with( self.fake_replica, self.fake_replica_2) self.assertEqual('fake_export_location', replica['export_locations']) self.assertEqual(constants.REPLICA_STATE_ACTIVE, replica['replica_state']) def test_promote_replica_fail_to_set_access_rules(self): fake_helper = mock.Mock() fake_helper.update_access.side_effect = Exception fake_access_rules = [ {'access_to': "0.0.0.0", 'access_level': constants.ACCESS_LEVEL_RO}, {'access_to': "10.10.10.10", 'access_level': constants.ACCESS_LEVEL_RW}, ] self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock.Mock()))) self.mock_object(self.library, '_get_helper', mock.Mock(return_value=fake_helper)) 
self.mock_object(self.library, '_create_export', mock.Mock(return_value='fake_export_location')) replicas = self.library.promote_replica( None, [self.fake_replica, self.fake_replica_2], self.fake_replica_2, fake_access_rules, share_server=None) self.mock_dm_session.change_snapmirror_source.assert_called_once_with( self.fake_replica, self.fake_replica, self.fake_replica_2, mock.ANY ) self.assertEqual(2, len(replicas)) actual_replica_1 = list(filter( lambda x: x['id'] == self.fake_replica['id'], replicas))[0] self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC, actual_replica_1['replica_state']) actual_replica_2 = list(filter( lambda x: x['id'] == self.fake_replica_2['id'], replicas))[0] self.assertEqual(constants.REPLICA_STATE_ACTIVE, actual_replica_2['replica_state']) self.assertEqual('fake_export_location', actual_replica_2['export_locations']) self.assertEqual(constants.STATUS_OUT_OF_SYNC, actual_replica_2['access_rules_status']) def test_convert_destination_replica_to_independent_with_access_rules( self): fake_helper = mock.Mock() fake_helper.update_access.side_effect = Exception fake_access_rules = [ {'access_to': "0.0.0.0", 'access_level': constants.ACCESS_LEVEL_RO}, {'access_to': "10.10.10.10", 'access_level': constants.ACCESS_LEVEL_RW}, ] self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock.Mock()))) self.mock_object(self.library, '_get_helper', mock.Mock(return_value=fake_helper)) self.mock_object(self.library, '_create_export', mock.Mock(return_value='fake_export_location')) replica = self.library._convert_destination_replica_to_independent( None, self.mock_dm_session, self.fake_replica, self.fake_replica_2, fake_access_rules, share_server=None) self.mock_dm_session.update_snapmirror.assert_called_once_with( self.fake_replica, self.fake_replica_2) self.mock_dm_session.break_snapmirror.assert_called_once_with( self.fake_replica, self.fake_replica_2) self.assertEqual('fake_export_location', replica['export_locations']) self.assertEqual(constants.REPLICA_STATE_ACTIVE, replica['replica_state']) self.assertEqual(constants.STATUS_OUT_OF_SYNC, replica['access_rules_status']) def test_convert_destination_replica_to_independent_failed_access_rules( self): fake_helper = mock.Mock() fake_access_rules = [ {'access_to': "0.0.0.0", 'access_level': constants.ACCESS_LEVEL_RO}, {'access_to': "10.10.10.10", 'access_level': constants.ACCESS_LEVEL_RW}, ] self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock.Mock()))) self.mock_object(self.library, '_get_helper', mock.Mock(return_value=fake_helper)) self.mock_object(self.library, '_create_export', mock.Mock(return_value='fake_export_location')) replica = self.library._convert_destination_replica_to_independent( None, self.mock_dm_session, self.fake_replica, self.fake_replica_2, fake_access_rules, share_server=None) self.mock_dm_session.update_snapmirror.assert_called_once_with( self.fake_replica, self.fake_replica_2) self.mock_dm_session.break_snapmirror.assert_called_once_with( self.fake_replica, self.fake_replica_2) fake_helper.assert_has_calls([ mock.call.set_client(mock.ANY), mock.call.update_access(mock.ANY, mock.ANY, fake_access_rules), ]) self.assertEqual('fake_export_location', replica['export_locations']) self.assertEqual(constants.REPLICA_STATE_ACTIVE, replica['replica_state']) self.assertEqual(constants.STATUS_ACTIVE, replica['access_rules_status']) def test_safe_change_replica_source(self): fake_replica_3 = copy.deepcopy(self.fake_replica_2) fake_replica_3['id'] = 
fake.SHARE_ID3 fake_replica_3['replica_state'] = constants.REPLICA_STATE_OUT_OF_SYNC replica = self.library._safe_change_replica_source( self.mock_dm_session, self.fake_replica, self.fake_replica_2, fake_replica_3, [self.fake_replica, self.fake_replica_2, fake_replica_3] ) self.assertEqual([], replica['export_locations']) self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC, replica['replica_state']) def test_safe_change_replica_source_destination_unreachable(self): self.mock_dm_session.change_snapmirror_source.side_effect = ( exception.StorageCommunicationException ) fake_replica_3 = copy.deepcopy(self.fake_replica_2) fake_replica_3['id'] = fake.SHARE_ID3 fake_replica_3['replica_state'] = constants.REPLICA_STATE_OUT_OF_SYNC replica = self.library._safe_change_replica_source( self.mock_dm_session, self.fake_replica, self.fake_replica_2, fake_replica_3, [self.fake_replica, self.fake_replica_2, fake_replica_3] ) self.assertEqual([], replica['export_locations']) self.assertEqual(constants.STATUS_ERROR, replica['replica_state']) self.assertEqual(constants.STATUS_ERROR, replica['status']) def test_safe_change_replica_source_error(self): self.mock_dm_session.change_snapmirror_source.side_effect = ( netapp_api.NaApiError(code=0) ) fake_replica_3 = copy.deepcopy(self.fake_replica_2) fake_replica_3['id'] = fake.SHARE_ID3 fake_replica_3['replica_state'] = constants.REPLICA_STATE_OUT_OF_SYNC replica = self.library._safe_change_replica_source( self.mock_dm_session, self.fake_replica, self.fake_replica_2, fake_replica_3, [self.fake_replica, self.fake_replica_2, fake_replica_3] ) self.assertEqual([], replica['export_locations']) self.assertEqual(constants.STATUS_ERROR, replica['replica_state']) def test_create_replicated_snapshot(self): fake_replica_3 = copy.deepcopy(self.fake_replica_2) fake_replica_3['id'] = fake.SHARE_ID3 replica_list = [self.fake_replica, self.fake_replica_2, fake_replica_3] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['share_id'] = self.fake_replica['id'] fake_snapshot_2 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_2['id'] = str(uuid.uuid4()) fake_snapshot_2['share_id'] = self.fake_replica_2['id'] fake_snapshot_3 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_3['id'] = str(uuid.uuid4()) fake_snapshot_3['share_id'] = fake_replica_3['id'] snapshot_list = [fake_snapshot, fake_snapshot_2, fake_snapshot_3] vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) model_list = self.library.create_replicated_snapshot( self.context, replica_list, snapshot_list, share_server=fake.SHARE_SERVER) share_name = self.library._get_backend_share_name( fake_snapshot['share_id']) snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) vserver_client.create_snapshot.assert_called_once_with(share_name, snapshot_name) self.assertEqual(3, len(model_list)) for snapshot in model_list: self.assertEqual(snapshot['provider_location'], snapshot_name) actual_active_snapshot = list(filter( lambda x: x['id'] == fake_snapshot['id'], model_list))[0] self.assertEqual(constants.STATUS_AVAILABLE, actual_active_snapshot['status']) actual_non_active_snapshot_list = list(filter( lambda x: x['id'] != fake_snapshot['id'], model_list)) for snapshot in actual_non_active_snapshot_list: self.assertEqual(constants.STATUS_CREATING, snapshot['status']) self.mock_dm_session.update_snapmirror.assert_has_calls( [mock.call(self.fake_replica, self.fake_replica_2), mock.call(self.fake_replica, fake_replica_3)], any_order=True ) 
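    # The replicated-snapshot tests in this group each rebuild the same
    # three-replica / three-snapshot fixture inline. A minimal helper sketch of
    # that setup is included here for readability; the method name and its use
    # are assumptions of this sketch, not part of the library under test.
    def _build_replicated_snapshot_fixture(self):
        """Return (replica_list, snapshot_list) with one snapshot per replica."""
        fake_replica_3 = copy.deepcopy(self.fake_replica_2)
        fake_replica_3['id'] = fake.SHARE_ID3
        replica_list = [self.fake_replica, self.fake_replica_2, fake_replica_3]
        snapshot_list = []
        for index, replica in enumerate(replica_list):
            snapshot = copy.deepcopy(fake.SNAPSHOT)
            # The snapshot on the active (first) replica keeps the canonical
            # fake id; the others get unique ids, mirroring the inline setup
            # used by the surrounding tests.
            if index > 0:
                snapshot['id'] = str(uuid.uuid4())
            snapshot['share_id'] = replica['id']
            snapshot_list.append(snapshot)
        return replica_list, snapshot_list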
def test_create_replicated_snapshot_with_creating_replica(self): fake_replica_3 = copy.deepcopy(self.fake_replica_2) fake_replica_3['id'] = fake.SHARE_ID3 fake_replica_3['host'] = None replica_list = [self.fake_replica, self.fake_replica_2, fake_replica_3] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['share_id'] = self.fake_replica['id'] fake_snapshot_2 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_2['id'] = str(uuid.uuid4()) fake_snapshot_2['share_id'] = self.fake_replica_2['id'] fake_snapshot_3 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_3['id'] = str(uuid.uuid4()) fake_snapshot_3['share_id'] = fake_replica_3['id'] snapshot_list = [fake_snapshot, fake_snapshot_2, fake_snapshot_3] vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) model_list = self.library.create_replicated_snapshot( self.context, replica_list, snapshot_list, share_server=fake.SHARE_SERVER) share_name = self.library._get_backend_share_name( fake_snapshot['share_id']) snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) vserver_client.create_snapshot.assert_called_once_with(share_name, snapshot_name) self.assertEqual(3, len(model_list)) for snapshot in model_list: self.assertEqual(snapshot['provider_location'], snapshot_name) actual_active_snapshot = list(filter( lambda x: x['id'] == fake_snapshot['id'], model_list))[0] self.assertEqual(constants.STATUS_AVAILABLE, actual_active_snapshot['status']) actual_non_active_snapshot_list = list(filter( lambda x: x['id'] != fake_snapshot['id'], model_list)) for snapshot in actual_non_active_snapshot_list: self.assertEqual(constants.STATUS_CREATING, snapshot['status']) self.mock_dm_session.update_snapmirror.assert_has_calls( [mock.call(self.fake_replica, self.fake_replica_2)], any_order=True ) def test_create_replicated_snapshot_no_snapmirror(self): self.mock_dm_session.update_snapmirror.side_effect = [ None, netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND) ] fake_replica_3 = copy.deepcopy(self.fake_replica_2) fake_replica_3['id'] = fake.SHARE_ID3 replica_list = [self.fake_replica, self.fake_replica_2, fake_replica_3] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['share_id'] = self.fake_replica['id'] fake_snapshot_2 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_2['id'] = str(uuid.uuid4()) fake_snapshot_2['share_id'] = self.fake_replica_2['id'] fake_snapshot_3 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_3['id'] = str(uuid.uuid4()) fake_snapshot_3['share_id'] = fake_replica_3['id'] snapshot_list = [fake_snapshot, fake_snapshot_2, fake_snapshot_3] vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) model_list = self.library.create_replicated_snapshot( self.context, replica_list, snapshot_list, share_server=fake.SHARE_SERVER) share_name = self.library._get_backend_share_name( fake_snapshot['share_id']) snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) vserver_client.create_snapshot.assert_called_once_with(share_name, snapshot_name) self.assertEqual(3, len(model_list)) for snapshot in model_list: self.assertEqual(snapshot['provider_location'], snapshot_name) actual_active_snapshot = list(filter( lambda x: x['id'] == fake_snapshot['id'], model_list))[0] self.assertEqual(constants.STATUS_AVAILABLE, actual_active_snapshot['status']) actual_non_active_snapshot_list = list(filter( lambda x: x['id'] != fake_snapshot['id'], model_list)) for snapshot 
in actual_non_active_snapshot_list: self.assertEqual(constants.STATUS_CREATING, snapshot['status']) self.mock_dm_session.update_snapmirror.assert_has_calls( [mock.call(self.fake_replica, self.fake_replica_2), mock.call(self.fake_replica, fake_replica_3)], any_order=True ) def test_create_replicated_snapshot_update_error(self): self.mock_dm_session.update_snapmirror.side_effect = [ None, netapp_api.NaApiError() ] fake_replica_3 = copy.deepcopy(self.fake_replica_2) fake_replica_3['id'] = fake.SHARE_ID3 replica_list = [self.fake_replica, self.fake_replica_2, fake_replica_3] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['share_id'] = self.fake_replica['id'] fake_snapshot_2 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_2['id'] = str(uuid.uuid4()) fake_snapshot_2['share_id'] = self.fake_replica_2['id'] fake_snapshot_3 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_3['id'] = str(uuid.uuid4()) fake_snapshot_3['share_id'] = fake_replica_3['id'] snapshot_list = [fake_snapshot, fake_snapshot_2, fake_snapshot_3] vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.assertRaises(netapp_api.NaApiError, self.library.create_replicated_snapshot, self.context, replica_list, snapshot_list, share_server=fake.SHARE_SERVER) def test_delete_replicated_snapshot(self): fake_replica_3 = copy.deepcopy(self.fake_replica_2) fake_replica_3['id'] = fake.SHARE_ID3 replica_list = [self.fake_replica, self.fake_replica_2, fake_replica_3] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['share_id'] = self.fake_replica['id'] share_name = self.library._get_backend_share_name( fake_snapshot['share_id']) snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) fake_snapshot['provider_location'] = snapshot_name fake_snapshot_2 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_2['id'] = str(uuid.uuid4()) fake_snapshot_2['share_id'] = self.fake_replica_2['id'] fake_snapshot_2['provider_location'] = snapshot_name fake_snapshot_3 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_3['id'] = str(uuid.uuid4()) fake_snapshot_3['share_id'] = fake_replica_3['id'] fake_snapshot_3['provider_location'] = snapshot_name snapshot_list = [fake_snapshot, fake_snapshot_2, fake_snapshot_3] vserver_client = mock.Mock() vserver_client.get_snapshot.return_value = fake.CDOT_SNAPSHOT self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.library.delete_replicated_snapshot( self.context, replica_list, snapshot_list, share_server=fake.SHARE_SERVER) vserver_client.delete_snapshot.assert_called_once_with(share_name, snapshot_name) self.mock_dm_session.update_snapmirror.assert_has_calls( [mock.call(self.fake_replica, self.fake_replica_2), mock.call(self.fake_replica, fake_replica_3)], any_order=True ) def test_delete_replicated_snapshot_replica_still_creating(self): fake_replica_3 = copy.deepcopy(self.fake_replica_2) fake_replica_3['id'] = fake.SHARE_ID3 fake_replica_3['host'] = None replica_list = [self.fake_replica, self.fake_replica_2, fake_replica_3] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['share_id'] = self.fake_replica['id'] share_name = self.library._get_backend_share_name( fake_snapshot['share_id']) snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) fake_snapshot['provider_location'] = snapshot_name fake_snapshot_2 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_2['id'] = str(uuid.uuid4()) fake_snapshot_2['share_id'] = self.fake_replica_2['id'] 
fake_snapshot_2['provider_location'] = snapshot_name fake_snapshot_3 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_3['id'] = str(uuid.uuid4()) fake_snapshot_3['share_id'] = fake_replica_3['id'] fake_snapshot_3['provider_location'] = snapshot_name snapshot_list = [fake_snapshot, fake_snapshot_2, fake_snapshot_3] vserver_client = mock.Mock() vserver_client.get_snapshot.return_value = fake.CDOT_SNAPSHOT self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.library.delete_replicated_snapshot( self.context, replica_list, snapshot_list, share_server=fake.SHARE_SERVER) vserver_client.delete_snapshot.assert_called_once_with(share_name, snapshot_name) self.mock_dm_session.update_snapmirror.assert_has_calls( [mock.call(self.fake_replica, self.fake_replica_2)], any_order=True ) def test_delete_replicated_snapshot_missing_snapmirror(self): self.mock_dm_session.update_snapmirror.side_effect = [ None, netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND) ] fake_replica_3 = copy.deepcopy(self.fake_replica_2) fake_replica_3['id'] = fake.SHARE_ID3 replica_list = [self.fake_replica, self.fake_replica_2, fake_replica_3] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['share_id'] = self.fake_replica['id'] share_name = self.library._get_backend_share_name( fake_snapshot['share_id']) snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) fake_snapshot['provider_location'] = snapshot_name fake_snapshot['busy'] = False fake_snapshot_2 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_2['id'] = str(uuid.uuid4()) fake_snapshot_2['share_id'] = self.fake_replica_2['id'] fake_snapshot_2['provider_location'] = snapshot_name fake_snapshot_3 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_3['id'] = str(uuid.uuid4()) fake_snapshot_3['share_id'] = fake_replica_3['id'] fake_snapshot_3['provider_location'] = snapshot_name snapshot_list = [fake_snapshot, fake_snapshot_2, fake_snapshot_3] vserver_client = mock.Mock() vserver_client.get_snapshot.return_value = fake_snapshot self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.library.delete_replicated_snapshot( self.context, replica_list, snapshot_list, share_server=fake.SHARE_SERVER) vserver_client.delete_snapshot.assert_called_once_with(share_name, snapshot_name) self.mock_dm_session.update_snapmirror.assert_has_calls( [mock.call(self.fake_replica, self.fake_replica_2), mock.call(self.fake_replica, fake_replica_3)], any_order=True ) def test_delete_replicated_snapshot_update_error(self): self.mock_dm_session.update_snapmirror.side_effect = [ None, netapp_api.NaApiError() ] fake_replica_3 = copy.deepcopy(self.fake_replica_2) fake_replica_3['id'] = fake.SHARE_ID3 replica_list = [self.fake_replica, self.fake_replica_2, fake_replica_3] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['share_id'] = self.fake_replica['id'] snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) fake_snapshot['provider_location'] = snapshot_name fake_snapshot['busy'] = False fake_snapshot_2 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_2['id'] = str(uuid.uuid4()) fake_snapshot_2['share_id'] = self.fake_replica_2['id'] fake_snapshot_2['provider_location'] = snapshot_name fake_snapshot_3 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_3['id'] = str(uuid.uuid4()) fake_snapshot_3['share_id'] = fake_replica_3['id'] fake_snapshot_3['provider_location'] = snapshot_name snapshot_list = [fake_snapshot, fake_snapshot_2, fake_snapshot_3] 
vserver_client = mock.Mock() vserver_client.get_snapshot.return_value = fake_snapshot self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.assertRaises(netapp_api.NaApiError, self.library.delete_replicated_snapshot, self.context, replica_list, snapshot_list, share_server=fake.SHARE_SERVER) def test_update_replicated_snapshot_still_creating(self): vserver_client = mock.Mock() vserver_client.snapshot_exists.return_value = False self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) replica_list = [self.fake_replica, self.fake_replica_2] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['status'] = constants.STATUS_CREATING fake_snapshot['share_id'] = self.fake_replica_2['id'] snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) fake_snapshot['provider_location'] = snapshot_name model_update = self.library.update_replicated_snapshot( replica_list, self.fake_replica_2, [fake_snapshot], fake_snapshot) self.assertEqual(None, model_update) self.mock_dm_session.update_snapmirror.assert_called_once_with( self.fake_replica, self.fake_replica_2 ) def test_update_replicated_snapshot_still_creating_no_host(self): self.fake_replica_2['host'] = None vserver_client = mock.Mock() vserver_client.snapshot_exists.return_value = False self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) replica_list = [self.fake_replica, self.fake_replica_2] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['status'] = constants.STATUS_CREATING fake_snapshot['share_id'] = self.fake_replica_2['id'] snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) fake_snapshot['provider_location'] = snapshot_name model_update = self.library.update_replicated_snapshot( replica_list, self.fake_replica_2, [fake_snapshot], fake_snapshot) self.assertEqual(None, model_update) self.mock_dm_session.update_snapmirror.assert_called_once_with( self.fake_replica, self.fake_replica_2 ) def test_update_replicated_snapshot_no_snapmirror(self): vserver_client = mock.Mock() vserver_client.snapshot_exists.return_value = False self.mock_dm_session.update_snapmirror.side_effect = ( netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND) ) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) replica_list = [self.fake_replica, self.fake_replica_2] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['status'] = constants.STATUS_CREATING fake_snapshot['share_id'] = self.fake_replica_2['id'] snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) fake_snapshot['provider_location'] = snapshot_name model_update = self.library.update_replicated_snapshot( replica_list, self.fake_replica_2, [fake_snapshot], fake_snapshot) self.assertEqual(None, model_update) self.mock_dm_session.update_snapmirror.assert_called_once_with( self.fake_replica, self.fake_replica_2 ) def test_update_replicated_snapshot_update_error(self): vserver_client = mock.Mock() vserver_client.snapshot_exists.return_value = False self.mock_dm_session.update_snapmirror.side_effect = ( netapp_api.NaApiError() ) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) replica_list = [self.fake_replica, self.fake_replica_2] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['status'] = constants.STATUS_CREATING fake_snapshot['share_id'] = 
self.fake_replica_2['id'] snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) fake_snapshot['provider_location'] = snapshot_name self.assertRaises(netapp_api.NaApiError, self.library.update_replicated_snapshot, replica_list, self.fake_replica_2, [fake_snapshot], fake_snapshot) def test_update_replicated_snapshot_still_deleting(self): vserver_client = mock.Mock() vserver_client.snapshot_exists.return_value = True vserver_client.get_snapshot.return_value = fake.CDOT_SNAPSHOT self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) replica_list = [self.fake_replica] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['status'] = constants.STATUS_DELETING fake_snapshot['share_id'] = self.fake_replica['id'] snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) fake_snapshot['provider_location'] = snapshot_name model_update = self.library.update_replicated_snapshot( replica_list, self.fake_replica, [fake_snapshot], fake_snapshot) self.assertEqual(None, model_update) def test_update_replicated_snapshot_created(self): vserver_client = mock.Mock() vserver_client.snapshot_exists.return_value = True self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) replica_list = [self.fake_replica] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['status'] = constants.STATUS_CREATING fake_snapshot['share_id'] = self.fake_replica['id'] snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) fake_snapshot['provider_location'] = snapshot_name model_update = self.library.update_replicated_snapshot( replica_list, self.fake_replica, [fake_snapshot], fake_snapshot) self.assertEqual(constants.STATUS_AVAILABLE, model_update['status']) self.assertEqual(snapshot_name, model_update['provider_location']) def test_update_replicated_snapshot_created_no_provider_location(self): vserver_client = mock.Mock() vserver_client.snapshot_exists.return_value = True self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) replica_list = [self.fake_replica, self.fake_replica_2] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['status'] = constants.STATUS_ACTIVE fake_snapshot['share_id'] = self.fake_replica['id'] snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) fake_snapshot['provider_location'] = snapshot_name fake_snapshot_2 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_2['status'] = constants.STATUS_CREATING fake_snapshot_2['share_id'] = self.fake_replica_2['id'] model_update = self.library.update_replicated_snapshot( replica_list, self.fake_replica_2, [fake_snapshot, fake_snapshot_2], fake_snapshot_2) self.assertEqual(constants.STATUS_AVAILABLE, model_update['status']) self.assertEqual(snapshot_name, model_update['provider_location']) def test_update_replicated_snapshot_deleted(self): vserver_client = mock.Mock() vserver_client.snapshot_exists.return_value = False self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) replica_list = [self.fake_replica] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['status'] = constants.STATUS_DELETING fake_snapshot['share_id'] = self.fake_replica['id'] snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) fake_snapshot['provider_location'] = snapshot_name self.assertRaises(exception.SnapshotResourceNotFound, self.library.update_replicated_snapshot, 
replica_list, self.fake_replica, [fake_snapshot], fake_snapshot) def test_update_replicated_snapshot_no_provider_locations(self): vserver_client = mock.Mock() vserver_client.snapshot_exists.return_value = True self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) replica_list = [self.fake_replica] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['status'] = constants.STATUS_CREATING fake_snapshot['share_id'] = self.fake_replica['id'] fake_snapshot['provider_location'] = None model_update = self.library.update_replicated_snapshot( replica_list, self.fake_replica, [fake_snapshot], fake_snapshot) self.assertEqual(None, model_update) manila-2.0.0/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_multi_svm.py0000664000567000056710000007013612701407107033511 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit tests for the NetApp Data ONTAP cDOT multi-SVM storage driver library. """ import copy import ddt import mock from oslo_log import log from manila import context from manila import exception from manila.share.drivers.netapp.dataontap.client import api as netapp_api from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base from manila.share.drivers.netapp.dataontap.cluster_mode import lib_multi_svm from manila.share.drivers.netapp import utils as na_utils from manila import test from manila.tests.share.drivers.netapp.dataontap import fakes as fake @ddt.ddt class NetAppFileStorageLibraryTestCase(test.TestCase): def setUp(self): super(NetAppFileStorageLibraryTestCase, self).setUp() self.mock_object(na_utils, 'validate_driver_instantiation') # Mock loggers as themselves to allow logger arg validation mock_logger = log.getLogger('mock_logger') self.mock_object(lib_multi_svm.LOG, 'warning', mock.Mock(side_effect=mock_logger.warning)) self.mock_object(lib_multi_svm.LOG, 'error', mock.Mock(side_effect=mock_logger.error)) kwargs = { 'configuration': fake.get_config_cmode(), 'private_storage': mock.Mock(), 'app_version': fake.APP_VERSION } self.library = lib_multi_svm.NetAppCmodeMultiSVMFileStorageLibrary( fake.DRIVER_NAME, **kwargs) self.library._client = mock.Mock() self.library._client.get_ontapi_version.return_value = (1, 21) self.client = self.library._client self.context = mock.Mock() def test_check_for_setup_error_cluster_creds_no_vserver(self): self.library._have_cluster_creds = True self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=fake.AGGREGATES)) mock_super = self.mock_object(lib_base.NetAppCmodeFileStorageLibrary, 'check_for_setup_error') self.library.check_for_setup_error() self.assertTrue(self.library._find_matching_aggregates.called) mock_super.assert_called_once_with() def test_check_for_setup_error_cluster_creds_with_vserver(self): self.library._have_cluster_creds = True self.library.configuration.netapp_vserver = fake.VSERVER1 self.mock_object(self.library, 
'_find_matching_aggregates', mock.Mock(return_value=fake.AGGREGATES)) mock_super = self.mock_object(lib_base.NetAppCmodeFileStorageLibrary, 'check_for_setup_error') self.library.check_for_setup_error() mock_super.assert_called_once_with() self.assertTrue(self.library._find_matching_aggregates.called) self.assertTrue(lib_multi_svm.LOG.warning.called) def test_check_for_setup_error_vserver_creds(self): self.library._have_cluster_creds = False self.assertRaises(exception.InvalidInput, self.library.check_for_setup_error) def test_check_for_setup_error_no_aggregates(self): self.library._have_cluster_creds = True self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=[])) self.assertRaises(exception.NetAppException, self.library.check_for_setup_error) self.assertTrue(self.library._find_matching_aggregates.called) def test_get_vserver_no_share_server(self): self.assertRaises(exception.InvalidInput, self.library._get_vserver) def test_get_vserver_no_backend_details(self): fake_share_server = copy.deepcopy(fake.SHARE_SERVER) fake_share_server.pop('backend_details') kwargs = {'share_server': fake_share_server} self.assertRaises(exception.VserverNotSpecified, self.library._get_vserver, **kwargs) def test_get_vserver_none_backend_details(self): fake_share_server = copy.deepcopy(fake.SHARE_SERVER) fake_share_server['backend_details'] = None kwargs = {'share_server': fake_share_server} self.assertRaises(exception.VserverNotSpecified, self.library._get_vserver, **kwargs) def test_get_vserver_no_vserver(self): fake_share_server = copy.deepcopy(fake.SHARE_SERVER) fake_share_server['backend_details'].pop('vserver_name') kwargs = {'share_server': fake_share_server} self.assertRaises(exception.VserverNotSpecified, self.library._get_vserver, **kwargs) def test_get_vserver_none_vserver(self): fake_share_server = copy.deepcopy(fake.SHARE_SERVER) fake_share_server['backend_details']['vserver_name'] = None kwargs = {'share_server': fake_share_server} self.assertRaises(exception.VserverNotSpecified, self.library._get_vserver, **kwargs) def test_get_vserver_not_found(self): self.library._client.vserver_exists.return_value = False kwargs = {'share_server': fake.SHARE_SERVER} self.assertRaises(exception.VserverNotFound, self.library._get_vserver, **kwargs) def test_get_vserver(self): self.library._client.vserver_exists.return_value = True self.mock_object(self.library, '_get_api_client', mock.Mock(return_value='fake_client')) result = self.library._get_vserver(share_server=fake.SHARE_SERVER) self.assertTupleEqual((fake.VSERVER1, 'fake_client'), result) def test_handle_housekeeping_tasks(self): self.mock_object(self.client, 'prune_deleted_nfs_export_policies') self.mock_object(self.client, 'prune_deleted_snapshots') mock_super = self.mock_object(lib_base.NetAppCmodeFileStorageLibrary, '_handle_housekeeping_tasks') self.library._handle_housekeeping_tasks() self.assertTrue(self.client.prune_deleted_nfs_export_policies.called) self.assertTrue(self.client.prune_deleted_snapshots.called) self.assertTrue(mock_super.called) def test_find_matching_aggregates(self): self.mock_object(self.client, 'list_aggregates', mock.Mock(return_value=fake.AGGREGATES)) self.library.configuration.netapp_aggregate_name_search_pattern = ( '.*_aggr_1') result = self.library._find_matching_aggregates() self.assertListEqual([fake.AGGREGATES[0]], result) def test_setup_server(self): mock_get_vserver_name = self.mock_object( self.library, '_get_vserver_name', mock.Mock(return_value=fake.VSERVER1)) mock_create_vserver = 
self.mock_object(self.library, '_create_vserver') mock_validate_network_type = self.mock_object( self.library, '_validate_network_type') result = self.library.setup_server(fake.NETWORK_INFO) self.assertTrue(mock_validate_network_type.called) self.assertTrue(mock_get_vserver_name.called) self.assertTrue(mock_create_vserver.called) self.assertDictEqual({'vserver_name': fake.VSERVER1}, result) def test_setup_server_with_error(self): mock_get_vserver_name = self.mock_object( self.library, '_get_vserver_name', mock.Mock(return_value=fake.VSERVER1)) fake_exception = exception.ManilaException("fake") mock_create_vserver = self.mock_object( self.library, '_create_vserver', mock.Mock(side_effect=fake_exception)) mock_validate_network_type = self.mock_object( self.library, '_validate_network_type') self.assertRaises( exception.ManilaException, self.library.setup_server, fake.NETWORK_INFO) self.assertTrue(mock_validate_network_type.called) self.assertTrue(mock_get_vserver_name.called) self.assertTrue(mock_create_vserver.called) self.assertDictEqual( {'server_details': {'vserver_name': fake.VSERVER1}}, fake_exception.detail_data) @ddt.data( {'network_info': {'network_type': 'vlan', 'segmentation_id': 1000}}, {'network_info': {'network_type': None, 'segmentation_id': None}}, {'network_info': {'network_type': 'flat', 'segmentation_id': None}}) @ddt.unpack def test_validate_network_type_with_valid_network_types(self, network_info): self.library._validate_network_type(network_info) @ddt.data( {'network_info': {'network_type': 'vxlan', 'segmentation_id': 1000}}, {'network_info': {'network_type': 'gre', 'segmentation_id': 100}}) @ddt.unpack def test_validate_network_type_with_invalid_network_types(self, network_info): self.assertRaises(exception.NetworkBadConfigurationException, self.library._validate_network_type, network_info) def test_get_vserver_name(self): vserver_id = fake.NETWORK_INFO['server_id'] vserver_name = fake.VSERVER_NAME_TEMPLATE % vserver_id actual_result = self.library._get_vserver_name(vserver_id) self.assertEqual(vserver_name, actual_result) def test_create_vserver(self): vserver_id = fake.NETWORK_INFO['server_id'] vserver_name = fake.VSERVER_NAME_TEMPLATE % vserver_id vserver_client = mock.Mock() self.mock_object(context, 'get_admin_context', mock.Mock(return_value='fake_admin_context')) self.mock_object(self.library, '_get_api_client', mock.Mock(return_value=vserver_client)) self.mock_object(self.library._client, 'vserver_exists', mock.Mock(return_value=False)) self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=fake.AGGREGATES)) self.mock_object(self.library, '_create_ipspace', mock.Mock(return_value=fake.IPSPACE)) self.mock_object(self.library, '_create_vserver_lifs') self.mock_object(self.library, '_create_vserver_admin_lif') self.library._create_vserver(vserver_name, fake.NETWORK_INFO) self.library._create_ipspace.assert_called_with(fake.NETWORK_INFO) self.library._client.create_vserver.assert_called_with( vserver_name, fake.ROOT_VOLUME_AGGREGATE, fake.ROOT_VOLUME, fake.AGGREGATES, fake.IPSPACE) self.library._get_api_client.assert_called_with(vserver=vserver_name) self.library._create_vserver_lifs.assert_called_with( vserver_name, vserver_client, fake.NETWORK_INFO, fake.IPSPACE) self.library._create_vserver_admin_lif.assert_called_with( vserver_name, vserver_client, fake.NETWORK_INFO, fake.IPSPACE) self.assertTrue(vserver_client.enable_nfs.called) self.library._client.setup_security_services.assert_called_with( fake.NETWORK_INFO['security_services'], 
vserver_client, vserver_name) def test_create_vserver_already_present(self): vserver_id = fake.NETWORK_INFO['server_id'] vserver_name = fake.VSERVER_NAME_TEMPLATE % vserver_id self.mock_object(context, 'get_admin_context', mock.Mock(return_value='fake_admin_context')) self.mock_object(self.library._client, 'vserver_exists', mock.Mock(return_value=True)) self.assertRaises(exception.NetAppException, self.library._create_vserver, vserver_name, fake.NETWORK_INFO) @ddt.data(netapp_api.NaApiError, exception.NetAppException) def test_create_vserver_lif_creation_failure(self, lif_exception): vserver_id = fake.NETWORK_INFO['server_id'] vserver_name = fake.VSERVER_NAME_TEMPLATE % vserver_id vserver_client = mock.Mock() self.mock_object(context, 'get_admin_context', mock.Mock(return_value='fake_admin_context')) self.mock_object(self.library, '_get_api_client', mock.Mock(return_value=vserver_client)) self.mock_object(self.library._client, 'vserver_exists', mock.Mock(return_value=False)) self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=fake.AGGREGATES)) self.mock_object(self.library, '_create_ipspace', mock.Mock(return_value=fake.IPSPACE)) self.mock_object(self.library, '_create_vserver_lifs', mock.Mock(side_effect=lif_exception)) self.mock_object(self.library, '_delete_vserver') self.assertRaises(lif_exception, self.library._create_vserver, vserver_name, fake.NETWORK_INFO) self.library._get_api_client.assert_called_with(vserver=vserver_name) self.assertTrue(self.library._client.create_vserver.called) self.library._create_vserver_lifs.assert_called_with( vserver_name, vserver_client, fake.NETWORK_INFO, fake.IPSPACE) self.library._delete_vserver.assert_called_once_with( vserver_name, security_services=None) self.assertFalse(vserver_client.enable_nfs.called) self.assertEqual(1, lib_multi_svm.LOG.error.call_count) def test_get_valid_ipspace_name(self): result = self.library._get_valid_ipspace_name(fake.IPSPACE_ID) expected = 'ipspace_' + fake.IPSPACE_ID.replace('-', '_') self.assertEqual(expected, result) def test_create_ipspace_not_supported(self): self.library._client.features.IPSPACES = False result = self.library._create_ipspace(fake.NETWORK_INFO) self.assertIsNone(result) @ddt.data(None, 'flat') def test_create_ipspace_not_vlan(self, network_type): self.library._client.features.IPSPACES = True network_info = copy.deepcopy(fake.NETWORK_INFO) network_info['network_allocations'][0]['segmentation_id'] = None network_info['network_allocations'][0]['network_type'] = network_type result = self.library._create_ipspace(network_info) self.assertEqual('Default', result) def test_create_ipspace_not_neutron(self): self.library._client.features.IPSPACES = True network_info = copy.deepcopy(fake.NETWORK_INFO) network_info['neutron_subnet_id'] = None network_info['nova_net_id'] = 'fake_nova_net_id' result = self.library._create_ipspace(network_info) self.assertEqual('Default', result) def test_create_ipspace_already_present(self): self.library._client.features.IPSPACES = True self.mock_object(self.library._client, 'ipspace_exists', mock.Mock(return_value=True)) result = self.library._create_ipspace(fake.NETWORK_INFO) expected = self.library._get_valid_ipspace_name( fake.NETWORK_INFO['neutron_subnet_id']) self.assertEqual(expected, result) self.library._client.ipspace_exists.assert_has_calls([ mock.call(expected)]) self.assertFalse(self.library._client.create_ipspace.called) def test_create_ipspace(self): self.library._client.features.IPSPACES = True 
self.mock_object(self.library._client, 'ipspace_exists', mock.Mock(return_value=False)) result = self.library._create_ipspace(fake.NETWORK_INFO) expected = self.library._get_valid_ipspace_name( fake.NETWORK_INFO['neutron_subnet_id']) self.assertEqual(expected, result) self.library._client.ipspace_exists.assert_has_calls([ mock.call(expected)]) self.library._client.create_ipspace.assert_has_calls([ mock.call(expected)]) def test_create_vserver_lifs(self): self.mock_object(self.library._client, 'list_cluster_nodes', mock.Mock(return_value=fake.CLUSTER_NODES)) self.mock_object(self.library, '_get_lif_name', mock.Mock(side_effect=['fake_lif1', 'fake_lif2'])) self.mock_object(self.library, '_create_lif') self.library._create_vserver_lifs(fake.VSERVER1, 'fake_vserver_client', fake.NETWORK_INFO, fake.IPSPACE) self.library._create_lif.assert_has_calls([ mock.call('fake_vserver_client', fake.VSERVER1, fake.IPSPACE, fake.CLUSTER_NODES[0], 'fake_lif1', fake.NETWORK_INFO['network_allocations'][0]), mock.call('fake_vserver_client', fake.VSERVER1, fake.IPSPACE, fake.CLUSTER_NODES[1], 'fake_lif2', fake.NETWORK_INFO['network_allocations'][1])]) def test_create_vserver_admin_lif(self): self.mock_object(self.library._client, 'list_cluster_nodes', mock.Mock(return_value=fake.CLUSTER_NODES)) self.mock_object(self.library, '_get_lif_name', mock.Mock(return_value='fake_admin_lif')) self.mock_object(self.library, '_create_lif') self.library._create_vserver_admin_lif(fake.VSERVER1, 'fake_vserver_client', fake.NETWORK_INFO, fake.IPSPACE) self.library._create_lif.assert_has_calls([ mock.call('fake_vserver_client', fake.VSERVER1, fake.IPSPACE, fake.CLUSTER_NODES[0], 'fake_admin_lif', fake.NETWORK_INFO['admin_network_allocations'][0])]) def test_create_vserver_admin_lif_no_admin_network(self): fake_network_info = copy.deepcopy(fake.NETWORK_INFO) fake_network_info['admin_network_allocations'] = [] self.mock_object(self.library._client, 'list_cluster_nodes', mock.Mock(return_value=fake.CLUSTER_NODES)) self.mock_object(self.library, '_get_lif_name', mock.Mock(return_value='fake_admin_lif')) self.mock_object(self.library, '_create_lif') self.library._create_vserver_admin_lif(fake.VSERVER1, 'fake_vserver_client', fake_network_info, fake.IPSPACE) self.assertFalse(self.library._create_lif.called) def test_get_node_data_port(self): self.mock_object(self.client, 'list_node_data_ports', mock.Mock(return_value=fake.NODE_DATA_PORTS)) self.library.configuration.netapp_port_name_search_pattern = 'e0c' result = self.library._get_node_data_port(fake.CLUSTER_NODE) self.assertEqual('e0c', result) self.library._client.list_node_data_ports.assert_has_calls([ mock.call(fake.CLUSTER_NODE)]) def test_get_node_data_port_no_match(self): self.mock_object(self.client, 'list_node_data_ports', mock.Mock(return_value=fake.NODE_DATA_PORTS)) self.library.configuration.netapp_port_name_search_pattern = 'ifgroup1' self.assertRaises(exception.NetAppException, self.library._get_node_data_port, fake.CLUSTER_NODE) def test_get_lif_name(self): result = self.library._get_lif_name( 'fake_node', fake.NETWORK_INFO['network_allocations'][0]) self.assertEqual('os_132dbb10-9a36-46f2-8d89-3d909830c356', result) def test_create_lif(self): vserver_client = mock.Mock() vserver_client.network_interface_exists = mock.Mock( return_value=False) self.mock_object(self.library, '_get_node_data_port', mock.Mock(return_value='fake_port')) self.library._create_lif(vserver_client, 'fake_vserver', 'fake_ipspace', 'fake_node', 'fake_lif', 
fake.NETWORK_INFO['network_allocations'][0]) self.library._client.create_network_interface.assert_has_calls([ mock.call('10.10.10.10', '255.255.255.0', '1000', 'fake_node', 'fake_port', 'fake_vserver', 'fake_lif', 'fake_ipspace')]) def test_create_lif_if_nonexistent_already_present(self): vserver_client = mock.Mock() vserver_client.network_interface_exists = mock.Mock( return_value=True) self.mock_object(self.library, '_get_node_data_port', mock.Mock(return_value='fake_port')) self.library._create_lif(vserver_client, 'fake_vserver', fake.IPSPACE, 'fake_node', 'fake_lif', fake.NETWORK_INFO['network_allocations'][0]) self.assertFalse(self.library._client.create_network_interface.called) def test_get_network_allocations_number(self): self.library._client.list_cluster_nodes.return_value = ( fake.CLUSTER_NODES) result = self.library.get_network_allocations_number() self.assertEqual(len(fake.CLUSTER_NODES), result) def test_get_admin_network_allocations_number(self): result = self.library.get_admin_network_allocations_number( 'fake_admin_network_api') self.assertEqual(1, result) def test_get_admin_network_allocations_number_no_admin_network(self): result = self.library.get_admin_network_allocations_number(None) self.assertEqual(0, result) def test_teardown_server(self): self.library._client.vserver_exists.return_value = True mock_delete_vserver = self.mock_object(self.library, '_delete_vserver') self.library.teardown_server( fake.SHARE_SERVER['backend_details'], security_services=fake.NETWORK_INFO['security_services']) self.library._client.vserver_exists.assert_called_once_with( fake.VSERVER1) mock_delete_vserver.assert_called_once_with( fake.VSERVER1, security_services=fake.NETWORK_INFO['security_services']) @ddt.data(None, {}, {'vserver_name': None}) def test_teardown_server_no_share_server(self, server_details): mock_delete_vserver = self.mock_object(self.library, '_delete_vserver') self.library.teardown_server(server_details) self.assertFalse(mock_delete_vserver.called) self.assertTrue(lib_multi_svm.LOG.warning.called) def test_teardown_server_no_vserver(self): self.library._client.vserver_exists.return_value = False mock_delete_vserver = self.mock_object(self.library, '_delete_vserver') self.library.teardown_server( fake.SHARE_SERVER['backend_details'], security_services=fake.NETWORK_INFO['security_services']) self.library._client.vserver_exists.assert_called_once_with( fake.VSERVER1) self.assertFalse(mock_delete_vserver.called) self.assertTrue(lib_multi_svm.LOG.warning.called) def test_delete_vserver_no_ipspace(self): self.mock_object(self.library._client, 'get_vserver_ipspace', mock.Mock(return_value=None)) vserver_client = mock.Mock() self.mock_object(self.library, '_get_api_client', mock.Mock(return_value=vserver_client)) security_services = fake.NETWORK_INFO['security_services'] self.library._delete_vserver(fake.VSERVER1, security_services=security_services) self.library._client.get_vserver_ipspace.assert_called_once_with( fake.VSERVER1) self.library._client.delete_vserver.assert_called_once_with( fake.VSERVER1, vserver_client, security_services=security_services) self.assertFalse(self.library._client.delete_ipspace.called) def test_delete_vserver_ipspace_has_data_vservers(self): self.mock_object(self.library._client, 'get_vserver_ipspace', mock.Mock(return_value=fake.IPSPACE)) vserver_client = mock.Mock() self.mock_object(self.library, '_get_api_client', mock.Mock(return_value=vserver_client)) self.mock_object(self.library._client, 'ipspace_has_data_vservers', 
mock.Mock(return_value=True)) security_services = fake.NETWORK_INFO['security_services'] self.library._delete_vserver(fake.VSERVER1, security_services=security_services) self.library._client.get_vserver_ipspace.assert_called_once_with( fake.VSERVER1) self.library._client.delete_vserver.assert_called_once_with( fake.VSERVER1, vserver_client, security_services=security_services) self.assertFalse(self.library._client.delete_ipspace.called) def test_delete_vserver_with_ipspace(self): self.mock_object(self.library._client, 'get_vserver_ipspace', mock.Mock(return_value=fake.IPSPACE)) vserver_client = mock.Mock() self.mock_object(self.library, '_get_api_client', mock.Mock(return_value=vserver_client)) self.mock_object(self.library._client, 'ipspace_has_data_vservers', mock.Mock(return_value=False)) security_services = fake.NETWORK_INFO['security_services'] self.library._delete_vserver(fake.VSERVER1, security_services=security_services) self.library._client.get_vserver_ipspace.assert_called_once_with( fake.VSERVER1) self.library._client.delete_vserver.assert_called_once_with( fake.VSERVER1, vserver_client, security_services=security_services) self.library._client.delete_ipspace.assert_called_once_with( fake.IPSPACE) manila-2.0.0/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_driver_interfaces.py0000664000567000056710000000510112701407107034330 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Mock unit tests for the NetApp file share driver interfaces """ import mock from manila.share.drivers.netapp.dataontap.cluster_mode import drv_multi_svm from manila.share.drivers.netapp.dataontap.cluster_mode import drv_single_svm from manila import test class NetAppFileStorageDriverInterfaceTestCase(test.TestCase): def setUp(self): super(NetAppFileStorageDriverInterfaceTestCase, self).setUp() self.mock_object(drv_multi_svm.NetAppCmodeMultiSvmShareDriver, '__init__', mock.Mock(return_value=None)) self.mock_object(drv_single_svm.NetAppCmodeSingleSvmShareDriver, '__init__', mock.Mock(return_value=None)) self.drv_multi_svm = drv_multi_svm.NetAppCmodeMultiSvmShareDriver() self.drv_single_svm = drv_single_svm.NetAppCmodeSingleSvmShareDriver() def test_driver_interfaces_match(self): """Ensure the NetApp file storage driver interfaces match. The two file share Manila drivers from NetApp (cDOT multi-SVM, cDOT single-SVM) are merely passthrough shim layers atop a common file storage library. Bugs are easily introduced when a Manila method is exposed via a subset of those driver shims. This test ensures they remain in sync and the library features are uniformly available in the drivers. 
""" # Get local functions of each driver interface multi_svm_methods = self._get_local_functions(self.drv_multi_svm) single_svm_methods = self._get_local_functions(self.drv_single_svm) # Ensure NetApp file share driver shims are identical self.assertSetEqual(multi_svm_methods, single_svm_methods) def _get_local_functions(self, obj): """Get function names of an object without superclass functions.""" return set([key for key, value in type(obj).__dict__.items() if callable(value)]) manila-2.0.0/manila/tests/share/drivers/netapp/dataontap/__init__.py0000664000567000056710000000000012701407107026636 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/netapp/dataontap/fakes.py0000664000567000056710000003675212701407107026217 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Clinton Knight All rights reserved. # Copyright (c) 2015 Tom Barron All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from manila.common import constants import manila.tests.share.drivers.netapp.fakes as na_fakes BACKEND_NAME = 'fake_backend_name' DRIVER_NAME = 'fake_driver_name' APP_VERSION = 'fake_app_vsersion' HOST_NAME = 'fake_host' POOL_NAME = 'fake_pool' VSERVER1 = 'fake_vserver_1' VSERVER2 = 'fake_vserver_2' LICENSES = ('base', 'cifs', 'fcp', 'flexclone', 'iscsi', 'nfs', 'snapmirror', 'snaprestore', 'snapvault') VOLUME_NAME_TEMPLATE = 'share_%(share_id)s' VSERVER_NAME_TEMPLATE = 'os_%s' AGGREGATE_NAME_SEARCH_PATTERN = '(.*)' SHARE_NAME = 'share_7cf7c200_d3af_4e05_b87e_9167c95dfcad' FLEXVOL_NAME = 'fake_volume' JUNCTION_PATH = '/%s' % FLEXVOL_NAME EXPORT_LOCATION = '%s:%s' % (HOST_NAME, JUNCTION_PATH) SNAPSHOT_NAME = 'fake_snapshot' CONSISTENCY_GROUP_NAME = 'fake_consistency_group' SHARE_SIZE = 10 TENANT_ID = '24cb2448-13d8-4f41-afd9-eff5c4fd2a57' SHARE_ID = '7cf7c200-d3af-4e05-b87e-9167c95dfcad' SHARE_ID2 = 'b51c5a31-aa5b-4254-9ee8-7d39fa4c8c38' SHARE_ID3 = '1379991d-037b-4897-bf3a-81b4aac72eff' SHARE_ID4 = '1cb41aad-fd9b-4964-8059-646f69de925e' PARENT_SHARE_ID = '585c3935-2aa9-437c-8bad-5abae1076555' SNAPSHOT_ID = 'de4c9050-e2f9-4ce1-ade4-5ed0c9f26451' CONSISTENCY_GROUP_ID = '65bfa2c9-dc6c-4513-951a-b8d15b453ad8' CONSISTENCY_GROUP_ID2 = '35f5c1ea-45fb-40c4-98ae-2a2a17554159' CG_SNAPSHOT_ID = '6ddd8a6b-5df7-417b-a2ae-3f6e449f4eea' CG_SNAPSHOT_MEMBER_ID1 = '629f79ef-b27e-4596-9737-30f084e5ba29' CG_SNAPSHOT_MEMBER_ID2 = 'e876aa9c-a322-4391-bd88-9266178262be' FREE_CAPACITY = 10000000000 TOTAL_CAPACITY = 20000000000 AGGREGATE = 'manila_aggr_1' AGGREGATES = ('manila_aggr_1', 'manila_aggr_2') ROOT_VOLUME_AGGREGATE = 'manila1' ROOT_VOLUME = 'root' CLUSTER_NODE = 'cluster1_01' CLUSTER_NODES = ('cluster1_01', 'cluster1_02') NODE_DATA_PORT = 'e0c' NODE_DATA_PORTS = ('e0c', 'e0d') LIF_NAME_TEMPLATE = 'os_%(net_allocation_id)s' SHARE_TYPE_ID = '26e89a5b-960b-46bb-a8cf-0778e653098f' SHARE_TYPE_NAME = 'fake_share_type' IPSPACE = 'fake_ipspace' IPSPACE_ID = '27d38c27-3e8b-4d7d-9d91-fcf295e3ac8f' CLIENT_KWARGS = { 'username': 'admin', 'trace': False, 'hostname': '127.0.0.1', 'vserver': 
None, 'transport_type': 'https', 'password': 'pass', 'port': '443' } SHARE = { 'id': SHARE_ID, 'host': '%(host)s@%(backend)s#%(pool)s' % { 'host': HOST_NAME, 'backend': BACKEND_NAME, 'pool': POOL_NAME}, 'project_id': TENANT_ID, 'name': SHARE_NAME, 'size': SHARE_SIZE, 'share_proto': 'fake', 'share_type_id': 'fake_share_type_id', 'share_network_id': '5dfe0898-e2a1-4740-9177-81c7d26713b0', 'share_server_id': '7e6a2cc8-871f-4b1d-8364-5aad0f98da86', 'network_info': { 'network_allocations': [{'ip_address': 'ip'}] }, 'replica_state': constants.REPLICA_STATE_ACTIVE, } FLEXVOL_TO_MANAGE = { 'aggregate': POOL_NAME, 'junction-path': '/%s' % FLEXVOL_NAME, 'name': FLEXVOL_NAME, 'type': 'rw', 'style': 'flex', 'size': '1610612736', # rounds down to 1 GB } EXTRA_SPEC = { 'netapp:thin_provisioned': 'true', 'netapp:snapshot_policy': 'default', 'netapp:language': 'en-US', 'netapp:dedup': 'True', 'netapp:compression': 'false', 'netapp:max_files': 5000, 'netapp_disk_type': 'FCAL', 'netapp_raid_type': 'raid4', } PROVISIONING_OPTIONS = { 'thin_provisioned': True, 'snapshot_policy': 'default', 'language': 'en-US', 'dedup_enabled': True, 'compression_enabled': False, 'max_files': 5000, } PROVISIONING_OPTIONS_BOOLEAN = { 'thin_provisioned': True, 'dedup_enabled': False, 'compression_enabled': False, } PROVISIONING_OPTIONS_BOOLEAN_THIN_PROVISIONED_TRUE = { 'thin_provisioned': True, 'snapshot_policy': None, 'language': None, 'dedup_enabled': False, 'compression_enabled': False, 'max_files': None, } PROVISIONING_OPTIONS_STRING = { 'snapshot_policy': 'default', 'language': 'en-US', 'max_files': 5000, } PROVISIONING_OPTIONS_STRING_MISSING_SPECS = { 'snapshot_policy': 'default', 'language': 'en-US', 'max_files': None, } PROVISIONING_OPTIONS_STRING_DEFAULT = { 'snapshot_policy': None, 'language': None, 'max_files': None, } SHORT_BOOLEAN_EXTRA_SPEC = { 'netapp:thin_provisioned': 'true', } STRING_EXTRA_SPEC = { 'netapp:snapshot_policy': 'default', 'netapp:language': 'en-US', 'netapp:max_files': 5000, } SHORT_STRING_EXTRA_SPEC = { 'netapp:snapshot_policy': 'default', 'netapp:language': 'en-US', } INVALID_EXTRA_SPEC = { 'netapp:thin_provisioned': 'ture', 'netapp:snapshot_policy': 'wrong_default', 'netapp:language': 'abc', } INVALID_EXTRA_SPEC_COMBO = { 'netapp:dedup': 'false', 'netapp:compression': 'true' } INVALID_MAX_FILE_EXTRA_SPEC = { 'netapp:max_files': -1, } EMPTY_EXTRA_SPEC = {} SHARE_TYPE = { 'id': SHARE_TYPE_ID, 'name': SHARE_TYPE_NAME, 'extra_specs': EXTRA_SPEC } OVERLAPPING_EXTRA_SPEC = { 'compression': ' True', 'netapp:compression': 'true', 'dedupe': ' True', 'netapp:dedup': 'false', 'thin_provisioning': ' False', 'netapp:thin_provisioned': 'true', } REMAPPED_OVERLAPPING_EXTRA_SPEC = { 'netapp:compression': 'true', 'netapp:dedup': 'true', 'netapp:thin_provisioned': 'false', } EXTRA_SPEC_SHARE = copy.deepcopy(SHARE) EXTRA_SPEC_SHARE['share_type_id'] = SHARE_TYPE_ID USER_NETWORK_ALLOCATIONS = [ { 'id': '132dbb10-9a36-46f2-8d89-3d909830c356', 'ip_address': '10.10.10.10', 'cidr': '10.10.10.0/24', 'segmentation_id': '1000', 'network_type': 'vlan', 'label': 'user', }, { 'id': '7eabdeed-bad2-46ea-bd0f-a33884c869e0', 'ip_address': '10.10.10.20', 'cidr': '10.10.10.0/24', 'segmentation_id': '1000', 'network_type': 'vlan', 'label': 'user', } ] ADMIN_NETWORK_ALLOCATIONS = [ { 'id': '132dbb10-9a36-46f2-8d89-3d909830c356', 'ip_address': '10.10.20.10', 'cidr': '10.10.20.0/24', 'segmentation_id': None, 'network_type': 'flat', 'label': 'admin', }, ] NETWORK_INFO = { 'server_id': '56aafd02-4d44-43d7-b784-57fc88167224', 
'security_services': ['fake_ldap', 'fake_kerberos', 'fake_ad', ], 'network_allocations': USER_NETWORK_ALLOCATIONS, 'admin_network_allocations': ADMIN_NETWORK_ALLOCATIONS, 'neutron_subnet_id': '62bf1c2c-18eb-421b-8983-48a6d39aafe0', } NETWORK_INFO_NETMASK = '255.255.255.0' SHARE_SERVER = { 'share_network_id': 'c5b3a865-56d0-4d88-abe5-879965e099c9', 'backend_details': { 'vserver_name': VSERVER1 }, 'network_allocations': (USER_NETWORK_ALLOCATIONS + ADMIN_NETWORK_ALLOCATIONS), } SNAPSHOT = { 'id': SNAPSHOT_ID, 'project_id': TENANT_ID, 'share_id': PARENT_SHARE_ID, 'status': constants.STATUS_CREATING, 'provider_location': None, } CDOT_SNAPSHOT = { 'name': SNAPSHOT_NAME, 'volume': SHARE_NAME, 'busy': False, 'owners': set(), } CDOT_SNAPSHOT_BUSY_VOLUME_CLONE = { 'name': SNAPSHOT_NAME, 'volume': SHARE_NAME, 'busy': True, 'owners': {'volume clone'}, } CDOT_SNAPSHOT_BUSY_SNAPMIRROR = { 'name': SNAPSHOT_NAME, 'volume': SHARE_NAME, 'busy': True, 'owners': {'snapmirror'}, } CDOT_CLONE_CHILD_1 = 'fake_child_1' CDOT_CLONE_CHILD_2 = 'fake_child_2' CDOT_CLONE_CHILDREN = [ {'name': CDOT_CLONE_CHILD_1}, {'name': CDOT_CLONE_CHILD_2}, ] SHARE_FOR_CG1 = { 'id': SHARE_ID, 'host': '%(host)s@%(backend)s#%(pool)s' % { 'host': HOST_NAME, 'backend': BACKEND_NAME, 'pool': POOL_NAME}, 'name': 'share_1', 'share_proto': 'NFS', 'source_cgsnapshot_member_id': None, } SHARE_FOR_CG2 = { 'id': SHARE_ID2, 'host': '%(host)s@%(backend)s#%(pool)s' % { 'host': HOST_NAME, 'backend': BACKEND_NAME, 'pool': POOL_NAME}, 'name': 'share_2', 'share_proto': 'NFS', 'source_cgsnapshot_member_id': None, } # Clone dest of SHARE_FOR_CG1 SHARE_FOR_CG3 = { 'id': SHARE_ID3, 'host': '%(host)s@%(backend)s#%(pool)s' % { 'host': HOST_NAME, 'backend': BACKEND_NAME, 'pool': POOL_NAME}, 'name': 'share3', 'share_proto': 'NFS', 'source_cgsnapshot_member_id': CG_SNAPSHOT_MEMBER_ID1, } # Clone dest of SHARE_FOR_CG2 SHARE_FOR_CG4 = { 'id': SHARE_ID4, 'host': '%(host)s@%(backend)s#%(pool)s' % { 'host': HOST_NAME, 'backend': BACKEND_NAME, 'pool': POOL_NAME}, 'name': 'share4', 'share_proto': 'NFS', 'source_cgsnapshot_member_id': CG_SNAPSHOT_MEMBER_ID2, } EMPTY_CONSISTENCY_GROUP = { 'cgsnapshots': [], 'description': 'fake description', 'host': '%(host)s@%(backend)s' % { 'host': HOST_NAME, 'backend': BACKEND_NAME}, 'id': CONSISTENCY_GROUP_ID, 'name': CONSISTENCY_GROUP_NAME, 'shares': [], } CONSISTENCY_GROUP = { 'cgsnapshots': [], 'description': 'fake description', 'host': '%(host)s@%(backend)s' % { 'host': HOST_NAME, 'backend': BACKEND_NAME}, 'id': CONSISTENCY_GROUP_ID, 'name': CONSISTENCY_GROUP_NAME, 'shares': [SHARE_FOR_CG1, SHARE_FOR_CG2], } CONSISTENCY_GROUP_DEST = { 'cgsnapshots': [], 'description': 'fake description', 'host': '%(host)s@%(backend)s' % { 'host': HOST_NAME, 'backend': BACKEND_NAME}, 'id': CONSISTENCY_GROUP_ID, 'name': CONSISTENCY_GROUP_NAME, 'shares': [SHARE_FOR_CG3, SHARE_FOR_CG4], } CG_SNAPSHOT_MEMBER_1 = { 'cgsnapshot_id': CG_SNAPSHOT_ID, 'id': CG_SNAPSHOT_MEMBER_ID1, 'share_id': SHARE_ID, 'share_proto': 'NFS', } CG_SNAPSHOT_MEMBER_2 = { 'cgsnapshot_id': CG_SNAPSHOT_ID, 'id': CG_SNAPSHOT_MEMBER_ID2, 'share_id': SHARE_ID2, 'share_proto': 'NFS', } CG_SNAPSHOT = { 'cgsnapshot_members': [CG_SNAPSHOT_MEMBER_1, CG_SNAPSHOT_MEMBER_2], 'consistency_group': CONSISTENCY_GROUP, 'consistency_group_id': CONSISTENCY_GROUP_ID, 'id': CG_SNAPSHOT_ID, 'project_id': TENANT_ID, } COLLATED_CGSNAPSHOT_INFO = [ { 'share': SHARE_FOR_CG3, 'snapshot': { 'share_id': SHARE_ID, 'id': CG_SNAPSHOT_ID } }, { 'share': SHARE_FOR_CG4, 'snapshot': { 'share_id': SHARE_ID2, 'id': 
CG_SNAPSHOT_ID } }, ] LIF_NAMES = [] LIF_ADDRESSES = ['10.10.10.10', '10.10.10.20'] LIFS = ( {'address': LIF_ADDRESSES[0], 'home-node': CLUSTER_NODES[0], 'home-port': 'e0c', 'interface-name': 'os_132dbb10-9a36-46f2-8d89-3d909830c356', 'netmask': NETWORK_INFO_NETMASK, 'role': 'data', 'vserver': VSERVER1 }, {'address': LIF_ADDRESSES[1], 'home-node': CLUSTER_NODES[1], 'home-port': 'e0c', 'interface-name': 'os_7eabdeed-bad2-46ea-bd0f-a33884c869e0', 'netmask': NETWORK_INFO_NETMASK, 'role': 'data', 'vserver': VSERVER1 }, ) INTERFACE_ADDRESSES_WITH_METADATA = { LIF_ADDRESSES[0]: { 'is_admin_only': False, 'preferred': True, }, LIF_ADDRESSES[1]: { 'is_admin_only': True, 'preferred': False, }, } NFS_EXPORTS = [ { 'path': ':'.join([LIF_ADDRESSES[0], 'fake_export_path']), 'is_admin_only': False, 'metadata': { 'preferred': True, }, }, { 'path': ':'.join([LIF_ADDRESSES[1], 'fake_export_path']), 'is_admin_only': True, 'metadata': { 'preferred': False, }, }, ] SHARE_ACCESS = { 'access_type': 'user', 'access_to': [LIF_ADDRESSES[0]] } EMS_MESSAGE = { 'computer-name': 'fake_host', 'event-id': '0', 'event-source': 'fake_driver', 'app-version': 'fake_app_version', 'category': 'fake_category', 'event-description': 'fake_description', 'log-level': '6', 'auto-support': 'false' } AGGREGATE_CAPACITIES = { AGGREGATES[0]: { 'available': 1181116007, # 1.1 GB 'total': 3543348020, # 3.3 GB 'used': 2362232013, # 2.2 GB }, AGGREGATES[1]: { 'available': 2147483648, # 2.0 GB 'total': 6442450944, # 6.0 GB 'used': 4294967296, # 4.0 GB } } AGGREGATE_CAPACITIES_VSERVER_CREDS = { AGGREGATES[0]: { 'available': 1181116007, # 1.1 GB }, AGGREGATES[1]: { 'available': 2147483648, # 2.0 GB } } SSC_INFO = { AGGREGATES[0]: { 'netapp_raid_type': 'raid4', 'netapp_disk_type': 'FCAL' }, AGGREGATES[1]: { 'netapp_raid_type': 'raid_dp', 'netapp_disk_type': 'SSD' } } POOLS = [ {'pool_name': AGGREGATES[0], 'total_capacity_gb': 3.3, 'free_capacity_gb': 1.1, 'allocated_capacity_gb': 2.2, 'qos': 'False', 'reserved_percentage': 0, 'dedupe': [True, False], 'compression': [True, False], 'thin_provisioning': [True, False], 'netapp_raid_type': 'raid4', 'netapp_disk_type': 'FCAL' }, {'pool_name': AGGREGATES[1], 'total_capacity_gb': 6.0, 'free_capacity_gb': 2.0, 'allocated_capacity_gb': 4.0, 'qos': 'False', 'reserved_percentage': 0, 'dedupe': [True, False], 'compression': [True, False], 'thin_provisioning': [True, False], 'netapp_raid_type': 'raid_dp', 'netapp_disk_type': 'SSD' }, ] POOLS_VSERVER_CREDS = [ {'pool_name': AGGREGATES[0], 'total_capacity_gb': 'unknown', 'free_capacity_gb': 1.1, 'allocated_capacity_gb': 0.0, 'qos': 'False', 'reserved_percentage': 0, 'dedupe': [True, False], 'compression': [True, False], 'thin_provisioning': [True, False], 'netapp_raid_type': 'raid4', 'netapp_disk_type': 'FCAL' }, {'pool_name': AGGREGATES[1], 'total_capacity_gb': 'unknown', 'free_capacity_gb': 2.0, 'allocated_capacity_gb': 0.0, 'qos': 'False', 'reserved_percentage': 0, 'dedupe': [True, False], 'compression': [True, False], 'thin_provisioning': [True, False], 'netapp_raid_type': 'raid_dp', 'netapp_disk_type': 'SSD' }, ] SSC_RAID_TYPES = { AGGREGATES[0]: 'raid4', AGGREGATES[1]: 'raid_dp' } SSC_DISK_TYPES = { AGGREGATES[0]: 'FCAL', AGGREGATES[1]: 'SSD' } def get_config_cmode(): config = na_fakes.create_configuration_cmode() config.local_conf.set_override('share_backend_name', BACKEND_NAME) config.netapp_login = CLIENT_KWARGS['username'] config.netapp_password = CLIENT_KWARGS['password'] config.netapp_server_hostname = CLIENT_KWARGS['hostname'] 
    config.netapp_transport_type = CLIENT_KWARGS['transport_type']
    config.netapp_server_port = CLIENT_KWARGS['port']
    config.netapp_volume_name_template = VOLUME_NAME_TEMPLATE
    config.netapp_aggregate_name_search_pattern = AGGREGATE_NAME_SEARCH_PATTERN
    config.netapp_vserver_name_template = VSERVER_NAME_TEMPLATE
    config.netapp_root_volume_aggregate = ROOT_VOLUME_AGGREGATE
    config.netapp_root_volume = ROOT_VOLUME
    config.netapp_lif_name_template = LIF_NAME_TEMPLATE
    config.netapp_volume_snapshot_reserve_percent = 8
    config.netapp_vserver = VSERVER1
    return config
manila-2.0.0/manila/tests/share/drivers/netapp/dataontap/client/0000775000567000056710000000000012701407265026022 5ustar jenkinsjenkins00000000000000
manila-2.0.0/manila/tests/share/drivers/netapp/dataontap/client/__init__.py0000664000567000056710000000000012701407107030114 0ustar jenkinsjenkins00000000000000
manila-2.0.0/manila/tests/share/drivers/netapp/dataontap/client/test_api.py0000664000567000056710000002222312701407107030200 0ustar jenkinsjenkins00000000000000
# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
# Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Bob Callaway. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
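# The tests in this module exercise two pieces of the NetApp ZAPI wrapper:
# NaElement translation (building XML request elements from plain Python
# structures) and NaServer.invoke_elem error handling. A minimal,
# illustrative sketch of the translation behavior being verified (comments
# only, not executed here):
#
#     root = api.NaElement('root')
#     root.translate_struct({'e1': 'v1', 'e2': 'v2'})  # dict -> child elements
#     root['d'] = {'e1': 'v1'}    # __setitem__ accepts dicts, lists, tuples,
#     root['l'] = ['l1', 'l2']    # strings, and other NaElements
#
# Unsupported structures raise ValueError (translate_struct) or
# TypeError/KeyError (__setitem__), as asserted in the tests below.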
""" Tests for NetApp API layer """ import ddt import mock from six.moves import urllib from manila import exception from manila.share.drivers.netapp.dataontap.client import api from manila import test from manila.tests.share.drivers.netapp.dataontap.client import fakes as fake class NetAppApiElementTransTests(test.TestCase): """Test case for NetApp API element translations.""" def test_translate_struct_dict_unique_key(self): """Tests if dict gets properly converted to NaElements.""" root = api.NaElement('root') child = {'e1': 'v1', 'e2': 'v2', 'e3': 'v3'} root.translate_struct(child) self.assertEqual(3, len(root.get_children())) for key, value in child.items(): self.assertEqual(value, root.get_child_content(key)) def test_translate_struct_dict_nonunique_key(self): """Tests if list/dict gets properly converted to NaElements.""" root = api.NaElement('root') child = [{'e1': 'v1', 'e2': 'v2'}, {'e1': 'v3'}] root.translate_struct(child) children = root.get_children() self.assertEqual(3, len(children)) for c in children: if c.get_name() == 'e1': self.assertIn(c.get_content(), ['v1', 'v3']) else: self.assertEqual('v2', c.get_content()) def test_translate_struct_list(self): """Tests if list gets properly converted to NaElements.""" root = api.NaElement('root') child = ['e1', 'e2'] root.translate_struct(child) self.assertEqual(2, len(root.get_children())) self.assertIsNone(root.get_child_content('e1')) self.assertIsNone(root.get_child_content('e2')) def test_translate_struct_tuple(self): """Tests if tuple gets properly converted to NaElements.""" root = api.NaElement('root') child = ('e1', 'e2') root.translate_struct(child) self.assertEqual(2, len(root.get_children())) self.assertIsNone(root.get_child_content('e1')) self.assertIsNone(root.get_child_content('e2')) def test_translate_invalid_struct(self): """Tests if invalid data structure raises exception.""" root = api.NaElement('root') child = 'random child element' self.assertRaises(ValueError, root.translate_struct, child) def test_setter_builtin_types(self): """Tests str, int, float get converted to NaElement.""" update = dict(e1='v1', e2='1', e3='2.0', e4='8') root = api.NaElement('root') for key, value in update.items(): root[key] = value for key, value in update.items(): self.assertEqual(value, root.get_child_content(key)) def test_setter_na_element(self): """Tests na_element gets appended as child.""" root = api.NaElement('root') root['e1'] = api.NaElement('nested') self.assertEqual(1, len(root.get_children())) e1 = root.get_child_by_name('e1') self.assertIsInstance(e1, api.NaElement) self.assertIsInstance(e1.get_child_by_name('nested'), api.NaElement) def test_setter_child_dict(self): """Tests dict is appended as child to root.""" root = api.NaElement('root') root['d'] = {'e1': 'v1', 'e2': 'v2'} e1 = root.get_child_by_name('d') self.assertIsInstance(e1, api.NaElement) sub_ch = e1.get_children() self.assertEqual(2, len(sub_ch)) for c in sub_ch: self.assertIn(c.get_name(), ['e1', 'e2']) if c.get_name() == 'e1': self.assertEqual('v1', c.get_content()) else: self.assertEqual('v2', c.get_content()) def test_setter_child_list_tuple(self): """Tests list/tuple are appended as child to root.""" root = api.NaElement('root') root['l'] = ['l1', 'l2'] root['t'] = ('t1', 't2') l = root.get_child_by_name('l') self.assertIsInstance(l, api.NaElement) t = root.get_child_by_name('t') self.assertIsInstance(t, api.NaElement) self.assertEqual(2, len(l.get_children())) for le in l.get_children(): self.assertIn(le.get_name(), ['l1', 'l2']) self.assertEqual(2, 
len(t.get_children())) for te in t.get_children(): self.assertIn(te.get_name(), ['t1', 't2']) def test_setter_no_value(self): """Tests key with None value.""" root = api.NaElement('root') root['k'] = None self.assertIsNone(root.get_child_content('k')) def test_setter_invalid_value(self): """Tests invalid value raises exception.""" self.assertRaises(TypeError, api.NaElement('root').__setitem__, 'k', api.NaServer('localhost')) def test_setter_invalid_key(self): """Tests invalid value raises exception.""" self.assertRaises(KeyError, api.NaElement('root').__setitem__, None, 'value') @ddt.ddt class NetAppApiServerTests(test.TestCase): """Test case for NetApp API server methods""" def setUp(self): self.root = api.NaServer('127.0.0.1') super(NetAppApiServerTests, self).setUp() @ddt.data(None, fake.FAKE_XML_STR) def test_invoke_elem_value_error(self, na_element): """Tests whether invalid NaElement parameter causes error""" self.assertRaises(ValueError, self.root.invoke_elem, na_element) def test_invoke_elem_http_error(self): """Tests handling of HTTPError""" na_element = fake.FAKE_NA_ELEMENT self.mock_object(self.root, '_create_request', mock.Mock( return_value=('abc', fake.FAKE_NA_ELEMENT))) self.mock_object(api, 'LOG') self.root._opener = fake.FAKE_HTTP_OPENER self.mock_object(self.root, '_build_opener') self.mock_object(self.root._opener, 'open', mock.Mock( side_effect=urllib.error.HTTPError(url='', hdrs='', fp=None, code='401', msg='httperror'))) self.assertRaises(api.NaApiError, self.root.invoke_elem, na_element) def test_invoke_elem_urlerror(self): """Tests handling of URLError""" na_element = fake.FAKE_NA_ELEMENT self.mock_object(self.root, '_create_request', mock.Mock( return_value=('abc', fake.FAKE_NA_ELEMENT))) self.mock_object(api, 'LOG') self.root._opener = fake.FAKE_HTTP_OPENER self.mock_object(self.root, '_build_opener') self.mock_object(self.root._opener, 'open', mock.Mock( side_effect=urllib.error.URLError(reason='urlerror'))) self.assertRaises(exception.StorageCommunicationException, self.root.invoke_elem, na_element) def test_invoke_elem_unknown_exception(self): """Tests handling of Unknown Exception""" na_element = fake.FAKE_NA_ELEMENT self.mock_object(self.root, '_create_request', mock.Mock( return_value=('abc', fake.FAKE_NA_ELEMENT))) self.mock_object(api, 'LOG') self.root._opener = fake.FAKE_HTTP_OPENER self.mock_object(self.root, '_build_opener') self.mock_object(self.root._opener, 'open', mock.Mock( side_effect=Exception)) exception = self.assertRaises(api.NaApiError, self.root.invoke_elem, na_element) self.assertEqual('unknown', exception.code) def test_invoke_elem_valid(self): """Tests the method invoke_elem with valid parameters""" na_element = fake.FAKE_NA_ELEMENT self.root._trace = True self.mock_object(self.root, '_create_request', mock.Mock( return_value=('abc', fake.FAKE_NA_ELEMENT))) self.mock_object(api, 'LOG') self.root._opener = fake.FAKE_HTTP_OPENER self.mock_object(self.root, '_build_opener') self.mock_object(self.root, '_get_result', mock.Mock( return_value=fake.FAKE_NA_ELEMENT)) opener_mock = self.mock_object( self.root._opener, 'open', mock.Mock()) opener_mock.read.side_effect = ['resp1', 'resp2'] self.root.invoke_elem(na_element) self.assertEqual(2, api.LOG.debug.call_count) manila-2.0.0/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py0000664000567000056710000055342312701407107032067 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2015 Clinton Knight. All rights reserved. 
# Copyright (c) 2015 Tom Barron. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import hashlib import time import ddt import mock from oslo_log import log import six from manila import exception from manila.share.drivers.netapp.dataontap.client import api as netapp_api from manila.share.drivers.netapp.dataontap.client import client_base from manila.share.drivers.netapp.dataontap.client import client_cmode from manila import test from manila.tests.share.drivers.netapp.dataontap.client import fakes as fake @ddt.ddt class NetAppClientCmodeTestCase(test.TestCase): def setUp(self): super(NetAppClientCmodeTestCase, self).setUp() # Mock loggers as themselves to allow logger arg validation mock_logger = log.getLogger('mock_logger') self.mock_object(client_cmode.LOG, 'error', mock.Mock(side_effect=mock_logger.error)) self.mock_object(client_cmode.LOG, 'warning', mock.Mock(side_effect=mock_logger.warning)) self.mock_object(client_cmode.LOG, 'debug', mock.Mock(side_effect=mock_logger.debug)) self.mock_object(client_base.NetAppBaseClient, 'get_ontapi_version', mock.Mock(return_value=(1, 20))) self.client = client_cmode.NetAppCmodeClient(**fake.CONNECTION_INFO) self.client.connection = mock.MagicMock() self.vserver_client = client_cmode.NetAppCmodeClient( **fake.CONNECTION_INFO) self.vserver_client.set_vserver(fake.VSERVER_NAME) self.vserver_client.connection = mock.MagicMock() def _mock_api_error(self, code='fake'): return mock.Mock(side_effect=netapp_api.NaApiError(code=code)) def test_init_features_ontapi_1_21(self): self.mock_object(client_base.NetAppBaseClient, 'get_ontapi_version', mock.Mock(return_value=(1, 21))) self.client._init_features() self.assertFalse(self.client.features.BROADCAST_DOMAINS) self.assertFalse(self.client.features.IPSPACES) self.assertFalse(self.client.features.SUBNETS) @ddt.data((1, 30), (1, 40), (2, 0)) def test_init_features_ontapi_1_30(self, ontapi_version): self.mock_object(client_base.NetAppBaseClient, 'get_ontapi_version', mock.Mock(return_value=ontapi_version)) self.client._init_features() self.assertTrue(self.client.features.BROADCAST_DOMAINS) self.assertTrue(self.client.features.IPSPACES) self.assertTrue(self.client.features.SUBNETS) def test_invoke_vserver_api(self): self.client._invoke_vserver_api('fake-api', 'fake_vserver') self.client.connection.set_vserver.assert_has_calls( [mock.call('fake_vserver')]) self.client.connection.invoke_successfully.assert_has_calls( [mock.call('fake-api', True)]) def test_has_records(self): self.assertTrue(self.client._has_records( netapp_api.NaElement(fake.VSERVER_GET_ITER_RESPONSE))) def test_has_records_not_found(self): self.assertFalse(self.client._has_records( netapp_api.NaElement(fake.NO_RECORDS_RESPONSE))) @ddt.data((fake.VSERVER_GET_ITER_RESPONSE, 1), (fake.NO_RECORDS_RESPONSE, 0)) @ddt.unpack def test_get_record_count(self, response, expected): api_response = netapp_api.NaElement(response) result = self.client._get_record_count(api_response) self.assertEqual(expected, result) def 
test_get_records_count_invalid(self): api_response = netapp_api.NaElement( fake.INVALID_GET_ITER_RESPONSE_NO_RECORDS) self.assertRaises(exception.NetAppException, self.client._get_record_count, api_response) def test_send_iter_request(self): api_responses = [ netapp_api.NaElement(fake.STORAGE_DISK_GET_ITER_RESPONSE_PAGE_1), netapp_api.NaElement(fake.STORAGE_DISK_GET_ITER_RESPONSE_PAGE_2), netapp_api.NaElement(fake.STORAGE_DISK_GET_ITER_RESPONSE_PAGE_3), ] mock_send_request = self.mock_object( self.client, 'send_request', mock.Mock(side_effect=api_responses)) storage_disk_get_iter_args = { 'desired-attributes': { 'storage-disk-info': { 'disk-name': None, } } } result = self.client.send_iter_request( 'storage-disk-get-iter', api_args=storage_disk_get_iter_args, max_page_length=10) num_records = result.get_child_content('num-records') self.assertEqual('28', num_records) next_tag = result.get_child_content('next-tag') self.assertEqual('', next_tag) args1 = copy.deepcopy(storage_disk_get_iter_args) args1['max-records'] = 10 args2 = copy.deepcopy(storage_disk_get_iter_args) args2['max-records'] = 10 args2['tag'] = 'next_tag_1' args3 = copy.deepcopy(storage_disk_get_iter_args) args3['max-records'] = 10 args3['tag'] = 'next_tag_2' mock_send_request.assert_has_calls([ mock.call('storage-disk-get-iter', args1), mock.call('storage-disk-get-iter', args2), mock.call('storage-disk-get-iter', args3), ]) def test_send_iter_request_single_page(self): api_response = netapp_api.NaElement( fake.STORAGE_DISK_GET_ITER_RESPONSE) mock_send_request = self.mock_object( self.client, 'send_request', mock.Mock(return_value=api_response)) storage_disk_get_iter_args = { 'desired-attributes': { 'storage-disk-info': { 'disk-name': None, } } } result = self.client.send_iter_request( 'storage-disk-get-iter', api_args=storage_disk_get_iter_args, max_page_length=10) num_records = result.get_child_content('num-records') self.assertEqual('1', num_records) args = copy.deepcopy(storage_disk_get_iter_args) args['max-records'] = 10 mock_send_request.assert_has_calls([ mock.call('storage-disk-get-iter', args), ]) def test_send_iter_request_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) mock_send_request = self.mock_object( self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.send_iter_request('storage-disk-get-iter') num_records = result.get_child_content('num-records') self.assertEqual('0', num_records) args = {'max-records': client_cmode.DEFAULT_MAX_PAGE_LENGTH} mock_send_request.assert_has_calls([ mock.call('storage-disk-get-iter', args), ]) @ddt.data(fake.INVALID_GET_ITER_RESPONSE_NO_ATTRIBUTES, fake.INVALID_GET_ITER_RESPONSE_NO_RECORDS) def test_send_iter_request_invalid(self, fake_response): api_response = netapp_api.NaElement(fake_response) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.assertRaises(exception.NetAppException, self.client.send_iter_request, 'storage-disk-get-iter') def test_set_vserver(self): self.client.set_vserver(fake.VSERVER_NAME) self.client.connection.set_vserver.assert_has_calls( [mock.call('fake_vserver')]) def test_vserver_exists(self): api_response = netapp_api.NaElement(fake.VSERVER_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) vserver_get_args = { 'query': {'vserver-info': {'vserver-name': fake.VSERVER_NAME}}, 'desired-attributes': {'vserver-info': {'vserver-name': None}} } result = self.client.vserver_exists(fake.VSERVER_NAME) 
self.client.send_iter_request.assert_has_calls([ mock.call('vserver-get-iter', vserver_get_args)]) self.assertTrue(result) def test_vserver_exists_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.vserver_exists(fake.VSERVER_NAME) self.assertFalse(result) def test_create_vserver_no_ipspace(self): self.mock_object(self.client, 'send_request') vserver_create_args = { 'vserver-name': fake.VSERVER_NAME, 'root-volume-security-style': 'unix', 'root-volume-aggregate': fake.ROOT_VOLUME_AGGREGATE_NAME, 'root-volume': fake.ROOT_VOLUME_NAME, 'name-server-switch': {'nsswitch': 'file'} } vserver_modify_args = { 'aggr-list': [{'aggr-name': aggr_name} for aggr_name in fake.SHARE_AGGREGATE_NAMES], 'vserver-name': fake.VSERVER_NAME } self.client.create_vserver(fake.VSERVER_NAME, fake.ROOT_VOLUME_AGGREGATE_NAME, fake.ROOT_VOLUME_NAME, fake.SHARE_AGGREGATE_NAMES, None) self.client.send_request.assert_has_calls([ mock.call('vserver-create', vserver_create_args), mock.call('vserver-modify', vserver_modify_args)]) def test_create_vserver_with_ipspace(self): self.client.features.add_feature('IPSPACES') self.mock_object(self.client, 'send_request') vserver_create_args = { 'vserver-name': fake.VSERVER_NAME, 'root-volume-security-style': 'unix', 'root-volume-aggregate': fake.ROOT_VOLUME_AGGREGATE_NAME, 'root-volume': fake.ROOT_VOLUME_NAME, 'name-server-switch': {'nsswitch': 'file'}, 'ipspace': fake.IPSPACE_NAME, } vserver_modify_args = { 'aggr-list': [{'aggr-name': aggr_name} for aggr_name in fake.SHARE_AGGREGATE_NAMES], 'vserver-name': fake.VSERVER_NAME } self.client.create_vserver(fake.VSERVER_NAME, fake.ROOT_VOLUME_AGGREGATE_NAME, fake.ROOT_VOLUME_NAME, fake.SHARE_AGGREGATE_NAMES, fake.IPSPACE_NAME) self.client.send_request.assert_has_calls([ mock.call('vserver-create', vserver_create_args), mock.call('vserver-modify', vserver_modify_args)]) def test_create_vserver_ipspaces_not_supported(self): self.assertRaises(exception.NetAppException, self.client.create_vserver, fake.VSERVER_NAME, fake.ROOT_VOLUME_AGGREGATE_NAME, fake.ROOT_VOLUME_NAME, fake.SHARE_AGGREGATE_NAMES, fake.IPSPACE_NAME) def test_get_vserver_root_volume_name(self): api_response = netapp_api.NaElement( fake.VSERVER_GET_ROOT_VOLUME_NAME_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) vserver_get_args = { 'query': {'vserver-info': {'vserver-name': fake.VSERVER_NAME}}, 'desired-attributes': {'vserver-info': {'root-volume': None}} } result = self.client.get_vserver_root_volume_name(fake.VSERVER_NAME) self.client.send_iter_request.assert_has_calls([ mock.call('vserver-get-iter', vserver_get_args)]) self.assertEqual(fake.ROOT_VOLUME_NAME, result) def test_get_vserver_root_volume_name_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) self.assertRaises(exception.NetAppException, self.client.get_vserver_root_volume_name, fake.VSERVER_NAME) def test_get_vserver_ipspace(self): self.client.features.add_feature('IPSPACES') api_response = netapp_api.NaElement( fake.VSERVER_GET_IPSPACE_NAME_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_vserver_ipspace(fake.VSERVER_NAME) vserver_get_iter_args = { 'query': { 'vserver-info': { 'vserver-name': fake.VSERVER_NAME, }, }, 'desired-attributes': { 
'vserver-info': { 'ipspace': None, }, }, } self.client.send_iter_request.assert_has_calls([ mock.call('vserver-get-iter', vserver_get_iter_args)]) self.assertEqual(fake.IPSPACE_NAME, result) def test_get_vserver_ipspace_not_supported(self): result = self.client.get_vserver_ipspace(fake.IPSPACE_NAME) self.assertIsNone(result) def test_get_vserver_ipspace_not_found(self): self.client.features.add_feature('IPSPACES') api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) self.assertRaises(exception.NetAppException, self.client.get_vserver_ipspace, fake.IPSPACE_NAME) def test_ipspace_has_data_vservers(self): self.client.features.add_feature('IPSPACES') api_response = netapp_api.NaElement(fake.VSERVER_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.ipspace_has_data_vservers(fake.IPSPACE_NAME) vserver_get_iter_args = { 'query': { 'vserver-info': { 'ipspace': fake.IPSPACE_NAME, 'vserver-type': 'data' }, }, 'desired-attributes': { 'vserver-info': { 'vserver-name': None, }, }, } self.client.send_iter_request.assert_has_calls([ mock.call('vserver-get-iter', vserver_get_iter_args)]) self.assertTrue(result) def test_ipspace_has_data_vservers_not_supported(self): result = self.client.ipspace_has_data_vservers(fake.IPSPACE_NAME) self.assertFalse(result) def test_ipspace_has_data_vservers_not_found(self): self.client.features.add_feature('IPSPACES') api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.ipspace_has_data_vservers(fake.IPSPACE_NAME) self.assertFalse(result) def test_list_vservers(self): api_response = netapp_api.NaElement( fake.VSERVER_DATA_LIST_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.list_vservers() vserver_get_iter_args = { 'query': { 'vserver-info': { 'vserver-type': 'data' } }, 'desired-attributes': { 'vserver-info': { 'vserver-name': None } } } self.client.send_iter_request.assert_has_calls([ mock.call('vserver-get-iter', vserver_get_iter_args)]) self.assertListEqual([fake.VSERVER_NAME], result) def test_list_vservers_node_type(self): api_response = netapp_api.NaElement( fake.VSERVER_DATA_LIST_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.list_vservers(vserver_type='node') vserver_get_iter_args = { 'query': { 'vserver-info': { 'vserver-type': 'node' } }, 'desired-attributes': { 'vserver-info': { 'vserver-name': None } } } self.client.send_iter_request.assert_has_calls([ mock.call('vserver-get-iter', vserver_get_iter_args)]) self.assertListEqual([fake.VSERVER_NAME], result) def test_list_vservers_not_found(self): api_response = netapp_api.NaElement( fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.list_vservers(vserver_type='data') self.assertListEqual([], result) def test_get_vserver_volume_count(self): api_response = netapp_api.NaElement(fake.VOLUME_COUNT_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_vserver_volume_count() self.assertEqual(2, result) def test_delete_vserver_no_volumes(self): self.mock_object(self.client, 'vserver_exists', mock.Mock(return_value=True)) 
self.mock_object(self.client, 'get_vserver_root_volume_name', mock.Mock(return_value=fake.ROOT_VOLUME_NAME)) self.mock_object(self.vserver_client, 'get_vserver_volume_count', mock.Mock(return_value=0)) self.mock_object(self.client, '_terminate_vserver_services') self.mock_object(self.client, 'send_request') self.client.delete_vserver( fake.VSERVER_NAME, self.vserver_client, security_services=[fake.CIFS_SECURITY_SERVICE]) self.client._terminate_vserver_services.assert_called_with( fake.VSERVER_NAME, self.vserver_client, [fake.CIFS_SECURITY_SERVICE]) vserver_destroy_args = {'vserver-name': fake.VSERVER_NAME} self.client.send_request.assert_has_calls([ mock.call('vserver-destroy', vserver_destroy_args)]) def test_delete_vserver_one_volume(self): self.mock_object(self.client, 'vserver_exists', mock.Mock(return_value=True)) self.mock_object(self.client, 'get_vserver_root_volume_name', mock.Mock(return_value=fake.ROOT_VOLUME_NAME)) self.mock_object(self.vserver_client, 'get_vserver_volume_count', mock.Mock(return_value=1)) self.mock_object(self.client, 'send_request') self.mock_object(self.vserver_client, 'offline_volume') self.mock_object(self.vserver_client, 'delete_volume') self.client.delete_vserver(fake.VSERVER_NAME, self.vserver_client) self.vserver_client.offline_volume.assert_called_with( fake.ROOT_VOLUME_NAME) self.vserver_client.delete_volume.assert_called_with( fake.ROOT_VOLUME_NAME) vserver_destroy_args = {'vserver-name': fake.VSERVER_NAME} self.client.send_request.assert_has_calls([ mock.call('vserver-destroy', vserver_destroy_args)]) def test_delete_vserver_one_volume_already_offline(self): self.mock_object(self.client, 'vserver_exists', mock.Mock(return_value=True)) self.mock_object(self.client, 'get_vserver_root_volume_name', mock.Mock(return_value=fake.ROOT_VOLUME_NAME)) self.mock_object(self.vserver_client, 'get_vserver_volume_count', mock.Mock(return_value=1)) self.mock_object(self.client, 'send_request') self.mock_object(self.vserver_client, 'offline_volume', self._mock_api_error(code=netapp_api.EVOLUMEOFFLINE)) self.mock_object(self.vserver_client, 'delete_volume') self.client.delete_vserver(fake.VSERVER_NAME, self.vserver_client) self.vserver_client.offline_volume.assert_called_with( fake.ROOT_VOLUME_NAME) self.vserver_client.delete_volume.assert_called_with( fake.ROOT_VOLUME_NAME) vserver_destroy_args = {'vserver-name': fake.VSERVER_NAME} self.client.send_request.assert_has_calls([ mock.call('vserver-destroy', vserver_destroy_args)]) self.assertEqual(1, client_cmode.LOG.error.call_count) def test_delete_vserver_one_volume_api_error(self): self.mock_object(self.client, 'vserver_exists', mock.Mock(return_value=True)) self.mock_object(self.client, 'get_vserver_root_volume_name', mock.Mock(return_value=fake.ROOT_VOLUME_NAME)) self.mock_object(self.vserver_client, 'get_vserver_volume_count', mock.Mock(return_value=1)) self.mock_object(self.client, 'send_request') self.mock_object(self.vserver_client, 'offline_volume', self._mock_api_error()) self.mock_object(self.vserver_client, 'delete_volume') self.assertRaises(netapp_api.NaApiError, self.client.delete_vserver, fake.VSERVER_NAME, self.vserver_client) def test_delete_vserver_multiple_volumes(self): self.mock_object(self.client, 'vserver_exists', mock.Mock(return_value=True)) self.mock_object(self.client, 'get_vserver_root_volume_name', mock.Mock(return_value=fake.ROOT_VOLUME_NAME)) self.mock_object(self.vserver_client, 'get_vserver_volume_count', mock.Mock(return_value=2)) self.assertRaises(exception.NetAppException, 
self.client.delete_vserver, fake.VSERVER_NAME, self.vserver_client) def test_delete_vserver_not_found(self): self.mock_object(self.client, 'vserver_exists', mock.Mock(return_value=False)) self.client.delete_vserver(fake.VSERVER_NAME, self.vserver_client) self.assertEqual(1, client_cmode.LOG.error.call_count) def test_terminate_vserver_services(self): self.mock_object(self.vserver_client, 'send_request') self.client._terminate_vserver_services(fake.VSERVER_NAME, self.vserver_client, [fake.CIFS_SECURITY_SERVICE]) cifs_server_delete_args = { 'admin-password': fake.CIFS_SECURITY_SERVICE['password'], 'admin-username': fake.CIFS_SECURITY_SERVICE['user'], } self.vserver_client.send_request.assert_has_calls([ mock.call('cifs-server-delete', cifs_server_delete_args)]) def test_terminate_vserver_services_cifs_not_found(self): self.mock_object(self.vserver_client, 'send_request', self._mock_api_error( code=netapp_api.EOBJECTNOTFOUND)) self.client._terminate_vserver_services(fake.VSERVER_NAME, self.vserver_client, [fake.CIFS_SECURITY_SERVICE]) cifs_server_delete_args = { 'admin-password': fake.CIFS_SECURITY_SERVICE['password'], 'admin-username': fake.CIFS_SECURITY_SERVICE['user'], } self.vserver_client.send_request.assert_has_calls([ mock.call('cifs-server-delete', cifs_server_delete_args)]) self.assertEqual(1, client_cmode.LOG.error.call_count) def test_terminate_vserver_services_api_error(self): side_effects = [netapp_api.NaApiError(code='fake'), None] self.mock_object(self.vserver_client, 'send_request', mock.Mock(side_effect=side_effects)) self.client._terminate_vserver_services(fake.VSERVER_NAME, self.vserver_client, [fake.CIFS_SECURITY_SERVICE]) cifs_server_delete_args = { 'admin-password': fake.CIFS_SECURITY_SERVICE['password'], 'admin-username': fake.CIFS_SECURITY_SERVICE['user'], } self.vserver_client.send_request.assert_has_calls([ mock.call('cifs-server-delete', cifs_server_delete_args), mock.call('cifs-server-delete')]) self.assertEqual(0, client_cmode.LOG.error.call_count) def test_list_cluster_nodes(self): api_response = netapp_api.NaElement( fake.SYSTEM_NODE_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.list_cluster_nodes() self.assertListEqual([fake.NODE_NAME], result) def test_list_cluster_nodes_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.list_cluster_nodes() self.assertListEqual([], result) def test_list_node_data_ports(self): self.mock_object(self.client, 'get_node_data_ports', mock.Mock(return_value=fake.SPEED_SORTED_PORTS)) result = self.client.list_node_data_ports(fake.NODE_NAME) self.assertSequenceEqual(fake.SPEED_SORTED_PORT_NAMES, result) def test_get_node_data_ports(self): api_response = netapp_api.NaElement(fake.NET_PORT_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_node_data_ports(fake.NODE_NAME) net_port_get_iter_args = { 'query': { 'net-port-info': { 'node': fake.NODE_NAME, 'link-status': 'up', 'port-type': 'physical|if_group', 'role': 'data', }, }, 'desired-attributes': { 'net-port-info': { 'port': None, 'node': None, 'operational-speed': None, 'ifgrp-port': None, }, }, } self.assertSequenceEqual(fake.SPEED_SORTED_PORTS, result) self.client.send_iter_request.assert_has_calls([ mock.call('net-port-get-iter', net_port_get_iter_args)]) def 
test_get_node_data_ports_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_node_data_ports(fake.NODE_NAME) self.assertSequenceEqual([], result) def test_sort_data_ports_by_speed(self): result = self.client._sort_data_ports_by_speed( fake.UNSORTED_PORTS_ALL_SPEEDS) self.assertSequenceEqual(fake.SORTED_PORTS_ALL_SPEEDS, result) def test_list_aggregates(self): api_response = netapp_api.NaElement(fake.AGGR_GET_NAMES_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.list_aggregates() self.assertSequenceEqual(fake.SHARE_AGGREGATE_NAMES, result) def test_list_aggregates_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.assertRaises(exception.NetAppException, self.client.list_aggregates) def test_list_vserver_aggregates(self): self.mock_object(self.vserver_client, 'get_vserver_aggregate_capacities', mock.Mock(return_value=fake.VSERVER_AGGREGATES)) result = self.vserver_client.list_vserver_aggregates() self.assertListEqual(list(fake.VSERVER_AGGREGATES.keys()), result) def test_list_vserver_aggregates_none_found(self): self.mock_object(self.vserver_client, 'get_vserver_aggregate_capacities', mock.Mock(return_value={})) result = self.vserver_client.list_vserver_aggregates() self.assertListEqual([], result) @ddt.data((True, True), (True, False), (False, True), (False, False)) @ddt.unpack def test_create_network_interface(self, broadcast_domains_supported, use_vlans): self.client.features.add_feature('BROADCAST_DOMAINS', broadcast_domains_supported) self.mock_object(self.client, '_ensure_broadcast_domain_for_port') self.mock_object(self.client, '_create_vlan') self.mock_object(self.client, 'send_request') lif_create_args = { 'address': fake.IP_ADDRESS, 'administrative-status': 'up', 'data-protocols': [ {'data-protocol': 'nfs'}, {'data-protocol': 'cifs'} ], 'home-node': fake.NODE_NAME, 'home-port': fake.VLAN_PORT if use_vlans else fake.PORT, 'netmask': fake.NETMASK, 'interface-name': fake.LIF_NAME, 'role': 'data', 'vserver': fake.VSERVER_NAME, } self.client.create_network_interface(fake.IP_ADDRESS, fake.NETMASK, fake.VLAN if use_vlans else None, fake.NODE_NAME, fake.PORT, fake.VSERVER_NAME, fake.LIF_NAME, fake.IPSPACE_NAME) if use_vlans: self.client._create_vlan.assert_called_with( fake.NODE_NAME, fake.PORT, fake.VLAN) else: self.assertFalse(self.client._create_vlan.called) if broadcast_domains_supported: self.client._ensure_broadcast_domain_for_port.assert_called_with( fake.NODE_NAME, fake.VLAN_PORT if use_vlans else fake.PORT, ipspace=fake.IPSPACE_NAME) else: self.assertFalse( self.client._ensure_broadcast_domain_for_port.called) self.client.send_request.assert_has_calls([ mock.call('net-interface-create', lif_create_args)]) def test_create_vlan(self): self.mock_object(self.client, 'send_request') vlan_create_args = { 'vlan-info': { 'parent-interface': fake.PORT, 'node': fake.NODE_NAME, 'vlanid': fake.VLAN } } self.client._create_vlan(fake.NODE_NAME, fake.PORT, fake.VLAN) self.client.send_request.assert_has_calls([ mock.call('net-vlan-create', vlan_create_args)]) def test_create_vlan_already_present(self): self.mock_object(self.client, 'send_request', self._mock_api_error(code=netapp_api.EDUPLICATEENTRY)) vlan_create_args = { 'vlan-info': { 'parent-interface': fake.PORT, 'node': 
fake.NODE_NAME, 'vlanid': fake.VLAN } } self.client._create_vlan(fake.NODE_NAME, fake.PORT, fake.VLAN) self.client.send_request.assert_has_calls([ mock.call('net-vlan-create', vlan_create_args)]) self.assertEqual(1, client_cmode.LOG.debug.call_count) def test_create_vlan_api_error(self): self.mock_object(self.client, 'send_request', self._mock_api_error()) self.assertRaises(exception.NetAppException, self.client._create_vlan, fake.NODE_NAME, fake.PORT, fake.VLAN) def test_ensure_broadcast_domain_for_port_domain_match(self): port_info = { 'ipspace': fake.IPSPACE_NAME, 'broadcast-domain': fake.BROADCAST_DOMAIN, } self.mock_object(self.client, '_get_broadcast_domain_for_port', mock.Mock(return_value=port_info)) self.mock_object(self.client, '_broadcast_domain_exists', mock.Mock(return_value=True)) self.mock_object(self.client, '_create_broadcast_domain') self.mock_object(self.client, '_add_port_to_broadcast_domain') self.client._ensure_broadcast_domain_for_port( fake.NODE_NAME, fake.PORT, domain=fake.BROADCAST_DOMAIN, ipspace=fake.IPSPACE_NAME) self.client._get_broadcast_domain_for_port.assert_has_calls([ mock.call(fake.NODE_NAME, fake.PORT)]) self.assertFalse(self.client._broadcast_domain_exists.called) self.assertFalse(self.client._create_broadcast_domain.called) self.assertFalse(self.client._add_port_to_broadcast_domain.called) def test_ensure_broadcast_domain_for_port_other_domain(self): port_info = { 'ipspace': fake.IPSPACE_NAME, 'broadcast-domain': 'other_domain', } self.mock_object(self.client, '_get_broadcast_domain_for_port', mock.Mock(return_value=port_info)) self.mock_object(self.client, '_broadcast_domain_exists', mock.Mock(return_value=True)) self.mock_object(self.client, '_create_broadcast_domain') self.mock_object(self.client, '_remove_port_from_broadcast_domain') self.mock_object(self.client, '_add_port_to_broadcast_domain') self.client._ensure_broadcast_domain_for_port( fake.NODE_NAME, fake.PORT, domain=fake.BROADCAST_DOMAIN, ipspace=fake.IPSPACE_NAME) self.client._get_broadcast_domain_for_port.assert_has_calls([ mock.call(fake.NODE_NAME, fake.PORT)]) self.client._remove_port_from_broadcast_domain.assert_has_calls([ mock.call(fake.NODE_NAME, fake.PORT, 'other_domain', fake.IPSPACE_NAME)]) self.client._broadcast_domain_exists.assert_has_calls([ mock.call(fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME)]) self.assertFalse(self.client._create_broadcast_domain.called) self.client._add_port_to_broadcast_domain.assert_has_calls([ mock.call(fake.NODE_NAME, fake.PORT, fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME)]) def test_ensure_broadcast_domain_for_port_no_domain(self): port_info = { 'ipspace': fake.IPSPACE_NAME, 'broadcast-domain': None, } self.mock_object(self.client, '_get_broadcast_domain_for_port', mock.Mock(return_value=port_info)) self.mock_object(self.client, '_broadcast_domain_exists', mock.Mock(return_value=False)) self.mock_object(self.client, '_create_broadcast_domain') self.mock_object(self.client, '_remove_port_from_broadcast_domain') self.mock_object(self.client, '_add_port_to_broadcast_domain') self.client._ensure_broadcast_domain_for_port( fake.NODE_NAME, fake.PORT, domain=fake.BROADCAST_DOMAIN, ipspace=fake.IPSPACE_NAME) self.client._get_broadcast_domain_for_port.assert_has_calls([ mock.call(fake.NODE_NAME, fake.PORT)]) self.assertFalse(self.client._remove_port_from_broadcast_domain.called) self.client._broadcast_domain_exists.assert_has_calls([ mock.call(fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME)]) self.client._create_broadcast_domain.assert_has_calls([ 
mock.call(fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME)]) self.client._add_port_to_broadcast_domain.assert_has_calls([ mock.call(fake.NODE_NAME, fake.PORT, fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME)]) def test_get_broadcast_domain_for_port(self): api_response = netapp_api.NaElement( fake.NET_PORT_GET_ITER_BROADCAST_DOMAIN_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) net_port_get_iter_args = { 'query': { 'net-port-info': { 'node': fake.NODE_NAME, 'port': fake.PORT, }, }, 'desired-attributes': { 'net-port-info': { 'broadcast-domain': None, 'ipspace': None, }, }, } result = self.client._get_broadcast_domain_for_port(fake.NODE_NAME, fake.PORT) expected = { 'broadcast-domain': fake.BROADCAST_DOMAIN, 'ipspace': fake.IPSPACE_NAME, } self.client.send_iter_request.assert_has_calls([ mock.call('net-port-get-iter', net_port_get_iter_args)]) self.assertEqual(expected, result) def test_get_broadcast_domain_for_port_port_not_found(self): api_response = netapp_api.NaElement( fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) self.assertRaises(exception.NetAppException, self.client._get_broadcast_domain_for_port, fake.NODE_NAME, fake.PORT) def test_get_broadcast_domain_for_port_domain_not_found(self): api_response = netapp_api.NaElement( fake.NET_PORT_GET_ITER_BROADCAST_DOMAIN_MISSING_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client._get_broadcast_domain_for_port(fake.NODE_NAME, fake.PORT) expected = { 'broadcast-domain': None, 'ipspace': fake.IPSPACE_NAME, } self.assertEqual(expected, result) def test_broadcast_domain_exists(self): api_response = netapp_api.NaElement( fake.NET_PORT_BROADCAST_DOMAIN_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client._broadcast_domain_exists(fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) net_port_broadcast_domain_get_iter_args = { 'query': { 'net-port-broadcast-domain-info': { 'ipspace': fake.IPSPACE_NAME, 'broadcast-domain': fake.BROADCAST_DOMAIN, }, }, 'desired-attributes': { 'net-port-broadcast-domain-info': None, }, } self.client.send_iter_request.assert_has_calls([ mock.call('net-port-broadcast-domain-get-iter', net_port_broadcast_domain_get_iter_args)]) self.assertTrue(result) def test_broadcast_domain_exists_not_found(self): api_response = netapp_api.NaElement( fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client._broadcast_domain_exists(fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) self.assertFalse(result) def test_create_broadcast_domain(self): self.mock_object(self.client, 'send_request') result = self.client._create_broadcast_domain(fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME, mtu=fake.MTU) net_port_broadcast_domain_create_args = { 'ipspace': fake.IPSPACE_NAME, 'broadcast-domain': fake.BROADCAST_DOMAIN, 'mtu': fake.MTU, } self.assertIsNone(result) self.client.send_request.assert_has_calls([ mock.call('net-port-broadcast-domain-create', net_port_broadcast_domain_create_args)]) def test_delete_broadcast_domain(self): self.mock_object(self.client, 'send_request') result = self.client._delete_broadcast_domain(fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) net_port_broadcast_domain_delete_args = { 'ipspace': fake.IPSPACE_NAME, 'broadcast-domain': fake.BROADCAST_DOMAIN, } self.assertIsNone(result) self.client.send_request.assert_has_calls([ 
mock.call('net-port-broadcast-domain-destroy', net_port_broadcast_domain_delete_args)]) def test_delete_broadcast_domains_for_ipspace_not_found(self): self.mock_object(self.client, 'get_ipspaces', mock.Mock(return_value=[])) self.mock_object(self.client, '_delete_broadcast_domain') self.client._delete_broadcast_domains_for_ipspace(fake.IPSPACE_NAME) self.client.get_ipspaces.assert_called_once_with( ipspace_name=fake.IPSPACE_NAME) self.assertFalse(self.client._delete_broadcast_domain.called) def test_delete_broadcast_domains_for_ipspace(self): self.mock_object(self.client, 'get_ipspaces', mock.Mock(return_value=fake.IPSPACES)) self.mock_object(self.client, '_delete_broadcast_domain') self.client._delete_broadcast_domains_for_ipspace(fake.IPSPACE_NAME) self.client.get_ipspaces.assert_called_once_with( ipspace_name=fake.IPSPACE_NAME) self.client._delete_broadcast_domain.assert_called_once_with( fake.IPSPACES[0]['broadcast-domains'][0], fake.IPSPACE_NAME) def test_add_port_to_broadcast_domain(self): self.mock_object(self.client, 'send_request') add_port_to_broadcast_domain_args = { 'ipspace': fake.IPSPACE_NAME, 'broadcast-domain': fake.BROADCAST_DOMAIN, 'ports': { 'net-qualified-port-name': ':'.join([fake.NODE_NAME, fake.VLAN_PORT]) } } result = self.client._add_port_to_broadcast_domain( fake.NODE_NAME, fake.VLAN_PORT, fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) self.assertIsNone(result) self.client.send_request.assert_has_calls([ mock.call('net-port-broadcast-domain-add-ports', add_port_to_broadcast_domain_args)]) def test_add_port_to_broadcast_domain_already_present(self): self.mock_object(self.client, 'send_request', self._mock_api_error( code=netapp_api. E_VIFMGR_PORT_ALREADY_ASSIGNED_TO_BROADCAST_DOMAIN)) result = self.client._add_port_to_broadcast_domain( fake.NODE_NAME, fake.VLAN_PORT, fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) self.assertIsNone(result) def test_add_port_to_broadcast_domain_api_error(self): self.mock_object(self.client, 'send_request', self._mock_api_error()) self.assertRaises(exception.NetAppException, self.client._add_port_to_broadcast_domain, fake.NODE_NAME, fake.VLAN_PORT, fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) def test_remove_port_from_broadcast_domain(self): self.mock_object(self.client, 'send_request') result = self.client._remove_port_from_broadcast_domain( fake.NODE_NAME, fake.VLAN_PORT, fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) net_port_broadcast_domain_remove_ports_args = { 'ipspace': fake.IPSPACE_NAME, 'broadcast-domain': fake.BROADCAST_DOMAIN, 'ports': { 'net-qualified-port-name': ':'.join([fake.NODE_NAME, fake.VLAN_PORT]) } } self.assertIsNone(result) self.client.send_request.assert_has_calls([ mock.call('net-port-broadcast-domain-remove-ports', net_port_broadcast_domain_remove_ports_args)]) def test_network_interface_exists(self): api_response = netapp_api.NaElement( fake.NET_INTERFACE_GET_ONE_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) net_interface_get_args = { 'query': { 'net-interface-info': { 'address': fake.IP_ADDRESS, 'home-node': fake.NODE_NAME, 'home-port': fake.VLAN_PORT, 'netmask': fake.NETMASK, 'vserver': fake.VSERVER_NAME} }, 'desired-attributes': { 'net-interface-info': { 'interface-name': None, } } } result = self.client.network_interface_exists( fake.VSERVER_NAME, fake.NODE_NAME, fake.PORT, fake.IP_ADDRESS, fake.NETMASK, fake.VLAN) self.client.send_iter_request.assert_has_calls([ mock.call('net-interface-get-iter', net_interface_get_args)]) self.assertTrue(result) def 
test_network_interface_exists_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) net_interface_get_args = { 'query': { 'net-interface-info': { 'address': fake.IP_ADDRESS, 'home-node': fake.NODE_NAME, 'home-port': fake.PORT, 'netmask': fake.NETMASK, 'vserver': fake.VSERVER_NAME} }, 'desired-attributes': { 'net-interface-info': { 'interface-name': None, } } } result = self.client.network_interface_exists( fake.VSERVER_NAME, fake.NODE_NAME, fake.PORT, fake.IP_ADDRESS, fake.NETMASK, None) self.client.send_iter_request.assert_has_calls([ mock.call('net-interface-get-iter', net_interface_get_args)]) self.assertFalse(result) def test_list_network_interfaces(self): api_response = netapp_api.NaElement( fake.NET_INTERFACE_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) net_interface_get_args = { 'desired-attributes': { 'net-interface-info': { 'interface-name': None, } } } result = self.client.list_network_interfaces() self.client.send_iter_request.assert_has_calls([ mock.call('net-interface-get-iter', net_interface_get_args)]) self.assertSequenceEqual(fake.LIF_NAMES, result) def test_list_network_interfaces_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.list_network_interfaces() self.assertListEqual([], result) def test_get_network_interfaces(self): api_response = netapp_api.NaElement( fake.NET_INTERFACE_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_network_interfaces() self.client.send_iter_request.assert_has_calls([ mock.call('net-interface-get-iter', None)]) self.assertSequenceEqual(fake.LIFS, result) def test_get_network_interfaces_filtered_by_protocol(self): api_response = netapp_api.NaElement( fake.NET_INTERFACE_GET_ITER_RESPONSE_NFS) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_network_interfaces(protocols=['NFS']) net_interface_get_args = { 'query': { 'net-interface-info': { 'data-protocols': { 'data-protocol': 'nfs', } } } } self.client.send_iter_request.assert_has_calls([ mock.call('net-interface-get-iter', net_interface_get_args)]) self.assertListEqual(fake.NFS_LIFS, result) def test_get_network_interfaces_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_network_interfaces() self.client.send_iter_request.assert_has_calls([ mock.call('net-interface-get-iter', None)]) self.assertListEqual([], result) def test_delete_network_interface(self): self.mock_object(self.client, 'send_request') self.client.delete_network_interface(fake.LIF_NAME) net_interface_delete_args = { 'vserver': None, 'interface-name': fake.LIF_NAME } self.client.send_request.assert_has_calls([ mock.call('net-interface-delete', net_interface_delete_args)]) def test_get_ipspaces(self): self.client.features.add_feature('IPSPACES') api_response = netapp_api.NaElement( fake.NET_IPSPACES_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_ipspaces(ipspace_name=fake.IPSPACE_NAME) net_ipspaces_get_iter_args = { 'query': { 'net-ipspaces-info': { 
'ipspace': fake.IPSPACE_NAME, }, }, } self.client.send_iter_request.assert_has_calls([ mock.call('net-ipspaces-get-iter', net_ipspaces_get_iter_args)]) self.assertEqual(fake.IPSPACES, result) def test_get_ipspaces_not_found(self): self.client.features.add_feature('IPSPACES') api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_ipspaces() net_ipspaces_get_iter_args = {} self.client.send_iter_request.assert_has_calls([ mock.call('net-ipspaces-get-iter', net_ipspaces_get_iter_args)]) self.assertEqual([], result) def test_get_ipspaces_not_supported(self): self.mock_object(self.client, 'send_iter_request') result = self.client.get_ipspaces() self.assertFalse(self.client.send_iter_request.called) self.assertEqual([], result) @ddt.data((fake.NET_IPSPACES_GET_ITER_RESPONSE, True), (fake.NO_RECORDS_RESPONSE, False)) @ddt.unpack def test_ipspace_exists(self, api_response, expected): self.client.features.add_feature('IPSPACES') api_response = netapp_api.NaElement(api_response) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.ipspace_exists(fake.IPSPACE_NAME) net_ipspaces_get_iter_args = { 'query': { 'net-ipspaces-info': { 'ipspace': fake.IPSPACE_NAME, }, }, 'desired-attributes': { 'net-ipspaces-info': { 'ipspace': None, }, }, } self.client.send_iter_request.assert_has_calls([ mock.call('net-ipspaces-get-iter', net_ipspaces_get_iter_args)]) self.assertEqual(expected, result) def test_ipspace_exists_not_supported(self): result = self.client.ipspace_exists(fake.IPSPACE_NAME) self.assertFalse(result) def test_create_ipspace(self): self.mock_object(self.client, 'send_request') self.client.create_ipspace(fake.IPSPACE_NAME) net_ipspaces_create_args = {'ipspace': fake.IPSPACE_NAME} self.client.send_request.assert_has_calls([ mock.call('net-ipspaces-create', net_ipspaces_create_args)]) def test_delete_ipspace(self): mock_delete_broadcast_domains_for_ipspace = self.mock_object( self.client, '_delete_broadcast_domains_for_ipspace') self.mock_object(self.client, 'send_request') self.client.delete_ipspace(fake.IPSPACE_NAME) net_ipspaces_destroy_args = {'ipspace': fake.IPSPACE_NAME} mock_delete_broadcast_domains_for_ipspace.assert_called_once_with( fake.IPSPACE_NAME) self.client.send_request.assert_has_calls([ mock.call('net-ipspaces-destroy', net_ipspaces_destroy_args)]) def test_add_vserver_to_ipspace(self): self.mock_object(self.client, 'send_request') self.client.add_vserver_to_ipspace(fake.IPSPACE_NAME, fake.VSERVER_NAME) net_ipspaces_assign_vserver_args = { 'ipspace': fake.IPSPACE_NAME, 'vserver': fake.VSERVER_NAME } self.client.send_request.assert_has_calls([ mock.call('net-ipspaces-assign-vserver', net_ipspaces_assign_vserver_args)]) def test_get_node_for_aggregate(self): api_response = netapp_api.NaElement( fake.AGGR_GET_NODE_RESPONSE).get_child_by_name( 'attributes-list').get_children() self.mock_object(self.client, '_get_aggregates', mock.Mock(return_value=api_response)) result = self.client.get_node_for_aggregate(fake.SHARE_AGGREGATE_NAME) desired_attributes = { 'aggr-attributes': { 'aggregate-name': None, 'aggr-ownership-attributes': { 'home-name': None, }, }, } self.client._get_aggregates.assert_has_calls([ mock.call( aggregate_names=[fake.SHARE_AGGREGATE_NAME], desired_attributes=desired_attributes)]) self.assertEqual(fake.NODE_NAME, result) def test_get_node_for_aggregate_none_requested(self): result = 
self.client.get_node_for_aggregate(None) self.assertIsNone(result) def test_get_node_for_aggregate_api_not_found(self): self.mock_object(self.client, 'send_iter_request', mock.Mock(side_effect=self._mock_api_error( netapp_api.EAPINOTFOUND))) result = self.client.get_node_for_aggregate(fake.SHARE_AGGREGATE_NAME) self.assertIsNone(result) def test_get_node_for_aggregate_api_error(self): self.mock_object(self.client, 'send_iter_request', self._mock_api_error()) self.assertRaises(netapp_api.NaApiError, self.client.get_node_for_aggregate, fake.SHARE_AGGREGATE_NAME) def test_get_node_for_aggregate_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_node_for_aggregate(fake.SHARE_AGGREGATE_NAME) self.assertIsNone(result) def test_get_cluster_aggregate_capacities(self): api_response = netapp_api.NaElement( fake.AGGR_GET_SPACE_RESPONSE).get_child_by_name( 'attributes-list').get_children() self.mock_object(self.client, '_get_aggregates', mock.Mock(return_value=api_response)) result = self.client.get_cluster_aggregate_capacities( fake.SHARE_AGGREGATE_NAMES) desired_attributes = { 'aggr-attributes': { 'aggregate-name': None, 'aggr-space-attributes': { 'size-available': None, 'size-total': None, 'size-used': None, } } } self.client._get_aggregates.assert_has_calls([ mock.call( aggregate_names=fake.SHARE_AGGREGATE_NAMES, desired_attributes=desired_attributes)]) expected = { fake.SHARE_AGGREGATE_NAMES[0]: { 'available': 45670400, 'total': 943718400, 'used': 898048000, }, fake.SHARE_AGGREGATE_NAMES[1]: { 'available': 4267659264, 'total': 7549747200, 'used': 3282087936, }, } self.assertDictEqual(expected, result) def test_get_cluster_aggregate_capacities_not_found(self): api_response = netapp_api.NaElement('none').get_children() self.mock_object(self.client, '_get_aggregates', mock.Mock(return_value=api_response)) result = self.client.get_cluster_aggregate_capacities( fake.SHARE_AGGREGATE_NAMES) self.assertEqual({}, result) def test_get_cluster_aggregate_capacities_none_requested(self): result = self.client.get_cluster_aggregate_capacities([]) self.assertEqual({}, result) def test_get_vserver_aggregate_capacities(self): api_response = netapp_api.NaElement(fake.VSERVER_GET_RESPONSE) self.mock_object(self.vserver_client, 'send_request', mock.Mock(return_value=api_response)) result = self.vserver_client.get_vserver_aggregate_capacities() vserver_args = { 'desired-attributes': { 'vserver-info': { 'vserver-name': None, 'vserver-aggr-info-list': { 'vserver-aggr-info': { 'aggr-name': None, 'aggr-availsize': None } } } } } self.vserver_client.send_request.assert_has_calls([ mock.call('vserver-get', vserver_args)]) self.assertDictEqual(fake.VSERVER_AGGREGATES, result) def test_get_vserver_aggregate_capacities_partial_request(self): api_response = netapp_api.NaElement(fake.VSERVER_GET_RESPONSE) self.mock_object(self.vserver_client, 'send_request', mock.Mock(return_value=api_response)) result = self.vserver_client.get_vserver_aggregate_capacities( fake.SHARE_AGGREGATE_NAMES[0]) expected = {fake.SHARE_AGGREGATE_NAMES[0]: fake.VSERVER_AGGREGATES[fake.SHARE_AGGREGATE_NAMES[0]]} self.assertDictEqual(expected, result) def test_get_vserver_aggregate_capacities_aggregate_not_found(self): api_response = netapp_api.NaElement( fake.VSERVER_GET_RESPONSE_NO_AGGREGATES) self.mock_object(self.vserver_client, 'send_request', mock.Mock(return_value=api_response)) result = 
self.vserver_client.get_vserver_aggregate_capacities() self.assertDictEqual({}, result) self.assertEqual(1, client_cmode.LOG.warning.call_count) def test_get_vserver_aggregate_capacities_vserver_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.vserver_client, 'send_request', mock.Mock(return_value=api_response)) self.assertRaises(exception.NetAppException, self.vserver_client.get_vserver_aggregate_capacities) def test_get_vserver_aggregate_capacities_none_requested(self): result = self.client.get_vserver_aggregate_capacities([]) self.assertEqual({}, result) def test_get_aggregates(self): api_response = netapp_api.NaElement(fake.AGGR_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client._get_aggregates() self.client.send_iter_request.assert_has_calls([ mock.call('aggr-get-iter', {})]) self.assertListEqual( [aggr.to_string() for aggr in api_response.get_child_by_name( 'attributes-list').get_children()], [aggr.to_string() for aggr in result]) def test_get_aggregates_with_filters(self): api_response = netapp_api.NaElement(fake.AGGR_GET_SPACE_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) desired_attributes = { 'aggr-attributes': { 'aggregate-name': None, 'aggr-space-attributes': { 'size-total': None, 'size-available': None, } } } result = self.client._get_aggregates( aggregate_names=fake.SHARE_AGGREGATE_NAMES, desired_attributes=desired_attributes) aggr_get_iter_args = { 'query': { 'aggr-attributes': { 'aggregate-name': '|'.join(fake.SHARE_AGGREGATE_NAMES), } }, 'desired-attributes': desired_attributes } self.client.send_iter_request.assert_has_calls([ mock.call('aggr-get-iter', aggr_get_iter_args)]) self.assertListEqual( [aggr.to_string() for aggr in api_response.get_child_by_name( 'attributes-list').get_children()], [aggr.to_string() for aggr in result]) def test_get_aggregates_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client._get_aggregates() self.client.send_iter_request.assert_has_calls([ mock.call('aggr-get-iter', {})]) self.assertListEqual([], result) def test_setup_security_services_ldap(self): self.mock_object(self.client, 'send_request') self.mock_object(self.vserver_client, 'configure_ldap') self.client.setup_security_services([fake.LDAP_SECURITY_SERVICE], self.vserver_client, fake.VSERVER_NAME) vserver_modify_args = { 'name-mapping-switch': [ {'nmswitch': 'ldap'}, {'nmswitch': 'file'}, ], 'name-server-switch': [ {'nsswitch': 'ldap'}, {'nsswitch': 'file'}, ], 'vserver-name': fake.VSERVER_NAME } self.client.send_request.assert_has_calls([ mock.call('vserver-modify', vserver_modify_args)]) self.vserver_client.configure_ldap.assert_has_calls([ mock.call(fake.LDAP_SECURITY_SERVICE)]) def test_setup_security_services_active_directory(self): self.mock_object(self.client, 'send_request') self.mock_object(self.vserver_client, 'configure_active_directory') self.client.setup_security_services([fake.CIFS_SECURITY_SERVICE], self.vserver_client, fake.VSERVER_NAME) vserver_modify_args = { 'name-mapping-switch': [ {'nmswitch': 'ldap'}, {'nmswitch': 'file'}, ], 'name-server-switch': [ {'nsswitch': 'ldap'}, {'nsswitch': 'file'}, ], 'vserver-name': fake.VSERVER_NAME } self.client.send_request.assert_has_calls([ mock.call('vserver-modify', vserver_modify_args)]) 
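# Illustrative sketch, not from the original module: the test_get_aggregates*
# cases above pin down how the 'aggr-get-iter' arguments are assembled, an
# empty dict when no filters are given, otherwise a pipe-joined name query
# plus the caller's desired-attributes. The helper name below is
# hypothetical; the real logic lives in the client under test, not here.
def _build_aggr_get_iter_args(aggregate_names=None, desired_attributes=None):
    """Assemble 'aggr-get-iter' arguments the way the tests above expect."""
    api_args = {}
    if aggregate_names:
        api_args['query'] = {
            'aggr-attributes': {
                'aggregate-name': '|'.join(aggregate_names),
            },
        }
    if desired_attributes:
        api_args['desired-attributes'] = desired_attributes
    return api_args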
self.vserver_client.configure_active_directory.assert_has_calls([ mock.call(fake.CIFS_SECURITY_SERVICE, fake.VSERVER_NAME)]) def test_setup_security_services_kerberos(self): self.mock_object(self.client, 'send_request') self.mock_object(self.client, 'create_kerberos_realm') self.mock_object(self.vserver_client, 'configure_kerberos') self.client.setup_security_services([fake.KERBEROS_SECURITY_SERVICE], self.vserver_client, fake.VSERVER_NAME) vserver_modify_args = { 'name-mapping-switch': [ {'nmswitch': 'ldap'}, {'nmswitch': 'file'}, ], 'name-server-switch': [ {'nsswitch': 'ldap'}, {'nsswitch': 'file'}, ], 'vserver-name': fake.VSERVER_NAME } self.client.send_request.assert_has_calls([ mock.call('vserver-modify', vserver_modify_args)]) self.client.create_kerberos_realm.assert_has_calls([ mock.call(fake.KERBEROS_SECURITY_SERVICE)]) self.vserver_client.configure_kerberos.assert_has_calls([ mock.call(fake.KERBEROS_SECURITY_SERVICE, fake.VSERVER_NAME)]) def test_setup_security_services_invalid(self): self.mock_object(self.client, 'send_request') self.assertRaises(exception.NetAppException, self.client.setup_security_services, [fake.INVALID_SECURITY_SERVICE], self.vserver_client, fake.VSERVER_NAME) vserver_modify_args = { 'name-mapping-switch': [ {'nmswitch': 'ldap'}, {'nmswitch': 'file'}, ], 'name-server-switch': [ {'nsswitch': 'ldap'}, {'nsswitch': 'file'}, ], 'vserver-name': fake.VSERVER_NAME } self.client.send_request.assert_has_calls([ mock.call('vserver-modify', vserver_modify_args)]) def test_enable_nfs(self): self.mock_object(self.client, 'send_request') self.client.enable_nfs() nfs_service_modify_args = {'is-nfsv40-enabled': 'true'} export_rule_create_args = { 'client-match': '0.0.0.0/0', 'policy-name': 'default', 'ro-rule': { 'security-flavor': 'any' }, 'rw-rule': { 'security-flavor': 'never' } } self.client.send_request.assert_has_calls([ mock.call('nfs-enable'), mock.call('nfs-service-modify', nfs_service_modify_args), mock.call('export-rule-create', export_rule_create_args)]) def test_configure_ldap(self): self.mock_object(self.client, 'send_request') self.client.configure_ldap(fake.LDAP_SECURITY_SERVICE) config_name = hashlib.md5( six.b(fake.LDAP_SECURITY_SERVICE['id'])).hexdigest() ldap_client_create_args = { 'ldap-client-config': config_name, 'servers': {'ip-address': fake.LDAP_SECURITY_SERVICE['server']}, 'tcp-port': '389', 'schema': 'RFC-2307', 'bind-password': fake.LDAP_SECURITY_SERVICE['password'] } ldap_config_create_args = { 'client-config': config_name, 'client-enabled': 'true' } self.client.send_request.assert_has_calls([ mock.call('ldap-client-create', ldap_client_create_args), mock.call('ldap-config-create', ldap_config_create_args)]) def test_configure_active_directory(self): self.mock_object(self.client, 'send_request') self.mock_object(self.client, 'configure_dns') self.client.configure_active_directory(fake.CIFS_SECURITY_SERVICE, fake.VSERVER_NAME) cifs_server = ( fake.VSERVER_NAME[0:7] + '..' 
+ fake.VSERVER_NAME[-6:]).upper() cifs_server_create_args = { 'admin-username': fake.CIFS_SECURITY_SERVICE['user'], 'admin-password': fake.CIFS_SECURITY_SERVICE['password'], 'force-account-overwrite': 'true', 'cifs-server': cifs_server, 'domain': fake.CIFS_SECURITY_SERVICE['domain'], } self.client.configure_dns.assert_called_with( fake.CIFS_SECURITY_SERVICE) self.client.send_request.assert_has_calls([ mock.call('cifs-server-create', cifs_server_create_args)]) def test_configure_active_directory_api_error(self): self.mock_object(self.client, 'send_request', self._mock_api_error()) self.mock_object(self.client, 'configure_dns') self.assertRaises(exception.NetAppException, self.client.configure_active_directory, fake.CIFS_SECURITY_SERVICE, fake.VSERVER_NAME) def test_create_kerberos_realm(self): self.mock_object(self.client, 'send_request') self.client.create_kerberos_realm(fake.KERBEROS_SECURITY_SERVICE) kerberos_realm_create_args = { 'admin-server-ip': fake.KERBEROS_SECURITY_SERVICE['server'], 'admin-server-port': '749', 'clock-skew': '5', 'comment': '', 'config-name': fake.KERBEROS_SECURITY_SERVICE['id'], 'kdc-ip': fake.KERBEROS_SECURITY_SERVICE['server'], 'kdc-port': '88', 'kdc-vendor': 'other', 'password-server-ip': fake.KERBEROS_SECURITY_SERVICE['server'], 'password-server-port': '464', 'realm': fake.KERBEROS_SECURITY_SERVICE['domain'].upper() } self.client.send_request.assert_has_calls([ mock.call('kerberos-realm-create', kerberos_realm_create_args)]) def test_create_kerberos_realm_already_present(self): self.mock_object(self.client, 'send_request', self._mock_api_error(code=netapp_api.EDUPLICATEENTRY)) self.client.create_kerberos_realm(fake.KERBEROS_SECURITY_SERVICE) kerberos_realm_create_args = { 'admin-server-ip': fake.KERBEROS_SECURITY_SERVICE['server'], 'admin-server-port': '749', 'clock-skew': '5', 'comment': '', 'config-name': fake.KERBEROS_SECURITY_SERVICE['id'], 'kdc-ip': fake.KERBEROS_SECURITY_SERVICE['server'], 'kdc-port': '88', 'kdc-vendor': 'other', 'password-server-ip': fake.KERBEROS_SECURITY_SERVICE['server'], 'password-server-port': '464', 'realm': fake.KERBEROS_SECURITY_SERVICE['domain'].upper() } self.client.send_request.assert_has_calls([ mock.call('kerberos-realm-create', kerberos_realm_create_args)]) self.assertEqual(1, client_cmode.LOG.debug.call_count) def test_create_kerberos_realm_api_error(self): self.mock_object(self.client, 'send_request', self._mock_api_error()) self.assertRaises(exception.NetAppException, self.client.create_kerberos_realm, fake.KERBEROS_SECURITY_SERVICE) def test_configure_kerberos(self): self.mock_object(self.client, 'send_request') self.mock_object(self.client, 'configure_dns') self.mock_object(self.client, 'list_network_interfaces', mock.Mock(return_value=['lif1', 'lif2'])) self.client.configure_kerberos( fake.KERBEROS_SECURITY_SERVICE, fake.VSERVER_NAME) spn = self.client._get_kerberos_service_principal_name( fake.KERBEROS_SECURITY_SERVICE, fake.VSERVER_NAME) kerberos_config_modify_args1 = { 'admin-password': fake.KERBEROS_SECURITY_SERVICE['password'], 'admin-user-name': fake.KERBEROS_SECURITY_SERVICE['user'], 'interface-name': 'lif1', 'is-kerberos-enabled': 'true', 'service-principal-name': spn } kerberos_config_modify_args2 = { 'admin-password': fake.KERBEROS_SECURITY_SERVICE['password'], 'admin-user-name': fake.KERBEROS_SECURITY_SERVICE['user'], 'interface-name': 'lif2', 'is-kerberos-enabled': 'true', 'service-principal-name': spn } self.client.configure_dns.assert_called_with( fake.KERBEROS_SECURITY_SERVICE) 
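# Illustrative sketch, not from the original module: the cifs_server value
# built in test_configure_active_directory above compresses the Vserver name
# into a 15-character, NetBIOS-sized identifier. The standalone helper below
# only mirrors that expression; its name is hypothetical.
def _cifs_server_name(vserver_name):
    # First 7 characters, a '..' separator, then the last 6, upper-cased.
    return (vserver_name[0:7] + '..' + vserver_name[-6:]).upper()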
self.client.send_request.assert_has_calls([ mock.call('kerberos-config-modify', kerberos_config_modify_args1), mock.call('kerberos-config-modify', kerberos_config_modify_args2)]) def test_configure_kerberos_no_network_interfaces(self): self.mock_object(self.client, 'send_request') self.mock_object(self.client, 'configure_dns') self.mock_object(self.client, 'list_network_interfaces', mock.Mock(return_value=[])) self.assertRaises(exception.NetAppException, self.client.configure_kerberos, fake.KERBEROS_SECURITY_SERVICE, fake.VSERVER_NAME) self.client.configure_dns.assert_called_with( fake.KERBEROS_SECURITY_SERVICE) def test_get_kerberos_service_principal_name(self): spn = self.client._get_kerberos_service_principal_name( fake.KERBEROS_SECURITY_SERVICE, fake.VSERVER_NAME ) self.assertEqual(fake.KERBEROS_SERVICE_PRINCIPAL_NAME, spn) def test_configure_dns_for_active_directory(self): self.mock_object(self.client, 'send_request') self.client.configure_dns(fake.CIFS_SECURITY_SERVICE) net_dns_create_args = { 'domains': {'string': fake.CIFS_SECURITY_SERVICE['domain']}, 'name-servers': { 'ip-address': fake.CIFS_SECURITY_SERVICE['dns_ip'] }, 'dns-state': 'enabled' } self.client.send_request.assert_has_calls([ mock.call('net-dns-create', net_dns_create_args)]) def test_configure_dns_for_kerberos(self): self.mock_object(self.client, 'send_request') self.client.configure_dns(fake.KERBEROS_SECURITY_SERVICE) net_dns_create_args = { 'domains': {'string': fake.KERBEROS_SECURITY_SERVICE['domain']}, 'name-servers': { 'ip-address': fake.KERBEROS_SECURITY_SERVICE['dns_ip'] }, 'dns-state': 'enabled' } self.client.send_request.assert_has_calls([ mock.call('net-dns-create', net_dns_create_args)]) def test_configure_dns_already_present(self): self.mock_object(self.client, 'send_request', self._mock_api_error(code=netapp_api.EDUPLICATEENTRY)) self.client.configure_dns(fake.KERBEROS_SECURITY_SERVICE) net_dns_create_args = { 'domains': {'string': fake.KERBEROS_SECURITY_SERVICE['domain']}, 'name-servers': { 'ip-address': fake.KERBEROS_SECURITY_SERVICE['dns_ip'] }, 'dns-state': 'enabled' } self.client.send_request.assert_has_calls([ mock.call('net-dns-create', net_dns_create_args)]) self.assertEqual(1, client_cmode.LOG.error.call_count) def test_configure_dns_api_error(self): self.mock_object(self.client, 'send_request', self._mock_api_error()) self.assertRaises(exception.NetAppException, self.client.configure_dns, fake.KERBEROS_SECURITY_SERVICE) def test_create_volume(self): self.mock_object(self.client, 'send_request') self.client.create_volume( fake.SHARE_AGGREGATE_NAME, fake.SHARE_NAME, 100) volume_create_args = { 'containing-aggr-name': fake.SHARE_AGGREGATE_NAME, 'size': '100g', 'volume': fake.SHARE_NAME, 'volume-type': 'rw', 'junction-path': '/%s' % fake.SHARE_NAME, } self.client.send_request.assert_called_once_with('volume-create', volume_create_args) def test_create_volume_with_extra_specs(self): self.mock_object(self.client, 'set_volume_max_files') self.mock_object(self.client, 'enable_dedup') self.mock_object(self.client, 'enable_compression') self.mock_object(self.client, 'send_request') self.client.create_volume( fake.SHARE_AGGREGATE_NAME, fake.SHARE_NAME, 100, thin_provisioned=True, language='en-US', snapshot_policy='default', dedup_enabled=True, compression_enabled=True, max_files=5000, snapshot_reserve=15) volume_create_args = { 'containing-aggr-name': fake.SHARE_AGGREGATE_NAME, 'size': '100g', 'volume': fake.SHARE_NAME, 'junction-path': '/%s' % fake.SHARE_NAME, 'space-reserve': 'none', 'language-code': 
'en-US', 'volume-type': 'rw', 'snapshot-policy': 'default', 'percentage-snapshot-reserve': '15', } self.client.send_request.assert_called_with('volume-create', volume_create_args) self.client.set_volume_max_files.assert_called_once_with( fake.SHARE_NAME, fake.MAX_FILES) self.client.enable_dedup.assert_called_once_with(fake.SHARE_NAME) self.client.enable_compression.assert_called_once_with(fake.SHARE_NAME) def test_enable_dedup(self): self.mock_object(self.client, 'send_request') self.client.enable_dedup(fake.SHARE_NAME) sis_enable_args = {'path': '/vol/%s' % fake.SHARE_NAME} self.client.send_request.assert_called_once_with('sis-enable', sis_enable_args) def test_disable_dedup(self): self.mock_object(self.client, 'send_request') self.client.disable_dedup(fake.SHARE_NAME) sis_disable_args = {'path': '/vol/%s' % fake.SHARE_NAME} self.client.send_request.assert_called_once_with('sis-disable', sis_disable_args) def test_enable_compression(self): self.mock_object(self.client, 'send_request') self.client.enable_compression(fake.SHARE_NAME) sis_set_config_args = { 'path': '/vol/%s' % fake.SHARE_NAME, 'enable-compression': 'true' } self.client.send_request.assert_called_once_with('sis-set-config', sis_set_config_args) def test_disable_compression(self): self.mock_object(self.client, 'send_request') self.client.disable_compression(fake.SHARE_NAME) sis_set_config_args = { 'path': '/vol/%s' % fake.SHARE_NAME, 'enable-compression': 'false' } self.client.send_request.assert_called_once_with('sis-set-config', sis_set_config_args) def test_get_volume_efficiency_status(self): api_response = netapp_api.NaElement(fake.SIS_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_volume_efficiency_status(fake.SHARE_NAME) sis_get_iter_args = { 'query': { 'sis-status-info': { 'path': '/vol/%s' % fake.SHARE_NAME, }, }, 'desired-attributes': { 'sis-status-info': { 'state': None, 'is-compression-enabled': None, }, }, } self.client.send_iter_request.assert_has_calls([ mock.call('sis-get-iter', sis_get_iter_args)]) expected = {'dedupe': True, 'compression': True} self.assertDictEqual(expected, result) def test_get_volume_efficiency_status_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_volume_efficiency_status(fake.SHARE_NAME) expected = {'dedupe': False, 'compression': False} self.assertDictEqual(expected, result) def test_set_volume_max_files(self): self.mock_object(self.client, 'send_request') self.client.set_volume_max_files(fake.SHARE_NAME, fake.MAX_FILES) volume_modify_iter_api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake.SHARE_NAME, }, }, }, 'attributes': { 'volume-attributes': { 'volume-inode-attributes': { 'files-total': fake.MAX_FILES, }, }, }, } self.client.send_request.assert_called_once_with( 'volume-modify-iter', volume_modify_iter_api_args) def test_set_volume_name(self): self.mock_object(self.client, 'send_request') self.client.set_volume_name(fake.SHARE_NAME, 'new_name') volume_rename_api_args = { 'volume': fake.SHARE_NAME, 'new-volume-name': 'new_name', } self.client.send_request.assert_called_once_with( 'volume-rename', volume_rename_api_args) def test_manage_volume_no_optional_args(self): self.mock_object(self.client, 'send_request') mock_update_volume_efficiency_attributes = self.mock_object( self.client, 'update_volume_efficiency_attributes') 
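# Illustrative sketch, not from the original module: test_create_volume and
# test_create_volume_with_extra_specs above fix the shape of the
# 'volume-create' arguments. A standalone helper with a hypothetical name
# could build them like this; dedup, compression and max-files are applied
# through separate client calls, as the assertions above show.
def _build_volume_create_args(aggregate_name, volume_name, size_gb,
                              thin_provisioned=False, language=None,
                              snapshot_policy=None, snapshot_reserve=None):
    api_args = {
        'containing-aggr-name': aggregate_name,
        'size': '%sg' % size_gb,
        'volume': volume_name,
        'volume-type': 'rw',
        'junction-path': '/%s' % volume_name,
    }
    if thin_provisioned:
        api_args['space-reserve'] = 'none'
    if language:
        api_args['language-code'] = language
    if snapshot_policy:
        api_args['snapshot-policy'] = snapshot_policy
    if snapshot_reserve is not None:
        api_args['percentage-snapshot-reserve'] = str(snapshot_reserve)
    return api_args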
self.client.manage_volume(fake.SHARE_AGGREGATE_NAME, fake.SHARE_NAME) volume_modify_iter_api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'containing-aggregate-name': fake.SHARE_AGGREGATE_NAME, 'name': fake.SHARE_NAME, }, }, }, 'attributes': { 'volume-attributes': { 'volume-inode-attributes': {}, 'volume-language-attributes': {}, 'volume-snapshot-attributes': {}, 'volume-space-attributes': { 'space-guarantee': 'volume', }, }, }, } self.client.send_request.assert_called_once_with( 'volume-modify-iter', volume_modify_iter_api_args) mock_update_volume_efficiency_attributes.assert_called_once_with( fake.SHARE_NAME, False, False) def test_manage_volume_all_optional_args(self): self.mock_object(self.client, 'send_request') mock_update_volume_efficiency_attributes = self.mock_object( self.client, 'update_volume_efficiency_attributes') self.client.manage_volume(fake.SHARE_AGGREGATE_NAME, fake.SHARE_NAME, thin_provisioned=True, snapshot_policy=fake.SNAPSHOT_POLICY_NAME, language=fake.LANGUAGE, dedup_enabled=True, compression_enabled=False, max_files=fake.MAX_FILES) volume_modify_iter_api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'containing-aggregate-name': fake.SHARE_AGGREGATE_NAME, 'name': fake.SHARE_NAME, }, }, }, 'attributes': { 'volume-attributes': { 'volume-inode-attributes': { 'files-total': fake.MAX_FILES, }, 'volume-language-attributes': { 'language': fake.LANGUAGE, }, 'volume-snapshot-attributes': { 'snapshot-policy': fake.SNAPSHOT_POLICY_NAME, }, 'volume-space-attributes': { 'space-guarantee': 'none', }, }, }, } self.client.send_request.assert_called_once_with( 'volume-modify-iter', volume_modify_iter_api_args) mock_update_volume_efficiency_attributes.assert_called_once_with( fake.SHARE_NAME, True, False) @ddt.data( {'existing': (True, True), 'desired': (True, True)}, {'existing': (True, True), 'desired': (False, False)}, {'existing': (True, True), 'desired': (True, False)}, {'existing': (True, False), 'desired': (True, False)}, {'existing': (True, False), 'desired': (False, False)}, {'existing': (True, False), 'desired': (True, True)}, {'existing': (False, False), 'desired': (False, False)}, {'existing': (False, False), 'desired': (True, False)}, {'existing': (False, False), 'desired': (True, True)}, ) @ddt.unpack def test_update_volume_efficiency_attributes(self, existing, desired): existing_dedupe = existing[0] existing_compression = existing[1] desired_dedupe = desired[0] desired_compression = desired[1] self.mock_object( self.client, 'get_volume_efficiency_status', mock.Mock(return_value={'dedupe': existing_dedupe, 'compression': existing_compression})) mock_enable_compression = self.mock_object(self.client, 'enable_compression') mock_disable_compression = self.mock_object(self.client, 'disable_compression') mock_enable_dedup = self.mock_object(self.client, 'enable_dedup') mock_disable_dedup = self.mock_object(self.client, 'disable_dedup') self.client.update_volume_efficiency_attributes( fake.SHARE_NAME, desired_dedupe, desired_compression) if existing_dedupe == desired_dedupe: self.assertFalse(mock_enable_dedup.called) self.assertFalse(mock_disable_dedup.called) elif existing_dedupe and not desired_dedupe: self.assertFalse(mock_enable_dedup.called) self.assertTrue(mock_disable_dedup.called) elif not existing_dedupe and desired_dedupe: self.assertTrue(mock_enable_dedup.called) self.assertFalse(mock_disable_dedup.called) if existing_compression == desired_compression: self.assertFalse(mock_enable_compression.called) 
self.assertFalse(mock_disable_compression.called) elif existing_compression and not desired_compression: self.assertFalse(mock_enable_compression.called) self.assertTrue(mock_disable_compression.called) elif not existing_compression and desired_compression: self.assertTrue(mock_enable_compression.called) self.assertFalse(mock_disable_compression.called) def test_set_volume_size(self): api_response = netapp_api.NaElement(fake.VOLUME_MODIFY_ITER_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.client.set_volume_size(fake.SHARE_NAME, 10) volume_modify_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake.SHARE_NAME } } }, 'attributes': { 'volume-attributes': { 'volume-space-attributes': { 'size': 10737418240, }, }, }, } self.client.send_request.assert_has_calls([ mock.call('volume-modify-iter', volume_modify_iter_args)]) def test_set_volume_size_api_error(self): api_response = netapp_api.NaElement( fake.VOLUME_MODIFY_ITER_ERROR_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.assertRaises(netapp_api.NaApiError, self.client.set_volume_size, fake.SHARE_NAME, 10) def test_volume_exists(self): api_response = netapp_api.NaElement(fake.VOLUME_GET_NAME_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.volume_exists(fake.SHARE_NAME) volume_get_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake.SHARE_NAME } } }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'name': None } } } } self.client.send_iter_request.assert_has_calls([ mock.call('volume-get-iter', volume_get_iter_args)]) self.assertTrue(result) def test_volume_exists_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.assertFalse(self.client.volume_exists(fake.SHARE_NAME)) def test_snapshot_exists(self): api_response = netapp_api.NaElement(fake.VOLUME_GET_NAME_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.snapshot_exists(fake.SNAPSHOT_NAME, fake.SHARE_NAME) snapshot_get_iter_args = { 'query': { 'snapshot-info': { 'name': fake.SNAPSHOT_NAME, 'volume': fake.SHARE_NAME, } }, 'desired-attributes': { 'snapshot-info': { 'name': None, 'volume': None, 'busy': None, 'snapshot-owners-list': { 'snapshot-owner': None, } } } } self.client.send_request.assert_has_calls([ mock.call('snapshot-get-iter', snapshot_get_iter_args)]) self.assertTrue(result) def test_snapshot_exists_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.assertFalse(self.client.snapshot_exists(fake.SNAPSHOT_NAME, fake.SHARE_NAME)) @ddt.data({ 'api_response_xml': fake.SNAPSHOT_GET_ITER_UNAVAILABLE_RESPONSE, 'raised_exception': exception.SnapshotUnavailable, }, { 'api_response_xml': fake.SNAPSHOT_GET_ITER_OTHER_ERROR_RESPONSE, 'raised_exception': exception.NetAppException, }) @ddt.unpack def test_snapshot_exists_error(self, api_response_xml, raised_exception): api_response = netapp_api.NaElement(api_response_xml) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.assertRaises(raised_exception, self.client.snapshot_exists, fake.SNAPSHOT_NAME, fake.SHARE_NAME) def 
test_get_aggregate_for_volume(self): api_response = netapp_api.NaElement( fake.GET_AGGREGATE_FOR_VOLUME_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_aggregate_for_volume(fake.SHARE_NAME) volume_get_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake.SHARE_NAME } } }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'containing-aggregate-name': None, 'name': None } } } } self.client.send_iter_request.assert_has_calls([ mock.call('volume-get-iter', volume_get_iter_args)]) self.assertEqual(fake.SHARE_AGGREGATE_NAME, result) def test_get_aggregate_for_volume_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) self.assertRaises(exception.NetAppException, self.client.get_aggregate_for_volume, fake.SHARE_NAME) def test_volume_has_luns(self): api_response = netapp_api.NaElement(fake.LUN_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.volume_has_luns(fake.SHARE_NAME) lun_get_iter_args = { 'query': { 'lun-info': { 'volume': fake.SHARE_NAME, }, }, 'desired-attributes': { 'lun-info': { 'path': None, }, }, } self.client.send_iter_request.assert_has_calls([ mock.call('lun-get-iter', lun_get_iter_args)]) self.assertTrue(result) def test_volume_has_luns_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.volume_has_luns(fake.SHARE_NAME) self.assertFalse(result) def test_volume_has_junctioned_volumes(self): api_response = netapp_api.NaElement( fake.VOLUME_GET_ITER_JUNCTIONED_VOLUMES_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) fake_junction_path = '/%s' % fake.SHARE_NAME self.mock_object(self.client, 'get_volume_junction_path', mock.Mock(return_value=fake_junction_path)) result = self.client.volume_has_junctioned_volumes(fake.SHARE_NAME) volume_get_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'junction-path': fake_junction_path + '/*', }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'name': None, }, }, }, } self.client.send_iter_request.assert_has_calls([ mock.call('volume-get-iter', volume_get_iter_args)]) self.assertTrue(result) def test_volume_has_junctioned_volumes_no_junction_path(self): self.mock_object(self.client, 'get_volume_junction_path', mock.Mock(return_value='')) result = self.client.volume_has_junctioned_volumes(fake.SHARE_NAME) self.assertFalse(result) def test_volume_has_junctioned_volumes_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) fake_junction_path = '/%s' % fake.SHARE_NAME self.mock_object(self.client, 'get_volume_junction_path', mock.Mock(return_value=fake_junction_path)) result = self.client.volume_has_junctioned_volumes(fake.SHARE_NAME) self.assertFalse(result) def test_get_volume_at_junction_path(self): api_response = netapp_api.NaElement( fake.VOLUME_GET_ITER_VOLUME_TO_MANAGE_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) fake_junction_path = '/%s' % fake.SHARE_NAME result = self.client.get_volume_at_junction_path(fake_junction_path) 
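# Illustrative note, not from the original module: test_set_volume_size above
# expects the requested size in GiB to be converted to bytes before it is
# sent, e.g. 10 GiB becomes 10 * 1024 ** 3 == 10737418240. A hypothetical
# standalone helper with that behavior:
def _gib_to_bytes(size_gib):
    return size_gib * 1024 ** 3

# Quick sanity check matching the value asserted above.
assert _gib_to_bytes(10) == 10737418240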
volume_get_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'junction-path': fake_junction_path, }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'containing-aggregate-name': None, 'junction-path': None, 'name': None, 'type': None, 'style': None, }, 'volume-space-attributes': { 'size': None, } }, }, } expected = { 'aggregate': fake.SHARE_AGGREGATE_NAME, 'junction-path': fake_junction_path, 'name': fake.SHARE_NAME, 'type': 'rw', 'style': 'flex', 'size': fake.SHARE_SIZE, } self.client.send_iter_request.assert_has_calls([ mock.call('volume-get-iter', volume_get_iter_args)]) self.assertDictEqual(expected, result) def test_get_volume_at_junction_path_not_specified(self): result = self.client.get_volume_at_junction_path(None) self.assertIsNone(result) def test_get_volume_at_junction_path_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) fake_junction_path = '/%s' % fake.SHARE_NAME result = self.client.get_volume_at_junction_path(fake_junction_path) self.assertIsNone(result) def test_get_volume_to_manage(self): api_response = netapp_api.NaElement( fake.VOLUME_GET_ITER_VOLUME_TO_MANAGE_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_volume_to_manage(fake.SHARE_AGGREGATE_NAME, fake.SHARE_NAME) volume_get_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'containing-aggregate-name': fake.SHARE_AGGREGATE_NAME, 'name': fake.SHARE_NAME, }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'containing-aggregate-name': None, 'junction-path': None, 'name': None, 'type': None, 'style': None, }, 'volume-space-attributes': { 'size': None, } }, }, } expected = { 'aggregate': fake.SHARE_AGGREGATE_NAME, 'junction-path': '/%s' % fake.SHARE_NAME, 'name': fake.SHARE_NAME, 'type': 'rw', 'style': 'flex', 'size': fake.SHARE_SIZE, } self.client.send_iter_request.assert_has_calls([ mock.call('volume-get-iter', volume_get_iter_args)]) self.assertDictEqual(expected, result) def test_get_volume_to_manage_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_volume_to_manage(fake.SHARE_AGGREGATE_NAME, fake.SHARE_NAME) self.assertIsNone(result) def test_create_volume_clone(self): self.mock_object(self.client, 'send_request') self.client.create_volume_clone(fake.SHARE_NAME, fake.PARENT_SHARE_NAME, fake.PARENT_SNAPSHOT_NAME) volume_clone_create_args = { 'volume': fake.SHARE_NAME, 'parent-volume': fake.PARENT_SHARE_NAME, 'parent-snapshot': fake.PARENT_SNAPSHOT_NAME, 'junction-path': '/%s' % fake.SHARE_NAME } self.client.send_request.assert_has_calls([ mock.call('volume-clone-create', volume_clone_create_args)]) @ddt.data(None, mock.Mock(side_effect=netapp_api.NaApiError( code=netapp_api.EVOL_CLONE_BEING_SPLIT))) def test_split_volume_clone(self, side_effect): self.mock_object( self.client, 'send_request', mock.Mock(side_effect=side_effect)) self.client.split_volume_clone(fake.SHARE_NAME) volume_clone_split_args = {'volume': fake.SHARE_NAME} self.client.send_request.assert_has_calls([ mock.call('volume-clone-split-start', volume_clone_split_args)]) def test_split_volume_clone_api_error(self): self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error())) 
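# Illustrative sketch, not from the original module: the split_volume_clone
# tests here imply an error-tolerance pattern, a clone that is already being
# split is treated as success while any other API error propagates. The
# function below is a hypothetical standalone rendering of that flow; the
# send_request callable is passed in rather than taken from a client object.
def _split_clone_tolerant(send_request, volume_name):
    try:
        send_request('volume-clone-split-start', {'volume': volume_name})
    except netapp_api.NaApiError as e:
        if e.code != netapp_api.EVOL_CLONE_BEING_SPLIT:
            raise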
self.assertRaises(netapp_api.NaApiError, self.client.split_volume_clone, fake.SHARE_NAME) def test_get_clone_children_for_snapshot(self): api_response = netapp_api.NaElement( fake.VOLUME_GET_ITER_CLONE_CHILDREN_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_clone_children_for_snapshot( fake.SHARE_NAME, fake.SNAPSHOT_NAME) volume_get_iter_args = { 'query': { 'volume-attributes': { 'volume-clone-attributes': { 'volume-clone-parent-attributes': { 'name': fake.SHARE_NAME, 'snapshot-name': fake.SNAPSHOT_NAME, }, }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'name': None, }, }, }, } self.client.send_iter_request.assert_has_calls([ mock.call('volume-get-iter', volume_get_iter_args)]) expected = [ {'name': fake.CLONE_CHILD_1}, {'name': fake.CLONE_CHILD_2}, ] self.assertEqual(expected, result) def test_get_clone_children_for_snapshot_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_clone_children_for_snapshot( fake.SHARE_NAME, fake.SNAPSHOT_NAME) self.assertEqual([], result) def test_get_volume_junction_path(self): api_response = netapp_api.NaElement( fake.VOLUME_GET_VOLUME_PATH_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_volume_junction_path(fake.SHARE_NAME) volume_get_volume_path_args = { 'volume': fake.SHARE_NAME, 'is-style-cifs': 'false' } self.client.send_request.assert_has_calls([ mock.call('volume-get-volume-path', volume_get_volume_path_args)]) self.assertEqual(fake.VOLUME_JUNCTION_PATH, result) def test_get_volume_junction_path_cifs(self): api_response = netapp_api.NaElement( fake.VOLUME_GET_VOLUME_PATH_CIFS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_volume_junction_path(fake.SHARE_NAME, is_style_cifs=True) volume_get_volume_path_args = { 'volume': fake.SHARE_NAME, 'is-style-cifs': 'true' } self.client.send_request.assert_has_calls([ mock.call('volume-get-volume-path', volume_get_volume_path_args)]) self.assertEqual(fake.VOLUME_JUNCTION_PATH_CIFS, result) def test_mount_volume_default_junction_path(self): self.mock_object(self.client, 'send_request') self.client.mount_volume(fake.SHARE_NAME) volume_mount_args = { 'volume-name': fake.SHARE_NAME, 'junction-path': '/%s' % fake.SHARE_NAME, } self.client.send_request.assert_has_calls([ mock.call('volume-mount', volume_mount_args)]) def test_mount_volume(self): self.mock_object(self.client, 'send_request') fake_path = '/fake_path' self.client.mount_volume(fake.SHARE_NAME, junction_path=fake_path) volume_mount_args = { 'volume-name': fake.SHARE_NAME, 'junction-path': fake_path, } self.client.send_request.assert_has_calls([ mock.call('volume-mount', volume_mount_args)]) def test_offline_volume(self): self.mock_object(self.client, 'send_request') self.client.offline_volume(fake.SHARE_NAME) volume_offline_args = {'name': fake.SHARE_NAME} self.client.send_request.assert_has_calls([ mock.call('volume-offline', volume_offline_args)]) def test_offline_volume_already_offline(self): self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error( netapp_api.EVOLUMEOFFLINE))) self.client.offline_volume(fake.SHARE_NAME) volume_offline_args = {'name': fake.SHARE_NAME} self.client.send_request.assert_has_calls([ 
mock.call('volume-offline', volume_offline_args)]) def test_offline_volume_api_error(self): self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error())) self.assertRaises(netapp_api.NaApiError, self.client.offline_volume, fake.SHARE_NAME) def test__unmount_volume(self): self.mock_object(self.client, 'send_request') self.client._unmount_volume(fake.SHARE_NAME) volume_unmount_args = { 'volume-name': fake.SHARE_NAME, 'force': 'false' } self.client.send_request.assert_has_calls([ mock.call('volume-unmount', volume_unmount_args)]) def test__unmount_volume_force(self): self.mock_object(self.client, 'send_request') self.client._unmount_volume(fake.SHARE_NAME, force=True) volume_unmount_args = {'volume-name': fake.SHARE_NAME, 'force': 'true'} self.client.send_request.assert_has_calls([ mock.call('volume-unmount', volume_unmount_args)]) def test__unmount_volume_already_unmounted(self): self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error( netapp_api.EVOL_NOT_MOUNTED))) self.client._unmount_volume(fake.SHARE_NAME, force=True) volume_unmount_args = {'volume-name': fake.SHARE_NAME, 'force': 'true'} self.client.send_request.assert_has_calls([ mock.call('volume-unmount', volume_unmount_args)]) def test__unmount_volume_api_error(self): self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error())) self.assertRaises(netapp_api.NaApiError, self.client._unmount_volume, fake.SHARE_NAME, force=True) def test_unmount_volume(self): self.mock_object(self.client, '_unmount_volume') self.client.unmount_volume(fake.SHARE_NAME) self.client._unmount_volume.assert_called_once_with(fake.SHARE_NAME, force=False) self.assertEqual(1, client_cmode.LOG.debug.call_count) self.assertEqual(0, client_cmode.LOG.warning.call_count) def test_unmount_volume_api_error(self): self.mock_object(self.client, '_unmount_volume', self._mock_api_error()) self.assertRaises(netapp_api.NaApiError, self.client.unmount_volume, fake.SHARE_NAME) self.assertEqual(1, self.client._unmount_volume.call_count) self.assertEqual(0, client_cmode.LOG.debug.call_count) self.assertEqual(0, client_cmode.LOG.warning.call_count) def test_unmount_volume_with_retries(self): side_effect = [netapp_api.NaApiError(code=netapp_api.EAPIERROR, message='...job ID...')] * 5 side_effect.append(None) self.mock_object(self.client, '_unmount_volume', mock.Mock(side_effect=side_effect)) self.mock_object(time, 'sleep') self.client.unmount_volume(fake.SHARE_NAME) self.assertEqual(6, self.client._unmount_volume.call_count) self.assertEqual(1, client_cmode.LOG.debug.call_count) self.assertEqual(5, client_cmode.LOG.warning.call_count) def test_unmount_volume_with_max_retries(self): side_effect = [netapp_api.NaApiError(code=netapp_api.EAPIERROR, message='...job ID...')] * 30 self.mock_object(self.client, '_unmount_volume', mock.Mock(side_effect=side_effect)) self.mock_object(time, 'sleep') self.assertRaises(exception.NetAppException, self.client.unmount_volume, fake.SHARE_NAME) self.assertEqual(10, self.client._unmount_volume.call_count) self.assertEqual(0, client_cmode.LOG.debug.call_count) self.assertEqual(10, client_cmode.LOG.warning.call_count) def test_delete_volume(self): self.mock_object(self.client, 'send_request') self.client.delete_volume(fake.SHARE_NAME) volume_destroy_args = {'name': fake.SHARE_NAME} self.client.send_request.assert_has_calls([ mock.call('volume-destroy', volume_destroy_args)]) def test_create_snapshot(self): self.mock_object(self.client, 'send_request') 
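# Illustrative sketch, not from the original module: the unmount_volume retry
# tests above imply this rough control flow, up to ten attempts with a pause
# between busy failures and a NetAppException once the retries are exhausted.
# The names, the retry interval and the blanket NaApiError handling are
# simplifying assumptions; the real client also inspects the error before
# retrying and logs each attempt. Relies on this module's existing time,
# netapp_api and exception imports.
def _unmount_with_retries(unmount_once, volume_name, retries=10, interval=1):
    for _ in range(retries):
        try:
            unmount_once(volume_name)
            return
        except netapp_api.NaApiError:
            time.sleep(interval)
    raise exception.NetAppException('Timed out unmounting %s' % volume_name)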
self.client.create_snapshot(fake.SHARE_NAME, fake.SNAPSHOT_NAME) snapshot_create_args = { 'volume': fake.SHARE_NAME, 'snapshot': fake.SNAPSHOT_NAME } self.client.send_request.assert_has_calls([ mock.call('snapshot-create', snapshot_create_args)]) @ddt.data({ 'mock_return': fake.SNAPSHOT_GET_ITER_NOT_BUSY_RESPONSE, 'expected': { 'name': fake.SNAPSHOT_NAME, 'volume': fake.SHARE_NAME, 'busy': False, 'owners': set(), } }, { 'mock_return': fake.SNAPSHOT_GET_ITER_BUSY_RESPONSE, 'expected': { 'name': fake.SNAPSHOT_NAME, 'volume': fake.SHARE_NAME, 'busy': True, 'owners': {'volume clone'}, } }) @ddt.unpack def test_get_snapshot(self, mock_return, expected): api_response = netapp_api.NaElement(mock_return) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_snapshot(fake.SHARE_NAME, fake.SNAPSHOT_NAME) snapshot_get_iter_args = { 'query': { 'snapshot-info': { 'name': fake.SNAPSHOT_NAME, 'volume': fake.SHARE_NAME, }, }, 'desired-attributes': { 'snapshot-info': { 'name': None, 'volume': None, 'busy': None, 'snapshot-owners-list': { 'snapshot-owner': None, } }, }, } self.client.send_request.assert_has_calls([ mock.call('snapshot-get-iter', snapshot_get_iter_args)]) self.assertDictEqual(expected, result) @ddt.data({ 'api_response_xml': fake.NO_RECORDS_RESPONSE, 'raised_exception': exception.SnapshotResourceNotFound, }, { 'api_response_xml': fake.SNAPSHOT_GET_ITER_NOT_UNIQUE_RESPONSE, 'raised_exception': exception.NetAppException, }, { 'api_response_xml': fake.SNAPSHOT_GET_ITER_UNAVAILABLE_RESPONSE, 'raised_exception': exception.SnapshotUnavailable, }, { 'api_response_xml': fake.SNAPSHOT_GET_ITER_OTHER_ERROR_RESPONSE, 'raised_exception': exception.NetAppException, }) @ddt.unpack def test_get_snapshot_error(self, api_response_xml, raised_exception): api_response = netapp_api.NaElement(api_response_xml) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.assertRaises(raised_exception, self.client.get_snapshot, fake.SHARE_NAME, fake.SNAPSHOT_NAME) def test_rename_snapshot(self): self.mock_object(self.client, 'send_request') self.client.rename_snapshot(fake.SHARE_NAME, fake.SNAPSHOT_NAME, 'new_snapshot_name') snapshot_rename_args = { 'volume': fake.SHARE_NAME, 'current-name': fake.SNAPSHOT_NAME, 'new-name': 'new_snapshot_name' } self.client.send_request.assert_has_calls([ mock.call('snapshot-rename', snapshot_rename_args)]) def test_delete_snapshot(self): self.mock_object(self.client, 'send_request') self.client.delete_snapshot(fake.SHARE_NAME, fake.SNAPSHOT_NAME) snapshot_delete_args = { 'volume': fake.SHARE_NAME, 'snapshot': fake.SNAPSHOT_NAME } self.client.send_request.assert_has_calls([ mock.call('snapshot-delete', snapshot_delete_args)]) def test_soft_delete_snapshot(self): mock_delete_snapshot = self.mock_object(self.client, 'delete_snapshot') mock_rename_snapshot = self.mock_object(self.client, 'rename_snapshot') self.client.soft_delete_snapshot(fake.SHARE_NAME, fake.SNAPSHOT_NAME) mock_delete_snapshot.assert_called_once_with( fake.SHARE_NAME, fake.SNAPSHOT_NAME) self.assertFalse(mock_rename_snapshot.called) def test_soft_delete_snapshot_api_error(self): mock_delete_snapshot = self.mock_object( self.client, 'delete_snapshot', self._mock_api_error()) mock_rename_snapshot = self.mock_object(self.client, 'rename_snapshot') self.client.soft_delete_snapshot(fake.SHARE_NAME, fake.SNAPSHOT_NAME) mock_delete_snapshot.assert_called_once_with( fake.SHARE_NAME, fake.SNAPSHOT_NAME) 
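# Illustrative sketch, not from the original module: the soft_delete_snapshot
# tests here exercise a fallback, delete the snapshot and, if the backend
# refuses (for example a busy snapshot), rename it under the
# 'deleted_manila_' prefix so the pruning helpers tested below can retry
# later. Hypothetical standalone shape with the collaborators passed in:
def _soft_delete_snapshot(delete_snapshot, rename_snapshot,
                          volume_name, snapshot_name):
    try:
        delete_snapshot(volume_name, snapshot_name)
    except netapp_api.NaApiError:
        rename_snapshot(volume_name, snapshot_name,
                        'deleted_manila_' + snapshot_name)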
mock_rename_snapshot.assert_called_once_with( fake.SHARE_NAME, fake.SNAPSHOT_NAME, 'deleted_manila_' + fake.SNAPSHOT_NAME) def test_prune_deleted_snapshots(self): deleted_snapshots_map = { 'vserver1': [{ 'name': 'deleted_snap_1', 'volume': 'fake_volume_1', 'vserver': 'vserver1', }], 'vserver2': [{ 'name': 'deleted_snap_2', 'volume': 'fake_volume_2', 'vserver': 'vserver2', }], } mock_get_deleted_snapshots = self.mock_object( self.client, '_get_deleted_snapshots', mock.Mock(return_value=deleted_snapshots_map)) mock_delete_snapshot = self.mock_object( self.client, 'delete_snapshot', mock.Mock(side_effect=[None, netapp_api.NaApiError])) self.mock_object( copy, 'deepcopy', mock.Mock(return_value=self.client)) self.client.prune_deleted_snapshots() mock_get_deleted_snapshots.assert_called_once_with() mock_delete_snapshot.assert_has_calls([ mock.call('fake_volume_1', 'deleted_snap_1'), mock.call('fake_volume_2', 'deleted_snap_2'), ], any_order=True) def test_get_deleted_snapshots(self): api_response = netapp_api.NaElement( fake.SNAPSHOT_GET_ITER_DELETED_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client._get_deleted_snapshots() snapshot_get_iter_args = { 'query': { 'snapshot-info': { 'name': 'deleted_manila_*', 'busy': 'false', }, }, 'desired-attributes': { 'snapshot-info': { 'name': None, 'vserver': None, 'volume': None, }, }, } self.client.send_iter_request.assert_has_calls([ mock.call('snapshot-get-iter', snapshot_get_iter_args)]) expected = { fake.VSERVER_NAME: [{ 'name': 'deleted_manila_' + fake.SNAPSHOT_NAME, 'volume': fake.SHARE_NAME, 'vserver': fake.VSERVER_NAME, }], } self.assertDictEqual(expected, result) def test_create_cg_snapshot(self): mock_start_cg_snapshot = self.mock_object( self.client, '_start_cg_snapshot', mock.Mock(return_value=fake.CG_SNAPSHOT_ID)) mock_commit_cg_snapshot = self.mock_object( self.client, '_commit_cg_snapshot') self.client.create_cg_snapshot([fake.SHARE_NAME, fake.SHARE_NAME_2], fake.SNAPSHOT_NAME) mock_start_cg_snapshot.assert_called_once_with( [fake.SHARE_NAME, fake.SHARE_NAME_2], fake.SNAPSHOT_NAME) mock_commit_cg_snapshot.assert_called_once_with(fake.CG_SNAPSHOT_ID) def test_create_cg_snapshot_no_id(self): mock_start_cg_snapshot = self.mock_object( self.client, '_start_cg_snapshot', mock.Mock(return_value=None)) mock_commit_cg_snapshot = self.mock_object( self.client, '_commit_cg_snapshot') self.assertRaises(exception.NetAppException, self.client.create_cg_snapshot, [fake.SHARE_NAME, fake.SHARE_NAME_2], fake.SNAPSHOT_NAME) mock_start_cg_snapshot.assert_called_once_with( [fake.SHARE_NAME, fake.SHARE_NAME_2], fake.SNAPSHOT_NAME) self.assertFalse(mock_commit_cg_snapshot.called) def test_start_cg_snapshot(self): self.mock_object(self.client, 'send_request') self.client._start_cg_snapshot([fake.SHARE_NAME, fake.SHARE_NAME_2], fake.SNAPSHOT_NAME) cg_start_args = { 'snapshot': fake.SNAPSHOT_NAME, 'timeout': 'relaxed', 'volumes': [ {'volume-name': fake.SHARE_NAME}, {'volume-name': fake.SHARE_NAME_2}, ], } self.client.send_request.assert_has_calls([ mock.call('cg-start', cg_start_args)]) def test_commit_cg_snapshot(self): self.mock_object(self.client, 'send_request') self.client._commit_cg_snapshot(fake.CG_SNAPSHOT_ID) cg_commit_args = {'cg-id': fake.CG_SNAPSHOT_ID} self.client.send_request.assert_has_calls([ mock.call('cg-commit', cg_commit_args)]) def test_create_cifs_share(self): self.mock_object(self.client, 'send_request') self.client.create_cifs_share(fake.SHARE_NAME) cifs_share_create_args 
= { 'path': '/%s' % fake.SHARE_NAME, 'share-name': fake.SHARE_NAME } self.client.send_request.assert_has_calls([ mock.call('cifs-share-create', cifs_share_create_args)]) def test_get_cifs_share_access(self): api_response = netapp_api.NaElement( fake.CIFS_SHARE_ACCESS_CONTROL_GET_ITER) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_cifs_share_access(fake.SHARE_NAME) cifs_share_access_control_get_iter_args = { 'query': { 'cifs-share-access-control': { 'share': fake.SHARE_NAME, }, }, 'desired-attributes': { 'cifs-share-access-control': { 'user-or-group': None, 'permission': None, }, }, } self.client.send_iter_request.assert_has_calls([ mock.call('cifs-share-access-control-get-iter', cifs_share_access_control_get_iter_args)]) expected = { 'Administrator': 'full_control', 'Administrators': 'change', 'Power Users': 'read', 'Users': 'no_access', } self.assertDictEqual(expected, result) def test_get_cifs_share_access_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_cifs_share_access(fake.SHARE_NAME) self.assertEqual({}, result) @ddt.data(True, False) def test_add_cifs_share_access(self, readonly): self.mock_object(self.client, 'send_request') self.client.add_cifs_share_access(fake.SHARE_NAME, fake.USER_NAME, readonly) cifs_share_access_control_create_args = { 'permission': 'read' if readonly else 'full_control', 'share': fake.SHARE_NAME, 'user-or-group': fake.USER_NAME } self.client.send_request.assert_has_calls([ mock.call( 'cifs-share-access-control-create', cifs_share_access_control_create_args)]) @ddt.data(True, False) def test_modify_cifs_share_access(self, readonly): self.mock_object(self.client, 'send_request') self.client.modify_cifs_share_access(fake.SHARE_NAME, fake.USER_NAME, readonly) cifs_share_access_control_modify_args = { 'permission': 'read' if readonly else 'full_control', 'share': fake.SHARE_NAME, 'user-or-group': fake.USER_NAME } self.client.send_request.assert_has_calls([ mock.call( 'cifs-share-access-control-modify', cifs_share_access_control_modify_args)]) def test_remove_cifs_share_access(self): self.mock_object(self.client, 'send_request') self.client.remove_cifs_share_access(fake.SHARE_NAME, fake.USER_NAME) cifs_share_access_control_delete_args = { 'user-or-group': fake.USER_NAME, 'share': fake.SHARE_NAME } self.client.send_request.assert_has_calls([ mock.call( 'cifs-share-access-control-delete', cifs_share_access_control_delete_args)]) def test_remove_cifs_share(self): self.mock_object(self.client, 'send_request') self.client.remove_cifs_share(fake.SHARE_NAME) cifs_share_delete_args = {'share-name': fake.SHARE_NAME} self.client.send_request.assert_has_calls([ mock.call('cifs-share-delete', cifs_share_delete_args)]) def test_add_nfs_export_rule(self): mock_get_nfs_export_rule_indices = self.mock_object( self.client, '_get_nfs_export_rule_indices', mock.Mock(return_value=[])) mock_add_nfs_export_rule = self.mock_object( self.client, '_add_nfs_export_rule') mock_update_nfs_export_rule = self.mock_object( self.client, '_update_nfs_export_rule') self.client.add_nfs_export_rule(fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS, False) mock_get_nfs_export_rule_indices.assert_called_once_with( fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS) mock_add_nfs_export_rule.assert_called_once_with( fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS, False) 
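# Illustrative note, not from the original module: the CIFS share access
# tests above all translate the boolean read-only flag into an ONTAP
# permission string the same way. A tiny hypothetical helper capturing that
# mapping:
def _cifs_permission(readonly):
    # Read-only access maps to 'read'; otherwise full control is granted.
    return 'read' if readonly else 'full_control'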
self.assertFalse(mock_update_nfs_export_rule.called) def test_add_nfs_export_rule_single_existing(self): mock_get_nfs_export_rule_indices = self.mock_object( self.client, '_get_nfs_export_rule_indices', mock.Mock(return_value=['1'])) mock_add_nfs_export_rule = self.mock_object( self.client, '_add_nfs_export_rule') mock_update_nfs_export_rule = self.mock_object( self.client, '_update_nfs_export_rule') mock_remove_nfs_export_rules = self.mock_object( self.client, '_remove_nfs_export_rules') self.client.add_nfs_export_rule(fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS, False) mock_get_nfs_export_rule_indices.assert_called_once_with( fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS) self.assertFalse(mock_add_nfs_export_rule.called) mock_update_nfs_export_rule.assert_called_once_with( fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS, False, '1') mock_remove_nfs_export_rules.assert_called_once_with( fake.EXPORT_POLICY_NAME, []) def test_add_nfs_export_rule_multiple_existing(self): mock_get_nfs_export_rule_indices = self.mock_object( self.client, '_get_nfs_export_rule_indices', mock.Mock(return_value=['2', '4', '6'])) mock_add_nfs_export_rule = self.mock_object( self.client, '_add_nfs_export_rule') mock_update_nfs_export_rule = self.mock_object( self.client, '_update_nfs_export_rule') mock_remove_nfs_export_rules = self.mock_object( self.client, '_remove_nfs_export_rules') self.client.add_nfs_export_rule(fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS, False) mock_get_nfs_export_rule_indices.assert_called_once_with( fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS) self.assertFalse(mock_add_nfs_export_rule.called) mock_update_nfs_export_rule.assert_called_once_with( fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS, False, '2') mock_remove_nfs_export_rules.assert_called_once_with( fake.EXPORT_POLICY_NAME, ['4', '6']) @ddt.data({'readonly': False, 'rw_security_flavor': 'sys'}, {'readonly': True, 'rw_security_flavor': 'never'}) @ddt.unpack def test__add_nfs_export_rule(self, readonly, rw_security_flavor): self.mock_object(self.client, 'send_request') self.client._add_nfs_export_rule(fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS, readonly) export_rule_create_args = { 'policy-name': fake.EXPORT_POLICY_NAME, 'client-match': fake.IP_ADDRESS, 'ro-rule': { 'security-flavor': 'sys', }, 'rw-rule': { 'security-flavor': rw_security_flavor, }, 'super-user-security': { 'security-flavor': 'sys', }, } self.client.send_request.assert_has_calls( [mock.call('export-rule-create', export_rule_create_args)]) @ddt.data({'readonly': False, 'rw_security_flavor': 'sys', 'index': '2'}, {'readonly': True, 'rw_security_flavor': 'never', 'index': '4'}) @ddt.unpack def test_update_nfs_export_rule(self, readonly, rw_security_flavor, index): self.mock_object(self.client, 'send_request') self.client._update_nfs_export_rule(fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS, readonly, index) export_rule_modify_args = { 'policy-name': fake.EXPORT_POLICY_NAME, 'rule-index': index, 'client-match': fake.IP_ADDRESS, 'ro-rule': { 'security-flavor': 'sys', }, 'rw-rule': { 'security-flavor': rw_security_flavor, }, 'super-user-security': { 'security-flavor': 'sys', }, } self.client.send_request.assert_has_calls( [mock.call('export-rule-modify', export_rule_modify_args)]) def test_get_nfs_export_rule_indices(self): api_response = netapp_api.NaElement(fake.EXPORT_RULE_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client._get_nfs_export_rule_indices( fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS) export_rule_get_iter_args = { 
'query': { 'export-rule-info': { 'policy-name': fake.EXPORT_POLICY_NAME, 'client-match': fake.IP_ADDRESS, }, }, 'desired-attributes': { 'export-rule-info': { 'vserver-name': None, 'policy-name': None, 'client-match': None, 'rule-index': None, }, }, } self.assertListEqual(['1', '3'], result) self.client.send_iter_request.assert_has_calls([ mock.call('export-rule-get-iter', export_rule_get_iter_args)]) def test_remove_nfs_export_rule(self): fake_indices = ['1', '3', '4'] mock_get_nfs_export_rule_indices = self.mock_object( self.client, '_get_nfs_export_rule_indices', mock.Mock(return_value=fake_indices)) mock_remove_nfs_export_rules = self.mock_object( self.client, '_remove_nfs_export_rules') self.client.remove_nfs_export_rule(fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS) mock_get_nfs_export_rule_indices.assert_called_once_with( fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS) mock_remove_nfs_export_rules.assert_called_once_with( fake.EXPORT_POLICY_NAME, fake_indices) def test_remove_nfs_export_rules(self): fake_indices = ['1', '3'] self.mock_object(self.client, 'send_request') self.client._remove_nfs_export_rules(fake.EXPORT_POLICY_NAME, fake_indices) self.client.send_request.assert_has_calls([ mock.call( 'export-rule-destroy', {'policy-name': fake.EXPORT_POLICY_NAME, 'rule-index': '1'}), mock.call( 'export-rule-destroy', {'policy-name': fake.EXPORT_POLICY_NAME, 'rule-index': '3'})]) def test_remove_nfs_export_rules_not_found(self): self.mock_object(self.client, 'send_request', self._mock_api_error(code=netapp_api.EOBJECTNOTFOUND)) self.client._remove_nfs_export_rules(fake.EXPORT_POLICY_NAME, ['1']) self.client.send_request.assert_has_calls([ mock.call( 'export-rule-destroy', {'policy-name': fake.EXPORT_POLICY_NAME, 'rule-index': '1'})]) def test_remove_nfs_export_rules_api_error(self): self.mock_object(self.client, 'send_request', self._mock_api_error()) self.assertRaises(netapp_api.NaApiError, self.client._remove_nfs_export_rules, fake.EXPORT_POLICY_NAME, ['1']) def test_clear_nfs_export_policy_for_volume(self): mock_set_nfs_export_policy_for_volume = self.mock_object( self.client, 'set_nfs_export_policy_for_volume') self.client.clear_nfs_export_policy_for_volume(fake.SHARE_NAME) mock_set_nfs_export_policy_for_volume.assert_called_once_with( fake.SHARE_NAME, 'default') def test_set_nfs_export_policy_for_volume(self): self.mock_object(self.client, 'send_request') self.client.set_nfs_export_policy_for_volume(fake.SHARE_NAME, fake.EXPORT_POLICY_NAME) volume_modify_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake.SHARE_NAME, }, }, }, 'attributes': { 'volume-attributes': { 'volume-export-attributes': { 'policy': fake.EXPORT_POLICY_NAME, }, }, }, } self.client.send_request.assert_has_calls([ mock.call('volume-modify-iter', volume_modify_iter_args)]) def test_get_nfs_export_policy_for_volume(self): api_response = netapp_api.NaElement( fake.VOLUME_GET_EXPORT_POLICY_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_nfs_export_policy_for_volume(fake.SHARE_NAME) volume_get_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake.SHARE_NAME, }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-export-attributes': { 'policy': None, }, }, }, } self.assertEqual(fake.EXPORT_POLICY_NAME, result) self.client.send_iter_request.assert_has_calls([ mock.call('volume-get-iter', volume_get_iter_args)]) def test_get_nfs_export_policy_for_volume_not_found(self): api_response 
= netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) self.assertRaises(exception.NetAppException, self.client.get_nfs_export_policy_for_volume, fake.SHARE_NAME) def test_create_nfs_export_policy(self): self.mock_object(self.client, 'send_request') self.client.create_nfs_export_policy(fake.EXPORT_POLICY_NAME) export_policy_create_args = {'policy-name': fake.EXPORT_POLICY_NAME} self.client.send_request.assert_has_calls([ mock.call('export-policy-create', export_policy_create_args)]) def test_create_nfs_export_policy_already_present(self): self.mock_object(self.client, 'send_request', self._mock_api_error(code=netapp_api.EDUPLICATEENTRY)) self.client.create_nfs_export_policy(fake.EXPORT_POLICY_NAME) export_policy_create_args = {'policy-name': fake.EXPORT_POLICY_NAME} self.client.send_request.assert_has_calls([ mock.call('export-policy-create', export_policy_create_args)]) def test_create_nfs_export_policy_api_error(self): self.mock_object(self.client, 'send_request', self._mock_api_error()) self.assertRaises(netapp_api.NaApiError, self.client.create_nfs_export_policy, fake.EXPORT_POLICY_NAME) def test_soft_delete_nfs_export_policy(self): self.mock_object(self.client, 'delete_nfs_export_policy') self.mock_object(self.client, 'rename_nfs_export_policy') self.client.soft_delete_nfs_export_policy(fake.EXPORT_POLICY_NAME) self.client.delete_nfs_export_policy.assert_has_calls([ mock.call(fake.EXPORT_POLICY_NAME)]) self.assertFalse(self.client.rename_nfs_export_policy.called) def test_soft_delete_nfs_export_policy_api_error(self): self.mock_object(self.client, 'delete_nfs_export_policy', self._mock_api_error()) self.mock_object(self.client, 'rename_nfs_export_policy') self.client.soft_delete_nfs_export_policy(fake.EXPORT_POLICY_NAME) self.client.delete_nfs_export_policy.assert_has_calls([ mock.call(fake.EXPORT_POLICY_NAME)]) self.assertTrue(self.client.rename_nfs_export_policy.called) def test_delete_nfs_export_policy(self): self.mock_object(self.client, 'send_request') self.client.delete_nfs_export_policy(fake.EXPORT_POLICY_NAME) export_policy_destroy_args = {'policy-name': fake.EXPORT_POLICY_NAME} self.client.send_request.assert_has_calls([ mock.call('export-policy-destroy', export_policy_destroy_args)]) def test_delete_nfs_export_policy_not_found(self): self.mock_object(self.client, 'send_request', self._mock_api_error(code=netapp_api.EOBJECTNOTFOUND)) self.client.delete_nfs_export_policy(fake.EXPORT_POLICY_NAME) export_policy_destroy_args = {'policy-name': fake.EXPORT_POLICY_NAME} self.client.send_request.assert_has_calls([ mock.call('export-policy-destroy', export_policy_destroy_args)]) def test_delete_nfs_export_policy_api_error(self): self.mock_object(self.client, 'send_request', self._mock_api_error()) self.assertRaises(netapp_api.NaApiError, self.client.delete_nfs_export_policy, fake.EXPORT_POLICY_NAME) def test_rename_nfs_export_policy(self): self.mock_object(self.client, 'send_request') self.client.rename_nfs_export_policy(fake.EXPORT_POLICY_NAME, 'new_policy_name') export_policy_rename_args = { 'policy-name': fake.EXPORT_POLICY_NAME, 'new-policy-name': 'new_policy_name' } self.client.send_request.assert_has_calls([ mock.call('export-policy-rename', export_policy_rename_args)]) def test_prune_deleted_nfs_export_policies(self): # Mock client lest we not be able to see calls on its copy. 
self.mock_object(copy, 'deepcopy', mock.Mock(return_value=self.client)) self.mock_object(self.client, '_get_deleted_nfs_export_policies', mock.Mock(return_value=fake.DELETED_EXPORT_POLICIES)) self.mock_object(self.client, 'delete_nfs_export_policy') self.client.prune_deleted_nfs_export_policies() self.assertTrue(self.client.delete_nfs_export_policy.called) self.client.delete_nfs_export_policy.assert_has_calls( [mock.call(policy) for policy in fake.DELETED_EXPORT_POLICIES[fake.VSERVER_NAME]]) def test_prune_deleted_nfs_export_policies_api_error(self): self.mock_object(copy, 'deepcopy', mock.Mock(return_value=self.client)) self.mock_object(self.client, '_get_deleted_nfs_export_policies', mock.Mock(return_value=fake.DELETED_EXPORT_POLICIES)) self.mock_object(self.client, 'delete_nfs_export_policy', self._mock_api_error()) self.client.prune_deleted_nfs_export_policies() self.assertTrue(self.client.delete_nfs_export_policy.called) self.client.delete_nfs_export_policy.assert_has_calls( [mock.call(policy) for policy in fake.DELETED_EXPORT_POLICIES[fake.VSERVER_NAME]]) def test_get_deleted_nfs_export_policies(self): api_response = netapp_api.NaElement( fake.DELETED_EXPORT_POLICY_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client._get_deleted_nfs_export_policies() export_policy_get_iter_args = { 'query': { 'export-policy-info': { 'policy-name': 'deleted_manila_*', }, }, 'desired-attributes': { 'export-policy-info': { 'policy-name': None, 'vserver': None, }, }, } self.assertSequenceEqual(fake.DELETED_EXPORT_POLICIES, result) self.client.send_iter_request.assert_has_calls([ mock.call('export-policy-get-iter', export_policy_get_iter_args)]) def test_get_ems_log_destination_vserver(self): self.mock_object(self.client, 'get_ontapi_version', mock.Mock(return_value=(1, 21))) mock_list_vservers = self.mock_object( self.client, 'list_vservers', mock.Mock(return_value=[fake.ADMIN_VSERVER_NAME])) result = self.client._get_ems_log_destination_vserver() mock_list_vservers.assert_called_once_with(vserver_type='admin') self.assertEqual(fake.ADMIN_VSERVER_NAME, result) def test_get_ems_log_destination_vserver_future(self): self.mock_object(self.client, 'get_ontapi_version', mock.Mock(return_value=(2, 0))) mock_list_vservers = self.mock_object( self.client, 'list_vservers', mock.Mock(return_value=[fake.ADMIN_VSERVER_NAME])) result = self.client._get_ems_log_destination_vserver() mock_list_vservers.assert_called_once_with(vserver_type='admin') self.assertEqual(fake.ADMIN_VSERVER_NAME, result) def test_get_ems_log_destination_vserver_legacy(self): self.mock_object(self.client, 'get_ontapi_version', mock.Mock(return_value=(1, 15))) mock_list_vservers = self.mock_object( self.client, 'list_vservers', mock.Mock(return_value=[fake.NODE_VSERVER_NAME])) result = self.client._get_ems_log_destination_vserver() mock_list_vservers.assert_called_once_with(vserver_type='node') self.assertEqual(fake.NODE_VSERVER_NAME, result) def test_get_ems_log_destination_no_cluster_creds(self): self.mock_object(self.client, 'get_ontapi_version', mock.Mock(return_value=(1, 21))) mock_list_vservers = self.mock_object( self.client, 'list_vservers', mock.Mock(side_effect=[[], [fake.VSERVER_NAME]])) result = self.client._get_ems_log_destination_vserver() mock_list_vservers.assert_has_calls([ mock.call(vserver_type='admin'), mock.call(vserver_type='data')]) self.assertEqual(fake.VSERVER_NAME, result) def test_get_ems_log_destination_vserver_not_found(self): 
self.mock_object(self.client, 'get_ontapi_version', mock.Mock(return_value=(1, 21))) mock_list_vservers = self.mock_object( self.client, 'list_vservers', mock.Mock(return_value=[])) self.assertRaises(exception.NotFound, self.client._get_ems_log_destination_vserver) mock_list_vservers.assert_has_calls([ mock.call(vserver_type='admin'), mock.call(vserver_type='data'), mock.call(vserver_type='node')]) def test_send_ems_log_message(self): # Mock client lest we not be able to see calls on its copy. self.mock_object(copy, 'deepcopy', mock.Mock(return_value=self.client)) self.mock_object(self.client, '_get_ems_log_destination_vserver', mock.Mock(return_value=fake.ADMIN_VSERVER_NAME)) self.mock_object(self.client, 'send_request') self.client.send_ems_log_message(fake.EMS_MESSAGE) self.client.send_request.assert_has_calls([ mock.call('ems-autosupport-log', fake.EMS_MESSAGE)]) self.assertEqual(1, client_cmode.LOG.debug.call_count) def test_send_ems_log_message_api_error(self): # Mock client lest we not be able to see calls on its copy. self.mock_object(copy, 'deepcopy', mock.Mock(return_value=self.client)) self.mock_object(self.client, '_get_ems_log_destination_vserver', mock.Mock(return_value=fake.ADMIN_VSERVER_NAME)) self.mock_object(self.client, 'send_request', self._mock_api_error()) self.client.send_ems_log_message(fake.EMS_MESSAGE) self.client.send_request.assert_has_calls([ mock.call('ems-autosupport-log', fake.EMS_MESSAGE)]) self.assertEqual(1, client_cmode.LOG.warning.call_count) def test_get_aggregate_raid_types(self): api_response = netapp_api.NaElement(fake.AGGR_GET_RAID_TYPE_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_aggregate_raid_types( fake.SHARE_AGGREGATE_NAMES) aggr_get_iter_args = { 'query': { 'aggr-attributes': { 'aggregate-name': '|'.join(fake.SHARE_AGGREGATE_NAMES), } }, 'desired-attributes': { 'aggr-attributes': { 'aggregate-name': None, 'aggr-raid-attributes': { 'raid-type': None, } } } } expected = { fake.SHARE_AGGREGATE_NAMES[0]: fake.SHARE_AGGREGATE_RAID_TYPES[0], fake.SHARE_AGGREGATE_NAMES[1]: fake.SHARE_AGGREGATE_RAID_TYPES[1] } self.client.send_iter_request.assert_has_calls([ mock.call('aggr-get-iter', aggr_get_iter_args)]) self.assertDictEqual(expected, result) def test_get_aggregate_raid_types_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_aggregate_raid_types( fake.SHARE_AGGREGATE_NAMES) self.assertDictEqual({}, result) def test_get_aggregate_disk_types(self): api_response = netapp_api.NaElement( fake.STORAGE_DISK_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_aggregate_disk_types( fake.SHARE_AGGREGATE_NAMES) expected = { fake.SHARE_AGGREGATE_NAMES[0]: fake.SHARE_AGGREGATE_DISK_TYPE, fake.SHARE_AGGREGATE_NAMES[1]: fake.SHARE_AGGREGATE_DISK_TYPE } self.assertEqual(len(fake.SHARE_AGGREGATE_NAMES), self.client.send_request.call_count) self.assertDictEqual(expected, result) def test_get_aggregate_disk_types_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_aggregate_disk_types( fake.SHARE_AGGREGATE_NAMES) self.assertEqual(len(fake.SHARE_AGGREGATE_NAMES), self.client.send_request.call_count) self.assertDictEqual({}, result) def 
test_check_for_cluster_credentials(self): api_response = netapp_api.NaElement(fake.SYSTEM_NODE_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.check_for_cluster_credentials() self.assertTrue(result) def test_check_for_cluster_credentials_not_cluster(self): self.mock_object(self.client, 'send_iter_request', mock.Mock(side_effect=self._mock_api_error( netapp_api.EAPINOTFOUND))) result = self.client.check_for_cluster_credentials() self.assertFalse(result) def test_check_for_cluster_credentials_api_error(self): self.mock_object(self.client, 'send_iter_request', self._mock_api_error()) self.assertRaises(netapp_api.NaApiError, self.client.check_for_cluster_credentials) def test_create_cluster_peer(self): self.mock_object(self.client, 'send_request') self.client.create_cluster_peer(['fake_address_1', 'fake_address_2'], 'fake_user', 'fake_password', 'fake_passphrase') cluster_peer_create_args = { 'peer-addresses': [ {'remote-inet-address': 'fake_address_1'}, {'remote-inet-address': 'fake_address_2'}, ], 'user-name': 'fake_user', 'password': 'fake_password', 'passphrase': 'fake_passphrase', } self.client.send_request.assert_has_calls([ mock.call('cluster-peer-create', cluster_peer_create_args)]) def test_get_cluster_peers(self): api_response = netapp_api.NaElement( fake.CLUSTER_PEER_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_cluster_peers() cluster_peer_get_iter_args = {} self.client.send_iter_request.assert_has_calls([ mock.call('cluster-peer-get-iter', cluster_peer_get_iter_args)]) expected = [{ 'active-addresses': [ fake.CLUSTER_ADDRESS_1, fake.CLUSTER_ADDRESS_2 ], 'availability': 'available', 'cluster-name': fake.CLUSTER_NAME, 'cluster-uuid': 'fake_uuid', 'peer-addresses': [fake.CLUSTER_ADDRESS_1], 'remote-cluster-name': fake.REMOTE_CLUSTER_NAME, 'serial-number': 'fake_serial_number', 'timeout': '60', }] self.assertEqual(expected, result) def test_get_cluster_peers_single(self): api_response = netapp_api.NaElement( fake.CLUSTER_PEER_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) self.client.get_cluster_peers(remote_cluster_name=fake.CLUSTER_NAME) cluster_peer_get_iter_args = { 'query': { 'cluster-peer-info': { 'remote-cluster-name': fake.CLUSTER_NAME, } }, } self.client.send_iter_request.assert_has_calls([ mock.call('cluster-peer-get-iter', cluster_peer_get_iter_args)]) def test_get_cluster_peers_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_cluster_peers( remote_cluster_name=fake.CLUSTER_NAME) self.assertEqual([], result) self.assertTrue(self.client.send_iter_request.called) def test_delete_cluster_peer(self): self.mock_object(self.client, 'send_request') self.client.delete_cluster_peer(fake.CLUSTER_NAME) cluster_peer_delete_args = {'cluster-name': fake.CLUSTER_NAME} self.client.send_request.assert_has_calls([ mock.call('cluster-peer-delete', cluster_peer_delete_args)]) def test_get_cluster_peer_policy(self): self.client.features.add_feature('CLUSTER_PEER_POLICY') api_response = netapp_api.NaElement( fake.CLUSTER_PEER_POLICY_GET_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_cluster_peer_policy() expected = { 'is-unauthenticated-access-permitted': 
False, 'passphrase-minimum-length': 8 } self.assertEqual(expected, result) self.assertTrue(self.client.send_request.called) def test_get_cluster_peer_policy_not_supported(self): result = self.client.get_cluster_peer_policy() self.assertEqual({}, result) def test_set_cluster_peer_policy_not_supported(self): self.mock_object(self.client, 'send_request') self.client.set_cluster_peer_policy() self.assertFalse(self.client.send_request.called) def test_set_cluster_peer_policy_no_arguments(self): self.client.features.add_feature('CLUSTER_PEER_POLICY') self.mock_object(self.client, 'send_request') self.client.set_cluster_peer_policy() self.assertFalse(self.client.send_request.called) def test_set_cluster_peer_policy(self): self.client.features.add_feature('CLUSTER_PEER_POLICY') self.mock_object(self.client, 'send_request') self.client.set_cluster_peer_policy( is_unauthenticated_access_permitted=True, passphrase_minimum_length=12) cluster_peer_policy_modify_args = { 'is-unauthenticated-access-permitted': 'true', 'passphrase-minlength': '12', } self.client.send_request.assert_has_calls([ mock.call('cluster-peer-policy-modify', cluster_peer_policy_modify_args)]) def test_create_vserver_peer(self): self.mock_object(self.client, 'send_request') self.client.create_vserver_peer('fake_vserver', 'fake_vserver_peer') vserver_peer_create_args = { 'vserver': 'fake_vserver', 'peer-vserver': 'fake_vserver_peer', 'applications': [ {'vserver-peer-application': 'snapmirror'}, ], } self.client.send_request.assert_has_calls([ mock.call('vserver-peer-create', vserver_peer_create_args)]) def test_delete_vserver_peer(self): self.mock_object(self.client, 'send_request') self.client.delete_vserver_peer('fake_vserver', 'fake_vserver_peer') vserver_peer_delete_args = { 'vserver': 'fake_vserver', 'peer-vserver': 'fake_vserver_peer', } self.client.send_request.assert_has_calls([ mock.call('vserver-peer-delete', vserver_peer_delete_args)]) def test_accept_vserver_peer(self): self.mock_object(self.client, 'send_request') self.client.accept_vserver_peer('fake_vserver', 'fake_vserver_peer') vserver_peer_accept_args = { 'vserver': 'fake_vserver', 'peer-vserver': 'fake_vserver_peer', } self.client.send_request.assert_has_calls([ mock.call('vserver-peer-accept', vserver_peer_accept_args)]) def test_get_vserver_peers(self): api_response = netapp_api.NaElement( fake.VSERVER_PEER_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_vserver_peers( vserver_name=fake.VSERVER_NAME, peer_vserver_name=fake.VSERVER_NAME_2) vserver_peer_get_iter_args = { 'query': { 'vserver-peer-info': { 'vserver': fake.VSERVER_NAME, 'peer-vserver': fake.VSERVER_NAME_2, } }, } self.client.send_iter_request.assert_has_calls([ mock.call('vserver-peer-get-iter', vserver_peer_get_iter_args)]) expected = [{ 'vserver': 'fake_vserver', 'peer-vserver': 'fake_vserver_2', 'peer-state': 'peered', 'peer-cluster': 'fake_cluster' }] self.assertEqual(expected, result) def test_get_vserver_peers_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_vserver_peers( vserver_name=fake.VSERVER_NAME, peer_vserver_name=fake.VSERVER_NAME_2) self.assertEqual([], result) self.assertTrue(self.client.send_iter_request.called) def test_ensure_snapmirror_v2(self): self.assertIsNone(self.client._ensure_snapmirror_v2()) def test_ensure_snapmirror_v2_not_supported(self): 
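        # With the SNAPMIRROR_V2 feature flag turned off, the guard helper is
        # expected to raise NetAppException.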
self.client.features.add_feature('SNAPMIRROR_V2', supported=False) self.assertRaises(exception.NetAppException, self.client._ensure_snapmirror_v2) @ddt.data({'schedule': 'fake_schedule', 'policy': 'fake_policy'}, {'schedule': None, 'policy': None}) @ddt.unpack def test_create_snapmirror(self, schedule, policy): self.mock_object(self.client, 'send_request') self.client.create_snapmirror( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME, schedule=schedule, policy=policy) snapmirror_create_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, 'relationship-type': 'data_protection', } if schedule: snapmirror_create_args['schedule'] = schedule if policy: snapmirror_create_args['policy'] = policy self.client.send_request.assert_has_calls([ mock.call('snapmirror-create', snapmirror_create_args)]) def test_create_snapmirror_already_exists(self): mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError( code=netapp_api.ERELATION_EXISTS)) self.mock_object(self.client, 'send_request', mock_send_req) self.client.create_snapmirror( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) snapmirror_create_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, 'relationship-type': 'data_protection', } self.client.send_request.assert_has_calls([ mock.call('snapmirror-create', snapmirror_create_args)]) def test_create_snapmirror_error(self): mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError( code=0)) self.mock_object(self.client, 'send_request', mock_send_req) self.assertRaises(netapp_api.NaApiError, self.client.create_snapmirror, fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) self.assertTrue(self.client.send_request.called) @ddt.data( { 'source_snapshot': 'fake_snapshot', 'transfer_priority': 'fake_priority' }, { 'source_snapshot': None, 'transfer_priority': None } ) @ddt.unpack def test_initialize_snapmirror(self, source_snapshot, transfer_priority): api_response = netapp_api.NaElement(fake.SNAPMIRROR_INITIALIZE_RESULT) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.initialize_snapmirror( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME, source_snapshot=source_snapshot, transfer_priority=transfer_priority) snapmirror_initialize_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, } if source_snapshot: snapmirror_initialize_args['source-snapshot'] = source_snapshot if transfer_priority: snapmirror_initialize_args['transfer-priority'] = transfer_priority self.client.send_request.assert_has_calls([ mock.call('snapmirror-initialize', snapmirror_initialize_args)]) expected = { 'operation-id': None, 'status': 'succeeded', 'jobid': None, 'error-code': None, 'error-message': None } self.assertEqual(expected, result) @ddt.data(True, False) def test_release_snapmirror(self, relationship_info_only): self.mock_object(self.client, 'send_request') self.client.release_snapmirror( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME, relationship_info_only=relationship_info_only) 
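        # snapmirror-release-iter takes its filter inside a 'query' block, and
        # the boolean flag is serialized as the strings 'true'/'false'.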
snapmirror_release_args = { 'query': { 'snapmirror-destination-info': { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, 'relationship-info-only': ('true' if relationship_info_only else 'false'), } } } self.client.send_request.assert_has_calls([ mock.call('snapmirror-release-iter', snapmirror_release_args)]) def test_quiesce_snapmirror(self): self.mock_object(self.client, 'send_request') self.client.quiesce_snapmirror( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) snapmirror_quiesce_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, } self.client.send_request.assert_has_calls([ mock.call('snapmirror-quiesce', snapmirror_quiesce_args)]) @ddt.data(True, False) def test_abort_snapmirror(self, clear_checkpoint): self.mock_object(self.client, 'send_request') self.client.abort_snapmirror( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME, clear_checkpoint=clear_checkpoint) snapmirror_abort_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, 'clear-checkpoint': 'true' if clear_checkpoint else 'false', } self.client.send_request.assert_has_calls([ mock.call('snapmirror-abort', snapmirror_abort_args)]) def test_abort_snapmirror_no_transfer_in_progress(self): mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError( code=netapp_api.ENOTRANSFER_IN_PROGRESS)) self.mock_object(self.client, 'send_request', mock_send_req) self.client.abort_snapmirror( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) snapmirror_abort_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, 'clear-checkpoint': 'false', } self.client.send_request.assert_has_calls([ mock.call('snapmirror-abort', snapmirror_abort_args)]) def test_abort_snapmirror_error(self): mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError(code=0)) self.mock_object(self.client, 'send_request', mock_send_req) self.assertRaises(netapp_api.NaApiError, self.client.abort_snapmirror, fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) def test_break_snapmirror(self): self.mock_object(self.client, 'send_request') self.client.break_snapmirror( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) snapmirror_break_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, } self.client.send_request.assert_has_calls([ mock.call('snapmirror-break', snapmirror_break_args)]) @ddt.data( { 'schedule': 'fake_schedule', 'policy': 'fake_policy', 'tries': 5, 'max_transfer_rate': 1024, }, { 'schedule': None, 'policy': None, 'tries': None, 'max_transfer_rate': None, } ) @ddt.unpack def test_modify_snapmirror(self, schedule, policy, tries, max_transfer_rate): self.mock_object(self.client, 'send_request') self.client.modify_snapmirror( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME, schedule=schedule, policy=policy, 
tries=tries, max_transfer_rate=max_transfer_rate) snapmirror_modify_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, } if schedule: snapmirror_modify_args['schedule'] = schedule if policy: snapmirror_modify_args['policy'] = policy if tries: snapmirror_modify_args['tries'] = tries if max_transfer_rate: snapmirror_modify_args['max-transfer-rate'] = max_transfer_rate self.client.send_request.assert_has_calls([ mock.call('snapmirror-modify', snapmirror_modify_args)]) def test_update_snapmirror(self): self.mock_object(self.client, 'send_request') self.client.update_snapmirror( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) snapmirror_update_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, } self.client.send_request.assert_has_calls([ mock.call('snapmirror-update', snapmirror_update_args)]) def test_update_snapmirror_already_transferring(self): mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError( code=netapp_api.ETRANSFER_IN_PROGRESS)) self.mock_object(self.client, 'send_request', mock_send_req) self.client.update_snapmirror( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) snapmirror_update_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, } self.client.send_request.assert_has_calls([ mock.call('snapmirror-update', snapmirror_update_args)]) def test_update_snapmirror_already_transferring_two(self): mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError( code=netapp_api.EANOTHER_OP_ACTIVE)) self.mock_object(self.client, 'send_request', mock_send_req) self.client.update_snapmirror( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) snapmirror_update_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, } self.client.send_request.assert_has_calls([ mock.call('snapmirror-update', snapmirror_update_args)]) def test_update_snapmirror_error(self): mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError(code=0)) self.mock_object(self.client, 'send_request', mock_send_req) self.assertRaises(netapp_api.NaApiError, self.client.update_snapmirror, fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) def test_delete_snapmirror(self): self.mock_object(self.client, 'send_request') self.client.delete_snapmirror( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) snapmirror_delete_args = { 'query': { 'snapmirror-info': { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, } } } self.client.send_request.assert_has_calls([ mock.call('snapmirror-destroy-iter', snapmirror_delete_args)]) def test__get_snapmirrors(self): api_response = netapp_api.NaElement(fake.SNAPMIRROR_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) desired_attributes = { 'snapmirror-info': { 'source-vserver': None, 'source-volume': None, 
'destination-vserver': None, 'destination-volume': None, 'is-healthy': None, } } result = self.client._get_snapmirrors( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME, desired_attributes=desired_attributes) snapmirror_get_iter_args = { 'query': { 'snapmirror-info': { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, }, }, 'desired-attributes': { 'snapmirror-info': { 'source-vserver': None, 'source-volume': None, 'destination-vserver': None, 'destination-volume': None, 'is-healthy': None, }, }, } self.client.send_iter_request.assert_has_calls([ mock.call('snapmirror-get-iter', snapmirror_get_iter_args)]) self.assertEqual(1, len(result)) def test__get_snapmirrors_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client._get_snapmirrors() self.client.send_iter_request.assert_has_calls([ mock.call('snapmirror-get-iter', {})]) self.assertEqual([], result) def test_get_snapmirrors(self): api_response = netapp_api.NaElement( fake.SNAPMIRROR_GET_ITER_FILTERED_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) desired_attributes = ['source-vserver', 'source-volume', 'destination-vserver', 'destination-volume', 'is-healthy', 'mirror-state', 'schedule'] result = self.client.get_snapmirrors( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME, desired_attributes=desired_attributes) snapmirror_get_iter_args = { 'query': { 'snapmirror-info': { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, }, }, 'desired-attributes': { 'snapmirror-info': { 'source-vserver': None, 'source-volume': None, 'destination-vserver': None, 'destination-volume': None, 'is-healthy': None, 'mirror-state': None, 'schedule': None, }, }, } expected = [{ 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, 'is-healthy': 'true', 'mirror-state': 'snapmirrored', 'schedule': 'daily', }] self.client.send_iter_request.assert_has_calls([ mock.call('snapmirror-get-iter', snapmirror_get_iter_args)]) self.assertEqual(expected, result) def test_resume_snapmirror(self): self.mock_object(self.client, 'send_request') self.client.resume_snapmirror( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) snapmirror_resume_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, } self.client.send_request.assert_has_calls([ mock.call('snapmirror-resume', snapmirror_resume_args)]) def test_resume_snapmirror_not_quiesed(self): mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError( code=netapp_api.ERELATION_NOT_QUIESCED)) self.mock_object(self.client, 'send_request', mock_send_req) self.client.resume_snapmirror( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) snapmirror_resume_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': 
fake.SM_DEST_VOLUME, } self.client.send_request.assert_has_calls([ mock.call('snapmirror-resume', snapmirror_resume_args)]) def test_resume_snapmirror_error(self): mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError(code=0)) self.mock_object(self.client, 'send_request', mock_send_req) self.assertRaises(netapp_api.NaApiError, self.client.resume_snapmirror, fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) def test_resync_snapmirror(self): self.mock_object(self.client, 'send_request') self.client.resync_snapmirror( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) snapmirror_resync_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, } self.client.send_request.assert_has_calls([ mock.call('snapmirror-resync', snapmirror_resync_args)]) manila-2.0.0/manila/tests/share/drivers/netapp/dataontap/client/test_client_base.py0000664000567000056710000001405212701407107031700 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
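# Unit tests for the mode-agnostic NetApp Data ONTAP client base class
# (request plumbing, feature flags and ONTAPI version handling).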
import ddt import mock from oslo_log import log from manila.share.drivers.netapp.dataontap.client import api as netapp_api from manila.share.drivers.netapp.dataontap.client import client_base from manila import test from manila.tests.share.drivers.netapp.dataontap.client import fakes as fake @ddt.ddt class NetAppBaseClientTestCase(test.TestCase): def setUp(self): super(NetAppBaseClientTestCase, self).setUp() # Mock loggers as themselves to allow logger arg validation mock_logger = log.getLogger('mock_logger') self.mock_object(client_base.LOG, 'error', mock.Mock(side_effect=mock_logger.error)) self.client = client_base.NetAppBaseClient(**fake.CONNECTION_INFO) self.client.connection = mock.MagicMock() self.connection = self.client.connection def test_get_ontapi_version(self): version_response = netapp_api.NaElement(fake.ONTAPI_VERSION_RESPONSE) self.connection.invoke_successfully.return_value = version_response major, minor = self.client.get_ontapi_version(cached=False) self.assertEqual('1', major) self.assertEqual('19', minor) def test_get_ontapi_version_cached(self): self.connection.get_api_version.return_value = (1, 20) major, minor = self.client.get_ontapi_version() self.assertEqual(1, self.connection.get_api_version.call_count) self.assertEqual(1, major) self.assertEqual(20, minor) def test_get_system_version(self): version_response = netapp_api.NaElement( fake.SYSTEM_GET_VERSION_RESPONSE) self.connection.invoke_successfully.return_value = version_response result = self.client.get_system_version() self.assertEqual(fake.VERSION, result['version']) self.assertEqual(('8', '2', '1'), result['version-tuple']) def test_init_features(self): self.client._init_features() self.assertSetEqual(set(), self.client.features.defined_features) @ddt.data('tag_name', '{http://www.netapp.com/filer/admin}tag_name') def test_strip_xml_namespace(self, element): result = self.client._strip_xml_namespace(element) self.assertEqual('tag_name', result) def test_send_request(self): element = netapp_api.NaElement('fake-api') self.client.send_request('fake-api') self.assertEqual( element.to_string(), self.connection.invoke_successfully.call_args[0][0].to_string()) self.assertTrue(self.connection.invoke_successfully.call_args[0][1]) def test_send_request_no_tunneling(self): element = netapp_api.NaElement('fake-api') self.client.send_request('fake-api', enable_tunneling=False) self.assertEqual( element.to_string(), self.connection.invoke_successfully.call_args[0][0].to_string()) self.assertFalse(self.connection.invoke_successfully.call_args[0][1]) def test_send_request_with_args(self): element = netapp_api.NaElement('fake-api') api_args = {'arg1': 'data1', 'arg2': 'data2'} element.translate_struct(api_args) self.client.send_request('fake-api', api_args=api_args) self.assertEqual( element.to_string(), self.connection.invoke_successfully.call_args[0][0].to_string()) self.assertTrue(self.connection.invoke_successfully.call_args[0][1]) def test_get_licenses(self): api_response = netapp_api.NaElement(fake.LICENSE_V2_LIST_INFO_RESPONSE) self.mock_object( self.client, 'send_request', mock.Mock(return_value=api_response)) response = self.client.get_licenses() self.assertSequenceEqual(fake.LICENSES, response) def test_get_licenses_api_error(self): self.mock_object(self.client, 'send_request', mock.Mock(side_effect=netapp_api.NaApiError)) self.assertRaises(netapp_api.NaApiError, self.client.get_licenses) self.assertEqual(1, client_base.LOG.error.call_count) def test_send_ems_log_message(self): 
self.assertRaises(NotImplementedError, self.client.send_ems_log_message, {}) @ddt.ddt class FeaturesTestCase(test.TestCase): def setUp(self): super(FeaturesTestCase, self).setUp() self.features = client_base.Features() def test_init(self): self.assertSetEqual(set(), self.features.defined_features) def test_add_feature_default(self): self.features.add_feature('FEATURE_1') self.assertTrue(self.features.FEATURE_1) self.assertIn('FEATURE_1', self.features.defined_features) @ddt.data(True, False) def test_add_feature(self, value): self.features.add_feature('FEATURE_2', value) self.assertEqual(value, self.features.FEATURE_2) self.assertIn('FEATURE_2', self.features.defined_features) @ddt.data('True', 'False', 0, 1, 1.0, None, [], {}, (True,)) def test_add_feature_type_error(self, value): self.assertRaises(TypeError, self.features.add_feature, 'FEATURE_3', value) self.assertNotIn('FEATURE_3', self.features.defined_features) def test_get_attr_missing(self): self.assertRaises(AttributeError, getattr, self.features, 'FEATURE_4') manila-2.0.0/manila/tests/share/drivers/netapp/dataontap/client/fakes.py0000664000567000056710000020245312701407107027466 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from lxml import etree import mock from six.moves import urllib from manila.share.drivers.netapp.dataontap.client import api CONNECTION_INFO = { 'hostname': 'hostname', 'transport_type': 'https', 'port': 443, 'username': 'admin', 'password': 'passw0rd' } CLUSTER_NAME = 'fake_cluster' REMOTE_CLUSTER_NAME = 'fake_cluster_2' CLUSTER_ADDRESS_1 = 'fake_cluster_address' CLUSTER_ADDRESS_2 = 'fake_cluster_address_2' VERSION = 'NetApp Release 8.2.1 Cluster-Mode: Fri Mar 21 14:25:07 PDT 2014' NODE_NAME = 'fake_node' VSERVER_NAME = 'fake_vserver' VSERVER_NAME_2 = 'fake_vserver_2' ADMIN_VSERVER_NAME = 'fake_admin_vserver' NODE_VSERVER_NAME = 'fake_node_vserver' ROOT_VOLUME_AGGREGATE_NAME = 'fake_root_aggr' ROOT_VOLUME_NAME = 'fake_root_volume' SHARE_AGGREGATE_NAME = 'fake_aggr1' SHARE_AGGREGATE_NAMES = ('fake_aggr1', 'fake_aggr2') SHARE_AGGREGATE_RAID_TYPES = ('raid4', 'raid_dp') SHARE_AGGREGATE_DISK_TYPE = 'FCAL' SHARE_NAME = 'fake_share' SHARE_SIZE = '1000000000' SHARE_NAME_2 = 'fake_share_2' SNAPSHOT_NAME = 'fake_snapshot' CG_SNAPSHOT_ID = 'fake_cg_id' PARENT_SHARE_NAME = 'fake_parent_share' PARENT_SNAPSHOT_NAME = 'fake_parent_snapshot' MAX_FILES = 5000 LANGUAGE = 'fake_language' SNAPSHOT_POLICY_NAME = 'fake_snapshot_policy' EXPORT_POLICY_NAME = 'fake_export_policy' DELETED_EXPORT_POLICIES = { VSERVER_NAME: [ 'deleted_manila_fake_policy_1', 'deleted_manila_fake_policy_2', ], VSERVER_NAME_2: [ 'deleted_manila_fake_policy_3', ], } USER_NAME = 'fake_user' PORT = 'e0a' VLAN = '1001' VLAN_PORT = 'e0a-1001' IP_ADDRESS = '10.10.10.10' NETMASK = '255.255.255.0' NET_ALLOCATION_ID = 'fake_allocation_id' LIF_NAME_TEMPLATE = 'os_%(net_allocation_id)s' LIF_NAME = LIF_NAME_TEMPLATE % {'net_allocation_id': NET_ALLOCATION_ID} IPSPACE_NAME = 'fake_ipspace' BROADCAST_DOMAIN = 'fake_domain' MTU = 9000 SM_SOURCE_VSERVER = 'fake_source_vserver' SM_SOURCE_VOLUME = 'fake_source_volume' SM_DEST_VSERVER = 'fake_destination_vserver' SM_DEST_VOLUME = 'fake_destination_volume' IPSPACES = [{ 'uuid': 'fake_uuid', 'ipspace': IPSPACE_NAME, 'id': 'fake_id', 'broadcast-domains': ['OpenStack'], 'ports': [NODE_NAME + ':' + VLAN_PORT], 'vservers': [ IPSPACE_NAME, VSERVER_NAME, ] }] EMS_MESSAGE = { 'computer-name': 'fake_host', 'event-id': '0', 'event-source': 'fake driver', 'app-version': 'fake app version', 'category': 'fake category', 'event-description': 'fake description', 'log-level': '6', 'auto-support': 'false', } NO_RECORDS_RESPONSE = etree.XML(""" 0 """) PASSED_RESPONSE = etree.XML(""" """) INVALID_GET_ITER_RESPONSE_NO_ATTRIBUTES = etree.XML(""" 1 fake_tag """) INVALID_GET_ITER_RESPONSE_NO_RECORDS = etree.XML(""" fake_tag """) VSERVER_GET_ITER_RESPONSE = etree.XML(""" %(fake_vserver)s 1 """ % {'fake_vserver': VSERVER_NAME}) VSERVER_GET_ROOT_VOLUME_NAME_RESPONSE = etree.XML(""" %(root_volume)s %(fake_vserver)s 1 """ % {'root_volume': ROOT_VOLUME_NAME, 'fake_vserver': VSERVER_NAME}) VSERVER_GET_IPSPACE_NAME_RESPONSE = etree.XML(""" %(ipspace)s %(fake_vserver)s 1 """ % {'ipspace': IPSPACE_NAME, 'fake_vserver': VSERVER_NAME}) VSERVER_GET_RESPONSE = etree.XML(""" %(aggr1)s %(aggr2)s 45678592 %(aggr1)s 6448431104 %(aggr2)s %(vserver)s """ % { 'vserver': VSERVER_NAME, 'aggr1': SHARE_AGGREGATE_NAMES[0], 'aggr2': SHARE_AGGREGATE_NAMES[1], }) VSERVER_DATA_LIST_RESPONSE = etree.XML(""" %(vserver)s data 1 """ % {'vserver': VSERVER_NAME}) VSERVER_AGGREGATES = { SHARE_AGGREGATE_NAMES[0]: { 'available': 45678592, }, SHARE_AGGREGATE_NAMES[1]: { 'available': 6448431104, }, } VSERVER_GET_RESPONSE_NO_AGGREGATES = etree.XML(""" %(vserver)s """ % 
{'vserver': VSERVER_NAME}) ONTAPI_VERSION_RESPONSE = etree.XML(""" 1 19 """) SYSTEM_GET_VERSION_RESPONSE = etree.XML(""" 1395426307 true %(version)s 8 2 1 """ % {'version': VERSION}) LICENSE_V2_LIST_INFO_RESPONSE = etree.XML(""" none Cluster Base License false cluster3 base 1-80-000008 license none NFS License false cluster3-01 nfs 1-81-0000000000000004082368507 license none CIFS License false cluster3-01 cifs 1-81-0000000000000004082368507 license none iSCSI License false cluster3-01 iscsi 1-81-0000000000000004082368507 license none FCP License false cluster3-01 fcp 1-81-0000000000000004082368507 license none SnapRestore License false cluster3-01 snaprestore 1-81-0000000000000004082368507 license none SnapMirror License false cluster3-01 snapmirror 1-81-0000000000000004082368507 license none FlexClone License false cluster3-01 flexclone 1-81-0000000000000004082368507 license none SnapVault License false cluster3-01 snapvault 1-81-0000000000000004082368507 license """) LICENSES = ( 'base', 'cifs', 'fcp', 'flexclone', 'iscsi', 'nfs', 'snapmirror', 'snaprestore', 'snapvault' ) VOLUME_COUNT_RESPONSE = etree.XML(""" vol0 cluster3-01 %(root_volume)s %(fake_vserver)s 2 """ % {'root_volume': ROOT_VOLUME_NAME, 'fake_vserver': VSERVER_NAME}) CIFS_SECURITY_SERVICE = { 'type': 'active_directory', 'password': 'fake_password', 'user': 'fake_user', 'domain': 'fake_domain', 'dns_ip': 'fake_dns_ip', } LDAP_SECURITY_SERVICE = { 'type': 'ldap', 'password': 'fake_password', 'server': 'fake_server', 'id': 'fake_id', } KERBEROS_SECURITY_SERVICE = { 'type': 'kerberos', 'password': 'fake_password', 'user': 'fake_user', 'server': 'fake_server', 'id': 'fake_id', 'domain': 'fake_domain', 'dns_ip': 'fake_dns_ip', } KERBEROS_SERVICE_PRINCIPAL_NAME = 'nfs/fake-vserver.fake_domain@FAKE_DOMAIN' INVALID_SECURITY_SERVICE = { 'type': 'fake', } SYSTEM_NODE_GET_ITER_RESPONSE = etree.XML(""" %s 1 """ % NODE_NAME) NET_PORT_GET_ITER_RESPONSE = etree.XML(""" full full auto true true true up 00:0c:29:fc:04:d9 1500 %(node_name)s full none 10 e0a physical data full full auto true true true up 00:0c:29:fc:04:e3 1500 %(node_name)s full none 100 e0b physical data full full auto true true true up 00:0c:29:fc:04:ed 1500 %(node_name)s full none 1000 e0c physical data full full auto true true true up 00:0c:29:fc:04:f7 1500 %(node_name)s full none 10000 e0d physical data 4 """ % {'node_name': NODE_NAME}) SPEED_SORTED_PORTS = ( {'node': NODE_NAME, 'port': 'e0d', 'speed': '10000'}, {'node': NODE_NAME, 'port': 'e0c', 'speed': '1000'}, {'node': NODE_NAME, 'port': 'e0b', 'speed': '100'}, {'node': NODE_NAME, 'port': 'e0a', 'speed': '10'}, ) PORT_NAMES = ('e0a', 'e0b', 'e0c', 'e0d') SPEED_SORTED_PORT_NAMES = ('e0d', 'e0c', 'e0b', 'e0a') UNSORTED_PORTS_ALL_SPEEDS = ( {'node': NODE_NAME, 'port': 'port6', 'speed': 'undef'}, {'node': NODE_NAME, 'port': 'port3', 'speed': '100'}, {'node': NODE_NAME, 'port': 'port1', 'speed': '10000'}, {'node': NODE_NAME, 'port': 'port4', 'speed': '10'}, {'node': NODE_NAME, 'port': 'port7'}, {'node': NODE_NAME, 'port': 'port2', 'speed': '1000'}, {'node': NODE_NAME, 'port': 'port5', 'speed': 'auto'}, ) SORTED_PORTS_ALL_SPEEDS = ( {'node': NODE_NAME, 'port': 'port1', 'speed': '10000'}, {'node': NODE_NAME, 'port': 'port2', 'speed': '1000'}, {'node': NODE_NAME, 'port': 'port3', 'speed': '100'}, {'node': NODE_NAME, 'port': 'port4', 'speed': '10'}, {'node': NODE_NAME, 'port': 'port5', 'speed': 'auto'}, {'node': NODE_NAME, 'port': 'port6', 'speed': 'undef'}, {'node': NODE_NAME, 'port': 'port7'}, ) 
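# The two tuples above document the expected ordering: ports sorted fastest
# first (10000 > 1000 > 100 > 10), with 'auto', 'undef' and speedless entries
# pushed to the end.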
NET_PORT_GET_ITER_BROADCAST_DOMAIN_RESPONSE = etree.XML(""" %(ipspace)s %(domain)s %(node)s %(port)s 1 """ % { 'domain': BROADCAST_DOMAIN, 'node': NODE_NAME, 'port': PORT, 'ipspace': IPSPACE_NAME, }) NET_PORT_GET_ITER_BROADCAST_DOMAIN_MISSING_RESPONSE = etree.XML(""" %(ipspace)s %(node)s %(port)s 1 """ % {'node': NODE_NAME, 'port': PORT, 'ipspace': IPSPACE_NAME}) NET_PORT_BROADCAST_DOMAIN_GET_ITER_RESPONSE = etree.XML(""" %(domain)s %(ipspace)s 1 """ % {'domain': BROADCAST_DOMAIN, 'ipspace': IPSPACE_NAME}) NET_IPSPACES_GET_ITER_RESPONSE = etree.XML(""" OpenStack fake_id %(ipspace)s %(node)s:%(port)s fake_uuid %(ipspace)s %(vserver)s 1 """ % { 'ipspace': IPSPACE_NAME, 'node': NODE_NAME, 'port': VLAN_PORT, 'vserver': VSERVER_NAME }) NET_INTERFACE_GET_ITER_RESPONSE = etree.XML("""
    192.168.228.42
    ipv4 up %(node)s e0c none none system-defined disabled mgmt %(node)s e0c cluster_mgmt true true d3230112-7524-11e4-8608-123478563412 false %(netmask)s 24 up cluster_mgmt c192.168.228.0/24 system_defined cluster3
    192.168.228.43
    ipv4 up %(node)s e0d none system-defined nextavail mgmt %(node)s e0d mgmt1 true true 0ccc57cc-7525-11e4-8608-123478563412 false %(netmask)s 24 up node_mgmt n192.168.228.0/24 system_defined cluster3-01
    %(address)s
    ipv4 up %(node)s %(vlan)s nfs cifs none system-defined nextavail data %(node)s %(vlan)s %(lif)s false true db4d91b6-95d9-11e4-8608-123478563412 false %(netmask)s 24 up data d10.0.0.0/24 system_defined %(vserver)s
    3
    """ % { 'lif': LIF_NAME, 'vserver': VSERVER_NAME, 'node': NODE_NAME, 'address': IP_ADDRESS, 'netmask': NETMASK, 'vlan': VLAN_PORT, }) LIF_NAMES = ('cluster_mgmt', 'mgmt1', LIF_NAME) NET_INTERFACE_GET_ITER_RESPONSE_NFS = etree.XML("""
    %(address)s
    ipv4 up %(node)s %(vlan)s nfs cifs none system-defined nextavail data %(node)s %(vlan)s %(lif)s false true db4d91b6-95d9-11e4-8608-123478563412 false %(netmask)s 24 up data d10.0.0.0/24 system_defined %(vserver)s
    1
    """ % { 'lif': LIF_NAME, 'vserver': VSERVER_NAME, 'node': NODE_NAME, 'address': IP_ADDRESS, 'netmask': NETMASK, 'vlan': VLAN_PORT, }) LIFS = ( {'address': '192.168.228.42', 'home-node': NODE_NAME, 'home-port': 'e0c', 'interface-name': 'cluster_mgmt', 'netmask': NETMASK, 'role': 'cluster_mgmt', 'vserver': 'cluster3' }, {'address': '192.168.228.43', 'home-node': NODE_NAME, 'home-port': 'e0d', 'interface-name': 'mgmt1', 'netmask': NETMASK, 'role': 'node_mgmt', 'vserver': 'cluster3-01' }, {'address': IP_ADDRESS, 'home-node': NODE_NAME, 'home-port': VLAN_PORT, 'interface-name': LIF_NAME, 'netmask': NETMASK, 'role': 'data', 'vserver': VSERVER_NAME, }, ) NFS_LIFS = [ {'address': IP_ADDRESS, 'home-node': NODE_NAME, 'home-port': VLAN_PORT, 'interface-name': LIF_NAME, 'netmask': NETMASK, 'role': 'data', 'vserver': VSERVER_NAME, }, ] NET_INTERFACE_GET_ONE_RESPONSE = etree.XML(""" %(lif)s %(vserver)s 1 """ % {'lif': LIF_NAME, 'vserver': VSERVER_NAME}) AGGR_GET_NAMES_RESPONSE = etree.XML(""" /%(aggr1)s/plex0 /%(aggr1)s/plex0/rg0 %(aggr1)s /%(aggr2)s/plex0 /%(aggr2)s/plex0/rg0 /%(aggr2)s/plex0/rg1 %(aggr2)s 2 """ % { 'aggr1': SHARE_AGGREGATE_NAMES[0], 'aggr2': SHARE_AGGREGATE_NAMES[1], }) AGGR_GET_SPACE_RESPONSE = etree.XML(""" /%(aggr1)s/plex0 /%(aggr1)s/plex0/rg0 45670400 943718400 898048000 %(aggr1)s /%(aggr2)s/plex0 /%(aggr2)s/plex0/rg0 /%(aggr2)s/plex0/rg1 4267659264 7549747200 3282087936 %(aggr2)s 2 """ % { 'aggr1': SHARE_AGGREGATE_NAMES[0], 'aggr2': SHARE_AGGREGATE_NAMES[1], }) AGGR_GET_NODE_RESPONSE = etree.XML(""" %(node)s %(aggr)s 1 """ % { 'aggr': SHARE_AGGREGATE_NAME, 'node': NODE_NAME }) AGGR_GET_ITER_RESPONSE = etree.XML(""" false 64_bit 1758646411 aggr 512 30384 96 30384 30384 30384 243191 96 0 4082368507 cluster3-01 4082368507 cluster3-01 off 0 active block 3 cfo true false true false false false unmirrored online 1 true false /%(aggr1)s/plex0 normal,active block false false false /%(aggr1)s/plex0/rg0 0 0 0 on 16 raid_dp, normal raid_dp online false 0 0 true true 0 0 0 0 0 0 0 0 0 245760 0 95 45670400 943718400 898048000 0 898048000 897802240 1 0 0 %(aggr1)s 15863632-ea49-49a8-9c88-2bd2d57c6d7a cluster3-01 unknown false 64_bit 706602229 aggr 528 31142 96 31142 31142 31142 1945584 96 0 4082368507 cluster3-01 4082368507 cluster3-01 off 0 active block 10 sfo false false true false false false unmirrored online 1 true false /%(aggr2)s/plex0 normal,active block false false false /%(aggr2)s/plex0/rg0 0 0 block false false false /%(aggr2)s/plex0/rg1 0 0 0 on 8 raid4, normal raid4 online false 0 0 true true 0 0 0 0 0 0 0 0 0 425984 0 15 6448431104 7549747200 1101316096 0 1101316096 1100890112 2 0 0 %(aggr2)s 2a741934-1aaf-42dd-93ca-aaf231be108a cluster3-01 not_striped 2 """ % { 'aggr1': SHARE_AGGREGATE_NAMES[0], 'aggr2': SHARE_AGGREGATE_NAMES[1], }) VOLUME_GET_NAME_RESPONSE = etree.XML(""" %(volume)s %(vserver)s 1 """ % {'volume': SHARE_NAME, 'vserver': VSERVER_NAME}) VOLUME_GET_VOLUME_PATH_RESPONSE = etree.XML(""" /%(volume)s """ % {'volume': SHARE_NAME}) VOLUME_GET_VOLUME_PATH_CIFS_RESPONSE = etree.XML(""" \\%(volume)s """ % {'volume': SHARE_NAME}) VOLUME_JUNCTION_PATH = '/' + SHARE_NAME VOLUME_JUNCTION_PATH_CIFS = '\\' + SHARE_NAME VOLUME_MODIFY_ITER_RESPONSE = etree.XML(""" 0 1 %(volume)s %(vserver)s """ % {'volume': SHARE_NAME, 'vserver': VSERVER_NAME}) VOLUME_MODIFY_ITER_ERROR_RESPONSE = etree.XML(""" 160 Unable to set volume attribute "size" %(volume)s %(vserver)s 1 0 """ % {'volume': SHARE_NAME, 'vserver': VSERVER_NAME}) SNAPSHOT_GET_ITER_NOT_BUSY_RESPONSE = etree.XML(""" false 
%(snap)s %(volume)s %(vserver)s 1 """ % {'snap': SNAPSHOT_NAME, 'volume': SHARE_NAME, 'vserver': VSERVER_NAME}) SNAPSHOT_GET_ITER_BUSY_RESPONSE = etree.XML(""" true %(snap)s %(volume)s %(vserver)s volume clone 1 """ % {'snap': SNAPSHOT_NAME, 'volume': SHARE_NAME, 'vserver': VSERVER_NAME}) SNAPSHOT_GET_ITER_NOT_UNIQUE_RESPONSE = etree.XML(""" false %(snap)s %(volume)s %(vserver)s false %(snap)s %(root_volume)s %(admin_vserver)s 1 """ % { 'snap': SNAPSHOT_NAME, 'volume': SHARE_NAME, 'vserver': VSERVER_NAME, 'root_volume': ROOT_VOLUME_NAME, 'admin_vserver': ADMIN_VSERVER_NAME, }) SNAPSHOT_GET_ITER_UNAVAILABLE_RESPONSE = etree.XML(""" 0 13023 %(volume)s Unable to get information for Snapshot copies of volume \ "%(volume)s" on Vserver "%(vserver)s". Reason: Volume not online. %(vserver)s """ % {'volume': SHARE_NAME, 'vserver': VSERVER_NAME}) SNAPSHOT_GET_ITER_OTHER_ERROR_RESPONSE = etree.XML(""" 0 99999 %(volume)s Unable to get information for Snapshot copies of volume \ "%(volume)s" on Vserver "%(vserver)s". %(vserver)s """ % {'volume': SHARE_NAME, 'vserver': VSERVER_NAME}) SNAPSHOT_MULTIDELETE_ERROR_RESPONSE = etree.XML(""" 13021 %(volume)s No such snapshot. """ % {'volume': SHARE_NAME}) SNAPSHOT_GET_ITER_DELETED_RESPONSE = etree.XML(""" deleted_manila_%(snap)s %(volume)s %(vserver)s 1 """ % { 'snap': SNAPSHOT_NAME, 'volume': SHARE_NAME, 'vserver': VSERVER_NAME, }) CIFS_SHARE_ACCESS_CONTROL_GET_ITER = etree.XML(""" full_control %(volume)s Administrator manila_svm_cifs change %(volume)s Administrators manila_svm_cifs read %(volume)s Power Users manila_svm_cifs no_access %(volume)s Users manila_svm_cifs 4 """ % {'volume': SHARE_NAME}) NFS_EXPORT_RULES = ('10.10.10.10', '10.10.10.20') NFS_EXPORTFS_LIST_RULES_2_NO_RULES_RESPONSE = etree.XML(""" """) NFS_EXPORTFS_LIST_RULES_2_RESPONSE = etree.XML(""" %(path)s 65534 false %(host1)s %(host2)s %(host1)s %(host2)s %(host1)s %(host2)s sys """ % { 'path': VOLUME_JUNCTION_PATH, 'host1': NFS_EXPORT_RULES[0], 'host2': NFS_EXPORT_RULES[1], }) AGGR_GET_RAID_TYPE_RESPONSE = etree.XML(""" /%(aggr1)s/plex0 /%(aggr1)s/plex0/rg0 %(raid_type1)s %(aggr1)s /%(aggr2)s/plex0 /%(aggr2)s/plex0/rg0 /%(aggr2)s/plex0/rg1 %(raid_type2)s %(aggr2)s 2 """ % { 'aggr1': SHARE_AGGREGATE_NAMES[0], 'aggr2': SHARE_AGGREGATE_NAMES[1], 'raid_type1': SHARE_AGGREGATE_RAID_TYPES[0], 'raid_type2': SHARE_AGGREGATE_RAID_TYPES[1] }) STORAGE_DISK_GET_ITER_RESPONSE = etree.XML(""" cluster3-01:v5.19 %s 1 """ % SHARE_AGGREGATE_DISK_TYPE) STORAGE_DISK_GET_ITER_RESPONSE_PAGE_1 = etree.XML(""" cluster3-01:v4.16 cluster3-01:v4.17 cluster3-01:v4.18 cluster3-01:v4.19 cluster3-01:v4.20 cluster3-01:v4.21 cluster3-01:v4.22 cluster3-01:v4.24 cluster3-01:v4.25 cluster3-01:v4.26 next_tag_1 10 """) STORAGE_DISK_GET_ITER_RESPONSE_PAGE_2 = etree.XML(""" cluster3-01:v4.27 cluster3-01:v4.28 cluster3-01:v4.29 cluster3-01:v4.32 cluster3-01:v5.16 cluster3-01:v5.17 cluster3-01:v5.18 cluster3-01:v5.19 cluster3-01:v5.20 cluster3-01:v5.21 next_tag_2 10 """) STORAGE_DISK_GET_ITER_RESPONSE_PAGE_3 = etree.XML(""" cluster3-01:v5.22 cluster3-01:v5.24 cluster3-01:v5.25 cluster3-01:v5.26 cluster3-01:v5.27 cluster3-01:v5.28 cluster3-01:v5.29 cluster3-01:v5.32 8 """) GET_AGGREGATE_FOR_VOLUME_RESPONSE = etree.XML(""" %(aggr)s %(share)s os_aa666789-5576-4835-87b7-868069856459 1 """ % { 'aggr': SHARE_AGGREGATE_NAME, 'share': SHARE_NAME }) EXPORT_RULE_GET_ITER_RESPONSE = etree.XML(""" %(rule)s %(policy)s 3 manila_svm %(rule)s %(policy)s 1 manila_svm 2 """ % {'policy': EXPORT_POLICY_NAME, 'rule': IP_ADDRESS}) 
VOLUME_GET_EXPORT_POLICY_RESPONSE = etree.XML(""" %(policy)s %(volume)s manila_svm 1 """ % {'policy': EXPORT_POLICY_NAME, 'volume': SHARE_NAME}) DELETED_EXPORT_POLICY_GET_ITER_RESPONSE = etree.XML(""" %(policy1)s %(vserver)s %(policy2)s %(vserver)s %(policy3)s %(vserver2)s 2 """ % { 'vserver': VSERVER_NAME, 'vserver2': VSERVER_NAME_2, 'policy1': DELETED_EXPORT_POLICIES[VSERVER_NAME][0], 'policy2': DELETED_EXPORT_POLICIES[VSERVER_NAME][1], 'policy3': DELETED_EXPORT_POLICIES[VSERVER_NAME_2][0], }) LUN_GET_ITER_RESPONSE = etree.XML(""" /vol/%(volume)s/fakelun %(volume)s %(vserver)s 1 """ % { 'vserver': VSERVER_NAME, 'volume': SHARE_NAME, }) VOLUME_GET_ITER_JUNCTIONED_VOLUMES_RESPONSE = etree.XML(""" fake_volume test 1 """) VOLUME_GET_ITER_VOLUME_TO_MANAGE_RESPONSE = etree.XML(""" %(aggr)s /%(volume)s %(volume)s %(vserver)s rw %(size)s 1 """ % { 'aggr': SHARE_AGGREGATE_NAME, 'vserver': VSERVER_NAME, 'volume': SHARE_NAME, 'size': SHARE_SIZE, }) CLONE_CHILD_1 = 'fake_child_1' CLONE_CHILD_2 = 'fake_child_2' VOLUME_GET_ITER_CLONE_CHILDREN_RESPONSE = etree.XML(""" %(clone1)s %(vserver)s %(clone2)s %(vserver)s 2 """ % { 'vserver': VSERVER_NAME, 'clone1': CLONE_CHILD_1, 'clone2': CLONE_CHILD_2, }) SIS_GET_ITER_RESPONSE = etree.XML(""" true /vol/%(volume)s enabled %(vserver)s """ % { 'vserver': VSERVER_NAME, 'volume': SHARE_NAME, }) CLUSTER_PEER_GET_ITER_RESPONSE = etree.XML(""" %(addr1)s %(addr2)s available %(cluster)s fake_uuid %(addr1)s %(remote_cluster)s fake_serial_number 60 1 """ % { 'addr1': CLUSTER_ADDRESS_1, 'addr2': CLUSTER_ADDRESS_2, 'cluster': CLUSTER_NAME, 'remote_cluster': REMOTE_CLUSTER_NAME, }) CLUSTER_PEER_POLICY_GET_RESPONSE = etree.XML(""" false 8 """) VSERVER_PEER_GET_ITER_RESPONSE = etree.XML(""" snapmirror %(cluster)s peered %(vserver2)s %(vserver1)s 2 """ % { 'cluster': CLUSTER_NAME, 'vserver1': VSERVER_NAME, 'vserver2': VSERVER_NAME_2 }) SNAPMIRROR_GET_ITER_RESPONSE = etree.XML(""" fake_destination_volume fake_destination_node fake_destination_vserver fake_snapshot 1442701782 false true 2187 109 1442701890 test:manila 1171456 initialize 0 snapmirrored fake_snapshot 1442701782 DPDefault v2 ea8bfcc6-5f1d-11e5-8446-123478563412 idle data_protection daily fake_source_volume fake_source_vserver fake_destination_vserver 1 """) SNAPMIRROR_GET_ITER_FILTERED_RESPONSE = etree.XML(""" fake_destination_vserver fake_destination_volume true snapmirrored daily fake_source_vserver fake_source_volume 1 """) SNAPMIRROR_INITIALIZE_RESULT = etree.XML(""" succeeded """) FAKE_VOL_XML = """ open123 online 0 0 0 false false """ FAKE_XML1 = """\ abc\ abc\ """ FAKE_XML2 = """somecontent""" FAKE_NA_ELEMENT = api.NaElement(etree.XML(FAKE_VOL_XML)) FAKE_INVOKE_DATA = 'somecontent' FAKE_XML_STR = 'abc' FAKE_API_NAME = 'volume-get-iter' FAKE_API_NAME_ELEMENT = api.NaElement(FAKE_API_NAME) FAKE_NA_SERVER_STR = '127.0.0.1' FAKE_NA_SERVER = api.NaServer(FAKE_NA_SERVER_STR) FAKE_NA_SERVER_API_1_5 = api.NaServer(FAKE_NA_SERVER_STR) FAKE_NA_SERVER_API_1_5.set_vfiler('filer') FAKE_NA_SERVER_API_1_5.set_api_version(1, 5) FAKE_NA_SERVER_API_1_14 = api.NaServer(FAKE_NA_SERVER_STR) FAKE_NA_SERVER_API_1_14.set_vserver('server') FAKE_NA_SERVER_API_1_14.set_api_version(1, 14) FAKE_NA_SERVER_API_1_20 = api.NaServer(FAKE_NA_SERVER_STR) FAKE_NA_SERVER_API_1_20.set_vfiler('filer') FAKE_NA_SERVER_API_1_20.set_vserver('server') FAKE_NA_SERVER_API_1_20.set_api_version(1, 20) FAKE_QUERY = {'volume-attributes': None} FAKE_DES_ATTR = {'volume-attributes': ['volume-id-attributes', 'volume-space-attributes', 
'volume-state-attributes', 'volume-qos-attributes']} FAKE_CALL_ARGS_LIST = [mock.call(80), mock.call(8088), mock.call(443), mock.call(8488)] FAKE_RESULT_API_ERR_REASON = api.NaElement('result') FAKE_RESULT_API_ERR_REASON.add_attr('errno', '000') FAKE_RESULT_API_ERR_REASON.add_attr('reason', 'fake_reason') FAKE_RESULT_API_ERRNO_INVALID = api.NaElement('result') FAKE_RESULT_API_ERRNO_INVALID.add_attr('errno', '000') FAKE_RESULT_API_ERRNO_VALID = api.NaElement('result') FAKE_RESULT_API_ERRNO_VALID.add_attr('errno', '14956') FAKE_RESULT_SUCCESS = api.NaElement('result') FAKE_RESULT_SUCCESS.add_attr('status', 'passed') FAKE_HTTP_OPENER = urllib.request.build_opener() manila-2.0.0/manila/tests/share/drivers/netapp/fakes.py0000664000567000056710000000247112701407107024233 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.share import configuration as conf from manila.share import driver as manila_opts from manila.share.drivers.netapp import options as na_opts def create_configuration(): config = conf.Configuration(None) config.append_config_values(manila_opts.share_opts) config.append_config_values(na_opts.netapp_connection_opts) config.append_config_values(na_opts.netapp_transport_opts) config.append_config_values(na_opts.netapp_basicauth_opts) config.append_config_values(na_opts.netapp_provisioning_opts) return config def create_configuration_cmode(): config = create_configuration() config.append_config_values(na_opts.netapp_support_opts) return config manila-2.0.0/manila/tests/share/drivers/ganesha/0000775000567000056710000000000012701407265022710 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/ganesha/__init__.py0000664000567000056710000000000012701407107025002 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/ganesha/test_utils.py0000664000567000056710000000546712701407107025470 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
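# Tests for the Ganesha driver helpers: dict patching and walking, path
# resolution and the SSH executor wrapper.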
import os import ddt import mock from manila.share.drivers.ganesha import utils as ganesha_utils from manila import test patch_test_dict1 = {'a': 1, 'b': {'c': 2}, 'd': 3, 'e': 4} patch_test_dict2 = {'a': 11, 'b': {'f': 5}, 'd': {'g': 6}} patch_test_dict3 = {'b': {'c': 22, 'h': {'i': 7}}, 'e': None} patch_test_dict_result = { 'a': 11, 'b': {'c': 22, 'f': 5, 'h': {'i': 7}}, 'd': {'g': 6}, 'e': None, } walk_test_dict = {'a': {'b': {'c': {'d': {'e': 'f'}}}}} walk_test_list = [('e', 'f')] class GaneshaUtilsTests(test.TestCase): """Tests Ganesha utility functions.""" def test_patch(self): ret = ganesha_utils.patch(patch_test_dict1, patch_test_dict2, patch_test_dict3) self.assertEqual(patch_test_dict_result, ret) def test_walk(self): ret = [elem for elem in ganesha_utils.walk(walk_test_dict)] self.assertEqual(walk_test_list, ret) def test_path_from(self): self.mock_object(os.path, 'abspath', lambda path: os.path.join('/foo/bar', path)) ret = ganesha_utils.path_from('baz.py', '../quux', 'tic/tac/toe') self.assertEqual('/foo/quux/tic/tac/toe', os.path.normpath(ret)) @ddt.ddt class SSHExecutorTestCase(test.TestCase): """Tests SSHExecutor.""" @ddt.data({'run_as_root': True, 'expected_prefix': 'sudo '}, {'run_as_root': False, 'expected_prefix': ''}) @ddt.unpack def test_call_ssh_exec_object_with_run_as_root( self, run_as_root, expected_prefix): with mock.patch.object(ganesha_utils.utils, 'SSHPool'): self.execute = ganesha_utils.SSHExecutor() fake_ssh_object = mock.Mock() self.mock_object(self.execute.pool, 'get', mock.Mock(return_value=fake_ssh_object)) self.mock_object(ganesha_utils.processutils, 'ssh_execute', mock.Mock(return_value=('', ''))) ret = self.execute('ls', run_as_root=run_as_root) self.assertEqual(('', ''), ret) self.execute.pool.get.assert_called_once_with() ganesha_utils.processutils.ssh_execute.assert_called_once_with( fake_ssh_object, expected_prefix + 'ls') manila-2.0.0/manila/tests/share/drivers/ganesha/test_manager.py0000664000567000056710000005361212701407107025735 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
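# Illustrative sketch of the deep-merge semantics of ganesha_utils.patch(),
# mirroring the patch_test_dict fixtures exercised in test_utils.py above
# (nested dicts are merged rather than replaced; later arguments win):
#
#     from manila.share.drivers.ganesha import utils as ganesha_utils
#
#     base = {'a': 1, 'b': {'c': 2}, 'd': 3, 'e': 4}
#     ret = ganesha_utils.patch(
#         base,
#         {'a': 11, 'b': {'f': 5}, 'd': {'g': 6}},
#         {'b': {'c': 22, 'h': {'i': 7}}, 'e': None})
#     # ret == {'a': 11, 'b': {'c': 22, 'f': 5, 'h': {'i': 7}},
#     #         'd': {'g': 6}, 'e': None}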
import re import mock from oslo_serialization import jsonutils import six from manila import exception from manila.share.drivers.ganesha import manager from manila import test from manila import utils test_export_id = 101 test_name = 'fakefile' test_path = '/fakedir0/export.d/fakefile.conf' test_ganesha_cnf = """EXPORT { Export_Id = 101; CLIENT { Clients = ip1; } }""" test_dict_unicode = { u'EXPORT': { u'Export_Id': 101, u'CLIENT': {u'Clients': u"ip1"} } } test_dict_str = { 'EXPORT': { 'Export_Id': 101, 'CLIENT': {'Clients': "ip1"} } } manager_fake_kwargs = { 'ganesha_config_path': '/fakedir0/fakeconfig', 'ganesha_db_path': '/fakedir1/fake.db', 'ganesha_export_dir': '/fakedir0/export.d', 'ganesha_service_name': 'ganesha.fakeservice' } class GaneshaConfigTests(test.TestCase): """Tests Ganesha config file format convertor functions.""" ref_ganesha_cnf = """EXPORT { CLIENT { Clients = ip1; } Export_Id = 101; }""" @staticmethod def conf_mangle(*confs): """A "mangler" for the conf format. Its purpose is to transform conf data in a way so that semantically equivalent confs yield identical results. Besides this objective criteria, we seek a good trade-off between the following requirements: - low lossiness; - low code complexity. """ def _conf_mangle(conf): # split to expressions by the delimiter ";" # (braces are forced to be treated as expressions # by sandwiching them in ";"-s) conf = re.sub('[{}]', ';\g<0>;', conf).split(';') # whitespace-split expressions to tokens with # (equality is forced to be treated as token by # sandwiching in space) conf = map(lambda l: l.replace("=", " = ").split(), conf) # get rid of by-product empty lists (derived from superflouous # ";"-s that might have crept in due to "sandwiching") conf = map(lambda x: x, conf) # handle the non-deterministic order of confs conf = list(conf) conf.sort() return conf return (_conf_mangle(conf) for conf in confs) def test_conf2json(self): test_ganesha_cnf_with_comment = """EXPORT { # fake_export_block Export_Id = 101; CLIENT { Clients = ip1; } }""" ret = manager._conf2json(test_ganesha_cnf_with_comment) self.assertEqual(test_dict_unicode, jsonutils.loads(ret)) def test_parseconf_ganesha_cnf_input(self): ret = manager.parseconf(test_ganesha_cnf) self.assertEqual(test_dict_unicode, ret) def test_parseconf_json_input(self): ret = manager.parseconf(jsonutils.dumps(test_dict_str)) self.assertEqual(test_dict_unicode, ret) def test_dump_to_conf(self): ganesha_cnf = six.StringIO() manager._dump_to_conf(test_dict_str, ganesha_cnf) self.assertEqual(*self.conf_mangle(self.ref_ganesha_cnf, ganesha_cnf.getvalue())) def test_mkconf(self): ganesha_cnf = manager.mkconf(test_dict_str) self.assertEqual(*self.conf_mangle(self.ref_ganesha_cnf, ganesha_cnf)) class GaneshaManagerTestCase(test.TestCase): """Tests GaneshaManager.""" def instantiate_ganesha_manager(self, *args, **kwargs): with mock.patch.object( manager.GaneshaManager, 'get_export_id', return_value=100) as self.mock_get_export_id: with mock.patch.object( manager.GaneshaManager, 'reset_exports') as self.mock_reset_exports: with mock.patch.object( manager.GaneshaManager, 'restart_service') as self.mock_restart_service: return manager.GaneshaManager(*args, **kwargs) def setUp(self): super(GaneshaManagerTestCase, self).setUp() self._execute = mock.Mock(return_value=('', '')) self._manager = self.instantiate_ganesha_manager( self._execute, 'faketag', **manager_fake_kwargs) self.mock_object(utils, 'synchronized', mock.Mock(return_value=lambda f: f)) def test_init(self): 
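        # Constructing GaneshaManager with manager_fake_kwargs is expected to
        # create the export and DB directories via 'mkdir -p', initialize the
        # sqlite DB with an "exportid" row, and invoke get_export_id(bump=False),
        # reset_exports() and restart_service() exactly once each (asserted below).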
self.mock_object(self._manager, 'reset_exports') self.mock_object(self._manager, 'restart_service') self.assertEqual('/fakedir0/fakeconfig', self._manager.ganesha_config_path) self.assertEqual('faketag', self._manager.tag) self.assertEqual('/fakedir0/export.d', self._manager.ganesha_export_dir) self.assertEqual('/fakedir1/fake.db', self._manager.ganesha_db_path) self.assertEqual('ganesha.fakeservice', self._manager.ganesha_service) self.assertEqual( [mock.call('mkdir', '-p', self._manager.ganesha_export_dir), mock.call('mkdir', '-p', '/fakedir1'), mock.call('sqlite3', self._manager.ganesha_db_path, 'create table ganesha(key varchar(20) primary key, ' 'value int); insert into ganesha values("exportid", ' '100);', run_as_root=False, check_exit_code=False)], self._execute.call_args_list) self.mock_get_export_id.assert_called_once_with(bump=False) self.mock_reset_exports.assert_called_once_with() self.mock_restart_service.assert_called_once_with() def test_init_execute_error_log_message(self): fake_args = ('foo', 'bar') def raise_exception(*args, **kwargs): if args == fake_args: raise exception.GaneshaCommandFailure() test_execute = mock.Mock(side_effect=raise_exception) self.mock_object(manager.LOG, 'error') test_manager = self.instantiate_ganesha_manager( test_execute, 'faketag', **manager_fake_kwargs) self.assertRaises( exception.GaneshaCommandFailure, test_manager.execute, *fake_args, message='fakemsg') manager.LOG.error.assert_called_once_with( mock.ANY, {'tag': 'faketag', 'msg': 'fakemsg'}) def test_init_execute_error_no_log_message(self): fake_args = ('foo', 'bar') def raise_exception(*args, **kwargs): if args == fake_args: raise exception.GaneshaCommandFailure() test_execute = mock.Mock(side_effect=raise_exception) self.mock_object(manager.LOG, 'error') test_manager = self.instantiate_ganesha_manager( test_execute, 'faketag', **manager_fake_kwargs) self.assertRaises( exception.GaneshaCommandFailure, test_manager.execute, *fake_args, message='fakemsg', makelog=False) self.assertFalse(manager.LOG.error.called) def test_ganesha_export_dir(self): self.assertEqual( '/fakedir0/export.d', self._manager.ganesha_export_dir) def test_getpath(self): self.assertEqual( '/fakedir0/export.d/fakefile.conf', self._manager._getpath('fakefile')) def test_write_file(self): test_data = 'fakedata' self.mock_object(manager.pipes, 'quote', mock.Mock(side_effect=['fakedata', 'fakefile.conf.RANDOM'])) test_args = [ ('mktemp', '-p', '/fakedir0/export.d', '-t', 'fakefile.conf.XXXXXX'), ('sh', '-c', 'echo fakedata > fakefile.conf.RANDOM'), ('mv', 'fakefile.conf.RANDOM', test_path)] test_kwargs = { 'message': 'writing fakefile.conf.RANDOM' } def return_tmpfile(*args, **kwargs): if args == test_args[0]: return ('fakefile.conf.RANDOM\n', '') self.mock_object(self._manager, 'execute', mock.Mock(side_effect=return_tmpfile)) self._manager._write_file(test_path, test_data) self._manager.execute.assert_has_calls([ mock.call(*test_args[0]), mock.call(*test_args[1], **test_kwargs), mock.call(*test_args[2])]) manager.pipes.quote.assert_has_calls([ mock.call('fakedata'), mock.call('fakefile.conf.RANDOM')]) def test_write_conf_file(self): test_data = 'fakedata' self.mock_object(self._manager, '_getpath', mock.Mock(return_value=test_path)) self.mock_object(self._manager, '_write_file') ret = self._manager._write_conf_file(test_name, test_data) self.assertEqual(test_path, ret) self._manager._getpath.assert_called_once_with(test_name) self._manager._write_file.assert_called_once_with( test_path, test_data) def test_mkindex(self): 
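        # _mkindex() is expected to 'ls' the export directory, keep only the
        # *.conf entries other than INDEX.conf (so 'INDEX.conf' and
        # 'fakefile.txt' in the fake listing are ignored) and rewrite INDEX
        # with one '%include <export path>' line per remaining export file.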
test_ls_output = 'INDEX.conf\nfakefile.conf\nfakefile.txt' test_index = '%include /fakedir0/export.d/fakefile.conf\n' self.mock_object(self._manager, 'execute', mock.Mock(return_value=(test_ls_output, ''))) self.mock_object(self._manager, '_write_conf_file') ret = self._manager._mkindex() self._manager.execute.assert_called_once_with( 'ls', '/fakedir0/export.d', run_as_root=False) self._manager._write_conf_file.assert_called_once_with( 'INDEX', test_index) self.assertIsNone(ret) def test_read_export_file(self): test_args = ('cat', test_path) test_kwargs = {'message': 'reading export fakefile'} self.mock_object(self._manager, '_getpath', mock.Mock(return_value=test_path)) self.mock_object(self._manager, 'execute', mock.Mock(return_value=(test_ganesha_cnf,))) self.mock_object(manager, 'parseconf', mock.Mock(return_value=test_dict_unicode)) ret = self._manager._read_export_file(test_name) self._manager._getpath.assert_called_once_with(test_name) self._manager.execute.assert_called_once_with( *test_args, **test_kwargs) manager.parseconf.assert_called_once_with(test_ganesha_cnf) self.assertEqual(test_dict_unicode, ret) def test_write_export_file(self): self.mock_object(manager, 'mkconf', mock.Mock(return_value=test_ganesha_cnf)) self.mock_object(self._manager, '_write_conf_file', mock.Mock(return_value=test_path)) ret = self._manager._write_export_file(test_name, test_dict_str) manager.mkconf.assert_called_once_with(test_dict_str) self._manager._write_conf_file.assert_called_once_with( test_name, test_ganesha_cnf) self.assertEqual(test_path, ret) def test_write_export_file_error_incomplete_export_block(self): test_errordict = { u'EXPORT': { u'Export_Id': '@config', u'CLIENT': {u'Clients': u"'ip1','ip2'"} } } self.mock_object(manager, 'mkconf', mock.Mock(return_value=test_ganesha_cnf)) self.mock_object(self._manager, '_write_conf_file', mock.Mock(return_value=test_path)) self.assertRaises(exception.InvalidParameterValue, self._manager._write_export_file, test_name, test_errordict) self.assertFalse(manager.mkconf.called) self.assertFalse(self._manager._write_conf_file.called) def test_rm_export_file(self): self.mock_object(self._manager, 'execute', mock.Mock(return_value=('', ''))) self.mock_object(self._manager, '_getpath', mock.Mock(return_value=test_path)) ret = self._manager._rm_export_file(test_name) self._manager._getpath.assert_called_once_with(test_name) self._manager.execute.assert_called_once_with('rm', test_path) self.assertIsNone(ret) def test_dbus_send_ganesha(self): test_args = ('arg1', 'arg2') test_kwargs = {'key': 'value'} self.mock_object(self._manager, 'execute', mock.Mock(return_value=('', ''))) ret = self._manager._dbus_send_ganesha('fakemethod', *test_args, **test_kwargs) self._manager.execute.assert_called_once_with( 'dbus-send', '--print-reply', '--system', '--dest=org.ganesha.nfsd', '/org/ganesha/nfsd/ExportMgr', 'org.ganesha.nfsd.exportmgr.fakemethod', *test_args, message='dbus call exportmgr.fakemethod', **test_kwargs) self.assertIsNone(ret) def test_remove_export_dbus(self): self.mock_object(self._manager, '_dbus_send_ganesha') ret = self._manager._remove_export_dbus(test_export_id) self._manager._dbus_send_ganesha.assert_called_once_with( 'RemoveExport', 'uint16:101') self.assertIsNone(ret) def test_add_export(self): self.mock_object(self._manager, '_write_export_file', mock.Mock(return_value=test_path)) self.mock_object(self._manager, '_dbus_send_ganesha') self.mock_object(self._manager, '_mkindex') ret = self._manager.add_export(test_name, test_dict_str) 
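        # A successful add_export() should have written the export file,
        # issued the exportmgr AddExport D-Bus call referencing Export_Id 101,
        # and rebuilt the index; each step is asserted below.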
self._manager._write_export_file.assert_called_once_with( test_name, test_dict_str) self._manager._dbus_send_ganesha.assert_called_once_with( 'AddExport', 'string:' + test_path, 'string:EXPORT(Export_Id=101)') self._manager._mkindex.assert_called_once_with() self.assertIsNone(ret) def test_add_export_error_during_mkindex(self): self.mock_object(self._manager, '_write_export_file', mock.Mock(return_value=test_path)) self.mock_object(self._manager, '_dbus_send_ganesha') self.mock_object( self._manager, '_mkindex', mock.Mock(side_effect=exception.GaneshaCommandFailure)) self.mock_object(self._manager, '_rm_export_file') self.mock_object(self._manager, '_remove_export_dbus') self.assertRaises(exception.GaneshaCommandFailure, self._manager.add_export, test_name, test_dict_str) self._manager._write_export_file.assert_called_once_with( test_name, test_dict_str) self._manager._dbus_send_ganesha.assert_called_once_with( 'AddExport', 'string:' + test_path, 'string:EXPORT(Export_Id=101)') self._manager._mkindex.assert_called_once_with() self._manager._rm_export_file.assert_called_once_with(test_name) self._manager._remove_export_dbus.assert_called_once_with( test_export_id) def test_add_export_error_during_write_export_file(self): self.mock_object( self._manager, '_write_export_file', mock.Mock(side_effect=exception.GaneshaCommandFailure)) self.mock_object(self._manager, '_dbus_send_ganesha') self.mock_object(self._manager, '_mkindex') self.mock_object(self._manager, '_rm_export_file') self.mock_object(self._manager, '_remove_export_dbus') self.assertRaises(exception.GaneshaCommandFailure, self._manager.add_export, test_name, test_dict_str) self._manager._write_export_file.assert_called_once_with( test_name, test_dict_str) self.assertFalse(self._manager._dbus_send_ganesha.called) self._manager._mkindex.assert_called_once_with() self.assertFalse(self._manager._rm_export_file.called) self.assertFalse(self._manager._remove_export_dbus.called) def test_add_export_error_during_dbus_send_ganesha(self): self.mock_object(self._manager, '_write_export_file', mock.Mock(return_value=test_path)) self.mock_object( self._manager, '_dbus_send_ganesha', mock.Mock(side_effect=exception.GaneshaCommandFailure)) self.mock_object(self._manager, '_mkindex') self.mock_object(self._manager, '_rm_export_file') self.mock_object(self._manager, '_remove_export_dbus') self.assertRaises(exception.GaneshaCommandFailure, self._manager.add_export, test_name, test_dict_str) self._manager._write_export_file.assert_called_once_with( test_name, test_dict_str) self._manager._dbus_send_ganesha.assert_called_once_with( 'AddExport', 'string:' + test_path, 'string:EXPORT(Export_Id=101)') self._manager._rm_export_file.assert_called_once_with(test_name) self._manager._mkindex.assert_called_once_with() self.assertFalse(self._manager._remove_export_dbus.called) def test_remove_export(self): self.mock_object(self._manager, '_read_export_file', mock.Mock(return_value=test_dict_unicode)) methods = ('_remove_export_dbus', '_rm_export_file', '_mkindex') for method in methods: self.mock_object(self._manager, method) ret = self._manager.remove_export(test_name) self._manager._read_export_file.assert_called_once_with(test_name) self._manager._remove_export_dbus.assert_called_once_with( test_dict_unicode['EXPORT']['Export_Id']) self._manager._rm_export_file.assert_called_once_with(test_name) self._manager._mkindex.assert_called_once_with() self.assertIsNone(ret) def test_remove_export_error_during_read_export_file(self): self.mock_object( self._manager, 
'_read_export_file', mock.Mock(side_effect=exception.GaneshaCommandFailure)) methods = ('_remove_export_dbus', '_rm_export_file', '_mkindex') for method in methods: self.mock_object(self._manager, method) self.assertRaises(exception.GaneshaCommandFailure, self._manager.remove_export, test_name) self._manager._read_export_file.assert_called_once_with(test_name) self.assertFalse(self._manager._remove_export_dbus.called) self._manager._rm_export_file.assert_called_once_with(test_name) self._manager._mkindex.assert_called_once_with() def test_remove_export_error_during_remove_export_dbus(self): self.mock_object(self._manager, '_read_export_file', mock.Mock(return_value=test_dict_unicode)) self.mock_object( self._manager, '_remove_export_dbus', mock.Mock(side_effect=exception.GaneshaCommandFailure)) methods = ('_rm_export_file', '_mkindex') for method in methods: self.mock_object(self._manager, method) self.assertRaises(exception.GaneshaCommandFailure, self._manager.remove_export, test_name) self._manager._read_export_file.assert_called_once_with(test_name) self._manager._remove_export_dbus.assert_called_once_with( test_dict_unicode['EXPORT']['Export_Id']) self._manager._rm_export_file.assert_called_once_with(test_name) self._manager._mkindex.assert_called_once_with() def test_get_export_id(self): self.mock_object(self._manager, 'execute', mock.Mock(return_value=('exportid|101', ''))) ret = self._manager.get_export_id() self._manager.execute.assert_called_once_with( 'sqlite3', self._manager.ganesha_db_path, 'update ganesha set value = value + 1;' 'select * from ganesha where key = "exportid";', run_as_root=False) self.assertEqual(101, ret) def test_get_export_id_nobump(self): self.mock_object(self._manager, 'execute', mock.Mock(return_value=('exportid|101', ''))) ret = self._manager.get_export_id(bump=False) self._manager.execute.assert_called_once_with( 'sqlite3', self._manager.ganesha_db_path, 'select * from ganesha where key = "exportid";', run_as_root=False) self.assertEqual(101, ret) def test_get_export_id_error_invalid_export_db(self): self.mock_object(self._manager, 'execute', mock.Mock(return_value=('invalid', ''))) self.mock_object(manager.LOG, 'error') self.assertRaises(exception.InvalidSqliteDB, self._manager.get_export_id) manager.LOG.error.assert_called_once_with( mock.ANY, mock.ANY) self._manager.execute.assert_called_once_with( 'sqlite3', self._manager.ganesha_db_path, 'update ganesha set value = value + 1;' 'select * from ganesha where key = "exportid";', run_as_root=False) def test_restart_service(self): self.mock_object(self._manager, 'execute') ret = self._manager.restart_service() self._manager.execute.assert_called_once_with( 'service', 'ganesha.fakeservice', 'restart') self.assertIsNone(ret) def test_reset_exports(self): self.mock_object(self._manager, 'execute') self.mock_object(self._manager, '_mkindex') ret = self._manager.reset_exports() self._manager.execute.assert_called_once_with( 'sh', '-c', 'rm -f /fakedir0/export.d/*.conf') self._manager._mkindex.assert_called_once_with() self.assertIsNone(ret) manila-2.0.0/manila/tests/share/drivers/zfsonlinux/0000775000567000056710000000000012701407265023521 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/zfsonlinux/test_driver.py0000664000567000056710000022317512701407107026432 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Mirantis, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt import mock from oslo_config import cfg from manila import context from manila import exception from manila.share.drivers.ganesha import utils as ganesha_utils from manila.share.drivers.zfsonlinux import driver as zfs_driver from manila import test CONF = cfg.CONF class FakeConfig(object): def __init__(self, *args, **kwargs): self.driver_handles_share_servers = False self.share_backend_name = 'FAKE_BACKEND_NAME' self.zfs_share_export_ip = kwargs.get( "zfs_share_export_ip", "1.1.1.1") self.zfs_service_ip = kwargs.get("zfs_service_ip", "2.2.2.2") self.zfs_zpool_list = kwargs.get( "zfs_zpool_list", ["foo", "bar/subbar", "quuz"]) self.zfs_use_ssh = kwargs.get("zfs_use_ssh", False) self.zfs_share_export_ip = kwargs.get( "zfs_share_export_ip", "240.241.242.243") self.zfs_service_ip = kwargs.get("zfs_service_ip", "240.241.242.244") self.ssh_conn_timeout = kwargs.get("ssh_conn_timeout", 123) self.zfs_ssh_username = kwargs.get( "zfs_ssh_username", 'fake_username') self.zfs_ssh_user_password = kwargs.get( "zfs_ssh_user_password", 'fake_pass') self.zfs_ssh_private_key_path = kwargs.get( "zfs_ssh_private_key_path", '/fake/path') self.zfs_replica_snapshot_prefix = kwargs.get( "zfs_replica_snapshot_prefix", "tmp_snapshot_for_replication_") self.zfs_dataset_creation_options = kwargs.get( "zfs_dataset_creation_options", ["fook=foov", "bark=barv"]) self.network_config_group = kwargs.get( "network_config_group", "fake_network_config_group") self.admin_network_config_group = kwargs.get( "admin_network_config_group", "fake_admin_network_config_group") self.config_group = kwargs.get("config_group", "fake_config_group") self.reserved_share_percentage = kwargs.get( "reserved_share_percentage", 0) self.max_over_subscription_ratio = kwargs.get( "max_over_subscription_ratio", 15.0) def safe_get(self, key): return getattr(self, key) def append_config_values(self, *args, **kwargs): pass class FakeDriverPrivateStorage(object): def __init__(self): self.storage = {} def update(self, entity_id, data): if entity_id not in self.storage: self.storage[entity_id] = {} self.storage[entity_id].update(data) def get(self, entity_id, key): return self.storage.get(entity_id, {}).get(key) def delete(self, entity_id): self.storage.pop(entity_id, None) @ddt.ddt class ZFSonLinuxShareDriverTestCase(test.TestCase): def setUp(self): self.mock_object(zfs_driver.CONF, '_check_required_opts') super(self.__class__, self).setUp() self._context = context.get_admin_context() self.ssh_executor = self.mock_object(ganesha_utils, 'SSHExecutor') self.configuration = FakeConfig() self.private_storage = FakeDriverPrivateStorage() self.driver = zfs_driver.ZFSonLinuxShareDriver( configuration=self.configuration, private_storage=self.private_storage) def test_init(self): self.assertTrue(hasattr(self.driver, 'replica_snapshot_prefix')) self.assertEqual( self.driver.replica_snapshot_prefix, self.configuration.zfs_replica_snapshot_prefix) self.assertEqual( self.driver.backend_name, 
self.configuration.share_backend_name) self.assertEqual( self.driver.zpool_list, ['foo', 'bar', 'quuz']) self.assertEqual( self.driver.dataset_creation_options, self.configuration.zfs_dataset_creation_options) self.assertEqual( self.driver.share_export_ip, self.configuration.zfs_share_export_ip) self.assertEqual( self.driver.service_ip, self.configuration.zfs_service_ip) self.assertEqual( self.driver.private_storage, self.private_storage) self.assertTrue(hasattr(self.driver, '_helpers')) self.assertEqual(self.driver._helpers, {}) for attr_name in ('execute', 'execute_with_retry', 'parse_zfs_answer', 'get_zpool_option', 'get_zfs_option', 'zfs'): self.assertTrue(hasattr(self.driver, attr_name)) def test_init_error_with_duplicated_zpools(self): configuration = FakeConfig( zfs_zpool_list=['foo', 'bar', 'foo/quuz']) self.assertRaises( exception.BadConfigurationException, zfs_driver.ZFSonLinuxShareDriver, configuration=configuration, private_storage=self.private_storage ) def test__setup_helpers(self): mock_import_class = self.mock_object( zfs_driver.importutils, 'import_class') self.configuration.zfs_share_helpers = ['FOO=foo.module.WithHelper'] result = self.driver._setup_helpers() self.assertIsNone(result) mock_import_class.assert_called_once_with('foo.module.WithHelper') mock_import_class.return_value.assert_called_once_with( self.configuration) self.assertEqual( self.driver._helpers, {'FOO': mock_import_class.return_value.return_value}) def test__setup_helpers_error(self): self.configuration.zfs_share_helpers = [] self.assertRaises( exception.BadConfigurationException, self.driver._setup_helpers) def test__get_share_helper(self): self.driver._helpers = {'FOO': 'BAR'} result = self.driver._get_share_helper('FOO') self.assertEqual('BAR', result) @ddt.data({}, {'foo': 'bar'}) def test__get_share_helper_error(self, share_proto): self.assertRaises( exception.InvalidShare, self.driver._get_share_helper, 'NFS') @ddt.data(True, False) def test_do_setup(self, use_ssh): self.mock_object(self.driver, '_setup_helpers') self.mock_object(self.driver, 'ssh_executor') self.configuration.zfs_use_ssh = use_ssh self.driver.do_setup('fake_context') self.driver._setup_helpers.assert_called_once_with() if use_ssh: self.assertEqual(4, self.driver.ssh_executor.call_count) else: self.assertEqual(3, self.driver.ssh_executor.call_count) @ddt.data( ('foo', '127.0.0.1'), ('127.0.0.1', 'foo'), ('256.0.0.1', '127.0.0.1'), ('::1/128', '127.0.0.1'), ('127.0.0.1', '::1/128'), ) @ddt.unpack def test_do_setup_error_on_ip_addresses_configuration( self, share_export_ip, service_ip): self.mock_object(self.driver, '_setup_helpers') self.driver.share_export_ip = share_export_ip self.driver.service_ip = service_ip self.assertRaises( exception.BadConfigurationException, self.driver.do_setup, 'fake_context') self.driver._setup_helpers.assert_called_once_with() @ddt.data([], '', None) def test_do_setup_no_zpools_configured(self, zpool_list): self.mock_object(self.driver, '_setup_helpers') self.driver.zpool_list = zpool_list self.assertRaises( exception.BadConfigurationException, self.driver.do_setup, 'fake_context') self.driver._setup_helpers.assert_called_once_with() @ddt.data(None, '', 'foo_replication_domain') def test__get_pools_info(self, replication_domain): self.mock_object( self.driver, 'get_zpool_option', mock.Mock(side_effect=['2G', '3G', '5G', '4G'])) self.configuration.replication_domain = replication_domain self.driver.zpool_list = ['foo', 'bar'] expected = [ {'pool_name': 'foo', 'total_capacity_gb': 3.0, 
'free_capacity_gb': 2.0, 'reserved_percentage': 0, 'compression': [True, False], 'dedupe': [True, False], 'thin_provisioning': [True], 'max_over_subscription_ratio': ( self.driver.configuration.max_over_subscription_ratio), 'qos': [False]}, {'pool_name': 'bar', 'total_capacity_gb': 4.0, 'free_capacity_gb': 5.0, 'reserved_percentage': 0, 'compression': [True, False], 'dedupe': [True, False], 'thin_provisioning': [True], 'max_over_subscription_ratio': ( self.driver.configuration.max_over_subscription_ratio), 'qos': [False]}, ] if replication_domain: for pool in expected: pool['replication_type'] = 'readable' result = self.driver._get_pools_info() self.assertEqual(expected, result) self.driver.get_zpool_option.assert_has_calls([ mock.call('foo', 'free'), mock.call('foo', 'size'), mock.call('bar', 'free'), mock.call('bar', 'size'), ]) @ddt.data( ([], {'compression': [True, False], 'dedupe': [True, False]}), (['dedup=off'], {'compression': [True, False], 'dedupe': [False]}), (['dedup=on'], {'compression': [True, False], 'dedupe': [True]}), (['compression=on'], {'compression': [True], 'dedupe': [True, False]}), (['compression=off'], {'compression': [False], 'dedupe': [True, False]}), (['compression=fake'], {'compression': [True], 'dedupe': [True, False]}), (['compression=fake', 'dedup=off'], {'compression': [True], 'dedupe': [False]}), (['compression=off', 'dedup=on'], {'compression': [False], 'dedupe': [True]}), ) @ddt.unpack def test__init_common_capabilities( self, dataset_creation_options, expected_part): self.driver.dataset_creation_options = ( dataset_creation_options) expected = { 'thin_provisioning': [True], 'qos': [False], 'max_over_subscription_ratio': ( self.driver.configuration.max_over_subscription_ratio), } expected.update(expected_part) self.driver._init_common_capabilities() self.assertEqual(expected, self.driver.common_capabilities) @ddt.data(None, '', 'foo_replication_domain') def test__update_share_stats(self, replication_domain): self.configuration.replication_domain = replication_domain self.mock_object(self.driver, '_get_pools_info') self.assertEqual({}, self.driver._stats) expected = { 'consistency_group_support': None, 'driver_handles_share_servers': False, 'driver_name': 'ZFS', 'driver_version': '1.0', 'free_capacity_gb': 'unknown', 'pools': self.driver._get_pools_info.return_value, 'qos': False, 'replication_domain': replication_domain, 'reserved_percentage': 0, 'share_backend_name': self.driver.backend_name, 'snapshot_support': True, 'storage_protocol': 'NFS', 'total_capacity_gb': 'unknown', 'vendor_name': 'Open Source', } if replication_domain: expected['replication_type'] = 'readable' self.driver._update_share_stats() self.assertEqual(expected, self.driver._stats) self.driver._get_pools_info.assert_called_once_with() @ddt.data('', 'foo', 'foo-bar', 'foo_bar', 'foo-bar_quuz') def test__get_share_name(self, share_id): prefix = 'fake_prefix_' self.configuration.zfs_dataset_name_prefix = prefix self.configuration.zfs_dataset_snapshot_name_prefix = 'quuz' expected = prefix + share_id.replace('-', '_') result = self.driver._get_share_name(share_id) self.assertEqual(expected, result) @ddt.data('', 'foo', 'foo-bar', 'foo_bar', 'foo-bar_quuz') def test__get_snapshot_name(self, snapshot_id): prefix = 'fake_prefix_' self.configuration.zfs_dataset_name_prefix = 'quuz' self.configuration.zfs_dataset_snapshot_name_prefix = prefix expected = prefix + snapshot_id.replace('-', '_') result = self.driver._get_snapshot_name(snapshot_id) self.assertEqual(expected, result) def 
test__get_dataset_creation_options_not_set(self): self.driver.dataset_creation_options = [] mock_get_extra_specs_from_share = self.mock_object( zfs_driver.share_types, 'get_extra_specs_from_share', mock.Mock(return_value={})) share = {'size': '5'} result = self.driver._get_dataset_creation_options(share=share) self.assertIsInstance(result, list) self.assertEqual(2, len(result)) for v in ('quota=5G', 'readonly=off'): self.assertIn(v, result) mock_get_extra_specs_from_share.assert_called_once_with(share) @ddt.data(True, False) def test__get_dataset_creation_options(self, is_readonly): mock_get_extra_specs_from_share = self.mock_object( zfs_driver.share_types, 'get_extra_specs_from_share', mock.Mock(return_value={})) self.driver.dataset_creation_options = [ 'readonly=quuz', 'sharenfs=foo', 'sharesmb=bar', 'k=v', 'q=w', ] share = {'size': 5} readonly = 'readonly=%s' % ('on' if is_readonly else 'off') expected = [readonly, 'k=v', 'q=w', 'quota=5G'] result = self.driver._get_dataset_creation_options( share=share, is_readonly=is_readonly) self.assertEqual(sorted(expected), sorted(result)) mock_get_extra_specs_from_share.assert_called_once_with(share) @ddt.data( (' True', [True, False], ['dedup=off'], 'dedup=on'), ('True', [True, False], ['dedup=off'], 'dedup=on'), ('on', [True, False], ['dedup=off'], 'dedup=on'), ('yes', [True, False], ['dedup=off'], 'dedup=on'), ('1', [True, False], ['dedup=off'], 'dedup=on'), ('True', [True], [], 'dedup=on'), (' False', [True, False], [], 'dedup=off'), ('False', [True, False], [], 'dedup=off'), ('False', [False], ['dedup=on'], 'dedup=off'), ('off', [False], ['dedup=on'], 'dedup=off'), ('no', [False], ['dedup=on'], 'dedup=off'), ('0', [False], ['dedup=on'], 'dedup=off'), ) @ddt.unpack def test__get_dataset_creation_options_with_updated_dedupe( self, dedupe_extra_spec, dedupe_capability, driver_options, expected): mock_get_extra_specs_from_share = self.mock_object( zfs_driver.share_types, 'get_extra_specs_from_share', mock.Mock(return_value={'dedupe': dedupe_extra_spec})) self.driver.dataset_creation_options = driver_options self.driver.common_capabilities['dedupe'] = dedupe_capability share = {'size': 5} expected_options = ['quota=5G', 'readonly=off'] expected_options.append(expected) result = self.driver._get_dataset_creation_options(share=share) self.assertEqual(sorted(expected_options), sorted(result)) mock_get_extra_specs_from_share.assert_called_once_with(share) @ddt.data( ('on', [True, False], ['compression=off'], 'compression=on'), ('on', [True], [], 'compression=on'), ('off', [False], ['compression=on'], 'compression=off'), ('off', [True, False], [], 'compression=off'), ('foo', [True, False], [], 'compression=foo'), ('bar', [True], [], 'compression=bar'), ) @ddt.unpack def test__get_dataset_creation_options_with_updated_compression( self, extra_spec, capability, driver_options, expected_option): mock_get_extra_specs_from_share = self.mock_object( zfs_driver.share_types, 'get_extra_specs_from_share', mock.Mock(return_value={'zfsonlinux:compression': extra_spec})) self.driver.dataset_creation_options = driver_options self.driver.common_capabilities['compression'] = capability share = {'size': 5} expected_options = ['quota=5G', 'readonly=off'] expected_options.append(expected_option) result = self.driver._get_dataset_creation_options(share=share) self.assertEqual(sorted(expected_options), sorted(result)) mock_get_extra_specs_from_share.assert_called_once_with(share) @ddt.data( ({'dedupe': 'fake'}, {'dedupe': [True, False]}), ({'dedupe': 'on'}, {'dedupe': 
[False]}), ({'dedupe': 'off'}, {'dedupe': [True]}), ({'zfsonlinux:compression': 'fake'}, {'compression': [False]}), ({'zfsonlinux:compression': 'on'}, {'compression': [False]}), ({'zfsonlinux:compression': 'off'}, {'compression': [True]}), ) @ddt.unpack def test__get_dataset_creation_options_error( self, extra_specs, common_capabilities): mock_get_extra_specs_from_share = self.mock_object( zfs_driver.share_types, 'get_extra_specs_from_share', mock.Mock(return_value=extra_specs)) share = {'size': 5} self.driver.common_capabilities.update(common_capabilities) self.assertRaises( exception.ZFSonLinuxException, self.driver._get_dataset_creation_options, share=share ) mock_get_extra_specs_from_share.assert_called_once_with(share) @ddt.data('bar/quuz', 'bar/quuz/', 'bar') def test__get_dataset_name(self, second_zpool): self.configuration.zfs_zpool_list = ['foo', second_zpool] prefix = 'fake_prefix_' self.configuration.zfs_dataset_name_prefix = prefix share = {'id': 'abc-def_ghi', 'host': 'hostname@backend_name#bar'} result = self.driver._get_dataset_name(share) if second_zpool[-1] == '/': second_zpool = second_zpool[0:-1] expected = '%s/%sabc_def_ghi' % (second_zpool, prefix) self.assertEqual(expected, result) def test_create_share(self): mock_get_helper = self.mock_object(self.driver, '_get_share_helper') self.mock_object(self.driver, 'zfs') mock_get_extra_specs_from_share = self.mock_object( zfs_driver.share_types, 'get_extra_specs_from_share', mock.Mock(return_value={})) context = 'fake_context' share = { 'id': 'fake_share_id', 'host': 'hostname@backend_name#bar', 'share_proto': 'NFS', 'size': 4, } self.configuration.zfs_dataset_name_prefix = 'some_prefix_' self.configuration.zfs_ssh_username = 'someuser' self.driver.share_export_ip = '1.1.1.1' self.driver.service_ip = '2.2.2.2' dataset_name = 'bar/subbar/some_prefix_fake_share_id' result = self.driver.create_share(context, share, share_server=None) self.assertEqual( mock_get_helper.return_value.create_exports.return_value, result, ) self.assertEqual( 'share', self.driver.private_storage.get(share['id'], 'entity_type')) self.assertEqual( dataset_name, self.driver.private_storage.get(share['id'], 'dataset_name')) self.assertEqual( 'someuser@2.2.2.2', self.driver.private_storage.get(share['id'], 'ssh_cmd')) self.assertEqual( 'bar', self.driver.private_storage.get(share['id'], 'pool_name')) self.driver.zfs.assert_called_once_with( 'create', '-o', 'quota=4G', '-o', 'fook=foov', '-o', 'bark=barv', '-o', 'readonly=off', 'bar/subbar/some_prefix_fake_share_id') mock_get_helper.assert_has_calls([ mock.call('NFS'), mock.call().create_exports(dataset_name) ]) mock_get_extra_specs_from_share.assert_called_once_with(share) def test_create_share_with_share_server(self): self.assertRaises( exception.InvalidInput, self.driver.create_share, 'fake_context', 'fake_share', share_server={'id': 'fake_server'}, ) def test_delete_share(self): dataset_name = 'bar/subbar/some_prefix_fake_share_id' mock_delete = self.mock_object( self.driver, '_delete_dataset_or_snapshot_with_retry') self.mock_object(self.driver, '_get_share_helper') self.mock_object(zfs_driver.LOG, 'warning') self.mock_object( self.driver, 'zfs', mock.Mock(return_value=('a', 'b'))) snap_name = '%s@%s' % ( dataset_name, self.driver.replica_snapshot_prefix) self.mock_object( self.driver, 'parse_zfs_answer', mock.Mock( side_effect=[ [{'NAME': 'fake_dataset_name'}, {'NAME': dataset_name}], [{'NAME': 'snap_name'}, {'NAME': '%s@foo' % dataset_name}, {'NAME': snap_name}], ])) context = 'fake_context' share = 
{ 'id': 'fake_share_id', 'host': 'hostname@backend_name#bar', 'share_proto': 'NFS', 'size': 4, } self.configuration.zfs_dataset_name_prefix = 'some_prefix_' self.configuration.zfs_ssh_username = 'someuser' self.driver.share_export_ip = '1.1.1.1' self.driver.service_ip = '2.2.2.2' self.driver.private_storage.update( share['id'], {'pool_name': 'bar', 'dataset_name': dataset_name} ) self.driver.delete_share(context, share, share_server=None) self.driver.zfs.assert_has_calls([ mock.call('list', '-r', 'bar'), mock.call('list', '-r', '-t', 'snapshot', 'bar'), ]) self.driver._get_share_helper.assert_has_calls([ mock.call('NFS'), mock.call().remove_exports(dataset_name)]) self.driver.parse_zfs_answer.assert_has_calls([ mock.call('a'), mock.call('a')]) mock_delete.assert_has_calls([ mock.call(snap_name), mock.call(dataset_name), ]) self.assertEqual(0, zfs_driver.LOG.warning.call_count) def test_delete_share_absent(self): dataset_name = 'bar/subbar/some_prefix_fake_share_id' mock_delete = self.mock_object( self.driver, '_delete_dataset_or_snapshot_with_retry') self.mock_object(self.driver, '_get_share_helper') self.mock_object(zfs_driver.LOG, 'warning') self.mock_object( self.driver, 'zfs', mock.Mock(return_value=('a', 'b'))) snap_name = '%s@%s' % ( dataset_name, self.driver.replica_snapshot_prefix) self.mock_object( self.driver, 'parse_zfs_answer', mock.Mock(side_effect=[[], [{'NAME': snap_name}]])) context = 'fake_context' share = { 'id': 'fake_share_id', 'host': 'hostname@backend_name#bar', 'size': 4, } self.configuration.zfs_dataset_name_prefix = 'some_prefix_' self.configuration.zfs_ssh_username = 'someuser' self.driver.share_export_ip = '1.1.1.1' self.driver.service_ip = '2.2.2.2' self.driver.private_storage.update(share['id'], {'pool_name': 'bar'}) self.driver.delete_share(context, share, share_server=None) self.assertEqual(0, self.driver._get_share_helper.call_count) self.assertEqual(0, mock_delete.call_count) self.driver.zfs.assert_called_once_with('list', '-r', 'bar') self.driver.parse_zfs_answer.assert_called_once_with('a') zfs_driver.LOG.warning.assert_called_once_with( mock.ANY, {'id': share['id'], 'name': dataset_name}) def test_delete_share_with_share_server(self): self.assertRaises( exception.InvalidInput, self.driver.delete_share, 'fake_context', 'fake_share', share_server={'id': 'fake_server'}, ) def test_create_snapshot(self): self.configuration.zfs_dataset_snapshot_name_prefix = 'prefx_' self.mock_object(self.driver, 'zfs') snapshot = { 'id': 'fake_snapshot_instance_id', 'snapshot_id': 'fake_snapshot_id', 'host': 'hostname@backend_name#bar', 'size': 4, 'share_instance_id': 'fake_share_id' } snapshot_name = 'foo_data_set_name@prefx_%s' % snapshot['id'] self.driver.private_storage.update( snapshot['share_instance_id'], {'dataset_name': 'foo_data_set_name'}) self.driver.create_snapshot('fake_context', snapshot) self.driver.zfs.assert_called_once_with( 'snapshot', snapshot_name) self.assertEqual( snapshot_name.split('@')[-1], self.driver.private_storage.get( snapshot['snapshot_id'], 'snapshot_tag')) def test_delete_snapshot(self): snapshot = { 'id': 'fake_snapshot_instance_id', 'snapshot_id': 'fake_snapshot_id', 'host': 'hostname@backend_name#bar', 'size': 4, 'share_instance_id': 'fake_share_id', } dataset_name = 'foo_zpool/bar_dataset_name' snap_tag = 'prefix_%s' % snapshot['id'] snap_name = '%(dataset)s@%(tag)s' % { 'dataset': dataset_name, 'tag': snap_tag} mock_delete = self.mock_object( self.driver, '_delete_dataset_or_snapshot_with_retry') self.mock_object(zfs_driver.LOG, 
'warning') self.mock_object( self.driver, 'zfs', mock.Mock(return_value=('a', 'b'))) self.mock_object( self.driver, 'parse_zfs_answer', mock.Mock(side_effect=[ [{'NAME': 'some_other_dataset@snapshot_name'}, {'NAME': snap_name}], []])) context = 'fake_context' self.driver.private_storage.update( snapshot['id'], {'snapshot_name': snap_name}) self.driver.private_storage.update( snapshot['snapshot_id'], {'snapshot_tag': snap_tag}) self.driver.private_storage.update( snapshot['share_instance_id'], {'dataset_name': dataset_name}) self.driver.delete_snapshot(context, snapshot, share_server=None) self.assertEqual(0, zfs_driver.LOG.warning.call_count) self.driver.zfs.assert_called_once_with( 'list', '-r', '-t', 'snapshot', snap_name) self.driver.parse_zfs_answer.assert_called_once_with('a') mock_delete.assert_called_once_with(snap_name) def test_delete_snapshot_absent(self): snapshot = { 'id': 'fake_snapshot_instance_id', 'snapshot_id': 'fake_snapshot_id', 'host': 'hostname@backend_name#bar', 'size': 4, 'share_instance_id': 'fake_share_id', } dataset_name = 'foo_zpool/bar_dataset_name' snap_tag = 'prefix_%s' % snapshot['id'] snap_name = '%(dataset)s@%(tag)s' % { 'dataset': dataset_name, 'tag': snap_tag} mock_delete = self.mock_object( self.driver, '_delete_dataset_or_snapshot_with_retry') self.mock_object(zfs_driver.LOG, 'warning') self.mock_object( self.driver, 'zfs', mock.Mock(return_value=('a', 'b'))) self.mock_object( self.driver, 'parse_zfs_answer', mock.Mock(side_effect=[[], [{'NAME': snap_name}]])) context = 'fake_context' self.driver.private_storage.update( snapshot['id'], {'snapshot_name': snap_name}) self.driver.private_storage.update( snapshot['snapshot_id'], {'snapshot_tag': snap_tag}) self.driver.private_storage.update( snapshot['share_instance_id'], {'dataset_name': dataset_name}) self.driver.delete_snapshot(context, snapshot, share_server=None) self.assertEqual(0, mock_delete.call_count) self.driver.zfs.assert_called_once_with( 'list', '-r', '-t', 'snapshot', snap_name) self.driver.parse_zfs_answer.assert_called_once_with('a') zfs_driver.LOG.warning.assert_called_once_with( mock.ANY, {'id': snapshot['id'], 'name': snap_name}) def test_delete_snapshot_with_share_server(self): self.assertRaises( exception.InvalidInput, self.driver.delete_snapshot, 'fake_context', 'fake_snapshot', share_server={'id': 'fake_server'}, ) def test_create_share_from_snapshot(self): mock_get_helper = self.mock_object(self.driver, '_get_share_helper') self.mock_object(self.driver, 'zfs') self.mock_object(self.driver, 'execute') mock_get_extra_specs_from_share = self.mock_object( zfs_driver.share_types, 'get_extra_specs_from_share', mock.Mock(return_value={})) context = 'fake_context' share = { 'id': 'fake_share_id', 'host': 'hostname@backend_name#bar', 'share_proto': 'NFS', 'size': 4, } snapshot = { 'id': 'fake_snapshot_instance_id', 'snapshot_id': 'fake_snapshot_id', 'host': 'hostname@backend_name#bar', 'size': 4, 'share_instance_id': share['id'], } dataset_name = 'bar/subbar/some_prefix_%s' % share['id'] snap_tag = 'prefix_%s' % snapshot['id'] snap_name = '%(dataset)s@%(tag)s' % { 'dataset': dataset_name, 'tag': snap_tag} self.configuration.zfs_dataset_name_prefix = 'some_prefix_' self.configuration.zfs_ssh_username = 'someuser' self.driver.share_export_ip = '1.1.1.1' self.driver.service_ip = '2.2.2.2' self.driver.private_storage.update( snapshot['id'], {'snapshot_name': snap_name}) self.driver.private_storage.update( snapshot['snapshot_id'], {'snapshot_tag': snap_tag}) self.driver.private_storage.update( 
snapshot['share_instance_id'], {'dataset_name': dataset_name}) result = self.driver.create_share_from_snapshot( context, share, snapshot, share_server=None) self.assertEqual( mock_get_helper.return_value.create_exports.return_value, result, ) self.assertEqual( 'share', self.driver.private_storage.get(share['id'], 'entity_type')) self.assertEqual( dataset_name, self.driver.private_storage.get(share['id'], 'dataset_name')) self.assertEqual( 'someuser@2.2.2.2', self.driver.private_storage.get(share['id'], 'ssh_cmd')) self.assertEqual( 'bar', self.driver.private_storage.get(share['id'], 'pool_name')) self.driver.execute.assert_has_calls([ mock.call( 'ssh', 'someuser@2.2.2.2', 'sudo', 'zfs', 'send', '-vDp', snap_name, '|', 'sudo', 'zfs', 'receive', '-v', 'bar/subbar/some_prefix_fake_share_id'), mock.call( 'sudo', 'zfs', 'destroy', 'bar/subbar/some_prefix_fake_share_id@%s' % snap_tag), ]) self.driver.zfs.assert_has_calls([ mock.call('set', opt, 'bar/subbar/some_prefix_fake_share_id') for opt in ('quota=4G', 'bark=barv', 'readonly=off', 'fook=foov') ], any_order=True) mock_get_helper.assert_has_calls([ mock.call('NFS'), mock.call().create_exports(dataset_name) ]) mock_get_extra_specs_from_share.assert_called_once_with(share) def test_create_share_from_snapshot_with_share_server(self): self.assertRaises( exception.InvalidInput, self.driver.create_share_from_snapshot, 'fake_context', 'fake_share', 'fake_snapshot', share_server={'id': 'fake_server'}, ) def test_get_pool(self): share = {'host': 'hostname@backend_name#bar'} result = self.driver.get_pool(share) self.assertEqual('bar', result) @ddt.data('on', 'off', 'rw=1.1.1.1') def test_ensure_share(self, get_zfs_option_answer): share = { 'id': 'fake_share_id', 'host': 'hostname@backend_name#bar', 'share_proto': 'NFS', } dataset_name = 'foo_zpool/foo_fs' self.mock_object( self.driver, '_get_dataset_name', mock.Mock(return_value=dataset_name)) self.mock_object( self.driver, 'get_zfs_option', mock.Mock(return_value=get_zfs_option_answer)) mock_helper = self.mock_object(self.driver, '_get_share_helper') self.mock_object( self.driver, 'zfs', mock.Mock(return_value=('a', 'b'))) self.mock_object( self.driver, 'parse_zfs_answer', mock.Mock(side_effect=[[{'NAME': 'fake1'}, {'NAME': dataset_name}, {'NAME': 'fake2'}]] * 2)) for s in ('1', '2'): self.driver.zfs.reset_mock() self.driver.get_zfs_option.reset_mock() mock_helper.reset_mock() self.driver.parse_zfs_answer.reset_mock() self.driver._get_dataset_name.reset_mock() self.driver.share_export_ip = '1.1.1.%s' % s self.driver.service_ip = '2.2.2.%s' % s self.configuration.zfs_ssh_username = 'user%s' % s result = self.driver.ensure_share('fake_context', share) self.assertEqual( 'user%(s)s@2.2.2.%(s)s' % {'s': s}, self.driver.private_storage.get(share['id'], 'ssh_cmd')) self.driver.get_zfs_option.assert_called_once_with( dataset_name, 'sharenfs') mock_helper.assert_called_once_with( share['share_proto']) mock_helper.return_value.get_exports.assert_called_once_with( dataset_name) expected_calls = [mock.call('list', '-r', 'bar')] if get_zfs_option_answer != 'off': expected_calls.append(mock.call('share', dataset_name)) self.driver.zfs.assert_has_calls(expected_calls) self.driver.parse_zfs_answer.assert_called_once_with('a') self.driver._get_dataset_name.assert_called_once_with(share) self.assertEqual( mock_helper.return_value.get_exports.return_value, result, ) def test_ensure_share_absent(self): share = {'id': 'fake_share_id', 'host': 'hostname@backend_name#bar'} dataset_name = 'foo_zpool/foo_fs' 
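        # With the dataset missing from the 'zfs list -r bar' answer,
        # ensure_share() is expected to raise ShareResourceNotFound without
        # consulting get_zfs_option() or the share helpers (asserted below).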
self.driver.private_storage.update( share['id'], {'dataset_name': dataset_name}) self.mock_object(self.driver, 'get_zfs_option') self.mock_object(self.driver, '_get_share_helper') self.mock_object( self.driver, 'zfs', mock.Mock(return_value=('a', 'b'))) self.mock_object( self.driver, 'parse_zfs_answer', mock.Mock(side_effect=[[], [{'NAME': dataset_name}]])) self.assertRaises( exception.ShareResourceNotFound, self.driver.ensure_share, 'fake_context', share, ) self.assertEqual(0, self.driver.get_zfs_option.call_count) self.assertEqual(0, self.driver._get_share_helper.call_count) self.driver.zfs.assert_called_once_with('list', '-r', 'bar') self.driver.parse_zfs_answer.assert_called_once_with('a') def test_ensure_share_with_share_server(self): self.assertRaises( exception.InvalidInput, self.driver.ensure_share, 'fake_context', 'fake_share', share_server={'id': 'fake_server'}, ) def test_get_network_allocations_number(self): self.assertEqual(0, self.driver.get_network_allocations_number()) def test_extend_share(self): dataset_name = 'foo_zpool/foo_fs' self.mock_object( self.driver, '_get_dataset_name', mock.Mock(return_value=dataset_name)) self.mock_object(self.driver, 'zfs') self.driver.extend_share('fake_share', 5) self.driver._get_dataset_name.assert_called_once_with('fake_share') self.driver.zfs.assert_called_once_with( 'set', 'quota=5G', dataset_name) def test_extend_share_with_share_server(self): self.assertRaises( exception.InvalidInput, self.driver.extend_share, 'fake_context', 'fake_share', 5, share_server={'id': 'fake_server'}, ) def test_shrink_share(self): dataset_name = 'foo_zpool/foo_fs' self.mock_object( self.driver, '_get_dataset_name', mock.Mock(return_value=dataset_name)) self.mock_object(self.driver, 'zfs') self.mock_object( self.driver, 'get_zfs_option', mock.Mock(return_value='4G')) share = {'id': 'fake_share_id'} self.driver.shrink_share(share, 5) self.driver._get_dataset_name.assert_called_once_with(share) self.driver.get_zfs_option.assert_called_once_with( dataset_name, 'used') self.driver.zfs.assert_called_once_with( 'set', 'quota=5G', dataset_name) def test_shrink_share_data_loss(self): dataset_name = 'foo_zpool/foo_fs' self.mock_object( self.driver, '_get_dataset_name', mock.Mock(return_value=dataset_name)) self.mock_object(self.driver, 'zfs') self.mock_object( self.driver, 'get_zfs_option', mock.Mock(return_value='6G')) share = {'id': 'fake_share_id'} self.assertRaises( exception.ShareShrinkingPossibleDataLoss, self.driver.shrink_share, share, 5) self.driver._get_dataset_name.assert_called_once_with(share) self.driver.get_zfs_option.assert_called_once_with( dataset_name, 'used') self.assertEqual(0, self.driver.zfs.call_count) def test_shrink_share_with_share_server(self): self.assertRaises( exception.InvalidInput, self.driver.shrink_share, 'fake_context', 'fake_share', 5, share_server={'id': 'fake_server'}, ) def test__get_replication_snapshot_prefix(self): replica = {'id': 'foo-_bar-_id'} self.driver.replica_snapshot_prefix = 'PrEfIx' result = self.driver._get_replication_snapshot_prefix(replica) self.assertEqual('PrEfIx_foo__bar__id', result) def test__get_replication_snapshot_tag(self): replica = {'id': 'foo-_bar-_id'} self.driver.replica_snapshot_prefix = 'PrEfIx' mock_utcnow = self.mock_object(zfs_driver.timeutils, 'utcnow') result = self.driver._get_replication_snapshot_tag(replica) self.assertEqual( ('PrEfIx_foo__bar__id_time_' '%s' % mock_utcnow.return_value.isoformat.return_value), result) mock_utcnow.assert_called_once_with() 
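        # The replication snapshot tag embeds the UTC timestamp via
        # utcnow().isoformat(), hence the single isoformat() call checked next.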
mock_utcnow.return_value.isoformat.assert_called_once_with() def test__get_active_replica(self): replica_list = [ {'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC, 'id': '1'}, {'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE, 'id': '2'}, {'replica_state': zfs_driver.constants.REPLICA_STATE_OUT_OF_SYNC, 'id': '3'}, ] result = self.driver._get_active_replica(replica_list) self.assertEqual(replica_list[1], result) def test__get_active_replica_not_found(self): replica_list = [ {'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC, 'id': '1'}, {'replica_state': zfs_driver.constants.REPLICA_STATE_OUT_OF_SYNC, 'id': '3'}, ] self.assertRaises( exception.ReplicationException, self.driver._get_active_replica, replica_list, ) def test_update_access(self): self.mock_object(self.driver, '_get_dataset_name') mock_helper = self.mock_object(self.driver, '_get_share_helper') share = {'share_proto': 'NFS'} result = self.driver.update_access( 'fake_context', share, [1], [2], [3]) self.driver._get_dataset_name.assert_called_once_with(share) self.assertEqual( mock_helper.return_value.update_access.return_value, result, ) def test_update_access_with_share_server(self): self.assertRaises( exception.InvalidInput, self.driver.update_access, 'fake_context', 'fake_share', [], [], [], share_server={'id': 'fake_server'}, ) def test_unmanage(self): share = {'id': 'fake_share_id'} self.mock_object(self.driver.private_storage, 'delete') self.driver.unmanage(share) self.driver.private_storage.delete.assert_called_once_with(share['id']) def test__delete_dataset_or_snapshot_with_retry_snapshot(self): self.mock_object(self.driver, 'get_zfs_option') self.mock_object(self.driver, 'zfs') self.driver._delete_dataset_or_snapshot_with_retry('foo@bar') self.driver.get_zfs_option.assert_called_once_with( 'foo@bar', 'mountpoint') self.driver.zfs.assert_called_once_with( 'destroy', '-f', 'foo@bar') def test__delete_dataset_or_snapshot_with_retry_of(self): self.mock_object(self.driver, 'get_zfs_option') self.mock_object( self.driver, 'execute', mock.Mock(return_value=('a', 'b'))) self.mock_object(zfs_driver.time, 'sleep') self.mock_object(zfs_driver.LOG, 'debug') self.mock_object( zfs_driver.time, 'time', mock.Mock(side_effect=range(1, 70, 2))) dataset_name = 'fake/dataset/name' self.assertRaises( exception.ZFSonLinuxException, self.driver._delete_dataset_or_snapshot_with_retry, dataset_name, ) self.driver.get_zfs_option.assert_called_once_with( dataset_name, 'mountpoint') self.assertEqual(31, zfs_driver.time.time.call_count) self.assertEqual(29, zfs_driver.time.sleep.call_count) self.assertEqual(29, zfs_driver.LOG.debug.call_count) def test__delete_dataset_or_snapshot_with_retry_temp_of(self): self.mock_object(self.driver, 'get_zfs_option') self.mock_object(self.driver, 'zfs') self.mock_object( self.driver, 'execute', mock.Mock(side_effect=[ ('a', 'b'), exception.ProcessExecutionError( 'FAKE lsof returns not found')])) self.mock_object(zfs_driver.time, 'sleep') self.mock_object(zfs_driver.LOG, 'debug') self.mock_object( zfs_driver.time, 'time', mock.Mock(side_effect=range(1, 70, 2))) dataset_name = 'fake/dataset/name' self.driver._delete_dataset_or_snapshot_with_retry(dataset_name) self.driver.get_zfs_option.assert_called_once_with( dataset_name, 'mountpoint') self.assertEqual(3, zfs_driver.time.time.call_count) self.assertEqual(2, self.driver.execute.call_count) self.assertEqual(1, zfs_driver.LOG.debug.call_count) zfs_driver.LOG.debug.assert_called_once_with( mock.ANY, {'name': dataset_name, 'out': 'a'}) 
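        # The temporarily-busy dataset is retried after a single sleep(2); the
        # second lsof-style check raises "not found", so 'zfs destroy -f' then
        # succeeds exactly once (asserted below).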
zfs_driver.time.sleep.assert_called_once_with(2) self.driver.zfs.assert_called_once_with('destroy', '-f', dataset_name) def test__delete_dataset_or_snapshot_with_retry_busy(self): self.mock_object(self.driver, 'get_zfs_option') self.mock_object( self.driver, 'execute', mock.Mock( side_effect=exception.ProcessExecutionError( 'FAKE lsof returns not found'))) self.mock_object( self.driver, 'zfs', mock.Mock(side_effect=[ exception.ProcessExecutionError( 'cannot destroy FAKE: dataset is busy\n'), None, None])) self.mock_object(zfs_driver.time, 'sleep') self.mock_object(zfs_driver.LOG, 'info') dataset_name = 'fake/dataset/name' self.driver._delete_dataset_or_snapshot_with_retry(dataset_name) self.driver.get_zfs_option.assert_called_once_with( dataset_name, 'mountpoint') self.assertEqual(2, zfs_driver.time.sleep.call_count) self.assertEqual(2, self.driver.execute.call_count) self.assertEqual(1, zfs_driver.LOG.info.call_count) self.assertEqual(2, self.driver.zfs.call_count) def test_create_replica(self): active_replica = { 'id': 'fake_active_replica_id', 'host': 'hostname1@backend_name1#foo', 'size': 5, 'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE, } replica_list = [active_replica] new_replica = { 'id': 'fake_new_replica_id', 'host': 'hostname2@backend_name2#bar', 'share_proto': 'NFS', 'replica_state': None, } dst_dataset_name = ( 'bar/subbar/fake_dataset_name_prefix%s' % new_replica['id']) access_rules = ['foo_rule', 'bar_rule'] self.driver.private_storage.update( active_replica['id'], {'dataset_name': 'fake/active/dataset/name', 'ssh_cmd': 'fake_ssh_cmd'} ) self.mock_object( self.driver, 'execute', mock.Mock(side_effect=[('a', 'b'), ('c', 'd')])) self.mock_object(self.driver, 'zfs') mock_helper = self.mock_object(self.driver, '_get_share_helper') self.configuration.zfs_dataset_name_prefix = 'fake_dataset_name_prefix' mock_utcnow = self.mock_object(zfs_driver.timeutils, 'utcnow') mock_utcnow.return_value.isoformat.return_value = 'some_time' result = self.driver.create_replica( 'fake_context', replica_list, new_replica, access_rules, []) expected = { 'export_locations': ( mock_helper.return_value.create_exports.return_value), 'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC, 'access_rules_status': zfs_driver.constants.STATUS_ACTIVE, } self.assertEqual(expected, result) mock_helper.assert_has_calls([ mock.call('NFS'), mock.call().update_access( dst_dataset_name, access_rules, add_rules=[], delete_rules=[], make_all_ro=True), mock.call('NFS'), mock.call().create_exports(dst_dataset_name), ]) self.driver.zfs.assert_has_calls([ mock.call('set', 'readonly=on', dst_dataset_name), mock.call('set', 'quota=%sG' % active_replica['size'], dst_dataset_name), ]) src_snapshot_name = ( 'fake/active/dataset/name@' 'tmp_snapshot_for_replication__fake_new_replica_id_time_some_time') self.driver.execute.assert_has_calls([ mock.call('ssh', 'fake_ssh_cmd', 'sudo', 'zfs', 'snapshot', src_snapshot_name), mock.call( 'ssh', 'fake_ssh_cmd', 'sudo', 'zfs', 'send', '-vDR', src_snapshot_name, '|', 'ssh', 'fake_username@240.241.242.244', 'sudo', 'zfs', 'receive', '-v', dst_dataset_name ), ]) mock_utcnow.assert_called_once_with() mock_utcnow.return_value.isoformat.assert_called_once_with() def test_delete_replica_not_found(self): dataset_name = 'foo/dataset/name' pool_name = 'foo_pool' replica = {'id': 'fake_replica_id'} replica_list = [replica] replica_snapshots = [] self.mock_object( self.driver, '_get_dataset_name', mock.Mock(return_value=dataset_name)) self.mock_object( self.driver, 'zfs', 
mock.Mock(side_effect=[('a', 'b'), ('c', 'd')])) self.mock_object( self.driver, 'parse_zfs_answer', mock.Mock(side_effect=[[], []])) self.mock_object(self.driver, '_delete_dataset_or_snapshot_with_retry') self.mock_object(zfs_driver.LOG, 'warning') self.mock_object(self.driver, '_get_share_helper') self.driver.private_storage.update( replica['id'], {'pool_name': pool_name}) self.driver.delete_replica('fake_context', replica_list, replica_snapshots, replica) zfs_driver.LOG.warning.assert_called_once_with( mock.ANY, {'id': replica['id'], 'name': dataset_name}) self.assertEqual(0, self.driver._get_share_helper.call_count) self.assertEqual( 0, self.driver._delete_dataset_or_snapshot_with_retry.call_count) self.driver._get_dataset_name.assert_called_once_with(replica) self.driver.zfs.assert_has_calls([ mock.call('list', '-r', '-t', 'snapshot', pool_name), mock.call('list', '-r', pool_name), ]) self.driver.parse_zfs_answer.assert_has_calls([ mock.call('a'), mock.call('c'), ]) def test_delete_replica(self): dataset_name = 'foo/dataset/name' pool_name = 'foo_pool' replica = {'id': 'fake_replica_id', 'share_proto': 'NFS'} replica_list = [replica] self.mock_object( self.driver, '_get_dataset_name', mock.Mock(return_value=dataset_name)) self.mock_object( self.driver, 'zfs', mock.Mock(side_effect=[('a', 'b'), ('c', 'd')])) self.mock_object( self.driver, 'parse_zfs_answer', mock.Mock(side_effect=[ [{'NAME': 'some_other_dataset@snapshot'}, {'NAME': dataset_name + '@foo_snap'}], [{'NAME': 'some_other_dataset'}, {'NAME': dataset_name}], ])) mock_helper = self.mock_object(self.driver, '_get_share_helper') self.mock_object(self.driver, '_delete_dataset_or_snapshot_with_retry') self.mock_object(zfs_driver.LOG, 'warning') self.driver.private_storage.update( replica['id'], {'pool_name': pool_name, 'dataset_name': dataset_name}) self.driver.delete_replica('fake_context', replica_list, [], replica) self.assertEqual(0, zfs_driver.LOG.warning.call_count) self.assertEqual(0, self.driver._get_dataset_name.call_count) self.driver._delete_dataset_or_snapshot_with_retry.assert_has_calls([ mock.call(dataset_name + '@foo_snap'), mock.call(dataset_name), ]) self.driver.zfs.assert_has_calls([ mock.call('list', '-r', '-t', 'snapshot', pool_name), mock.call('list', '-r', pool_name), ]) self.driver.parse_zfs_answer.assert_has_calls([ mock.call('a'), mock.call('c'), ]) mock_helper.assert_called_once_with(replica['share_proto']) mock_helper.return_value.remove_exports.assert_called_once_with( dataset_name) def test_update_replica(self): active_replica = { 'id': 'fake_active_replica_id', 'host': 'hostname1@backend_name1#foo', 'size': 5, 'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE, } replica = { 'id': 'fake_new_replica_id', 'host': 'hostname2@backend_name2#bar', 'share_proto': 'NFS', 'replica_state': None, } replica_list = [replica, active_replica] replica_snapshots = [] dst_dataset_name = ( 'bar/subbar/fake_dataset_name_prefix%s' % replica['id']) src_dataset_name = ( 'bar/subbar/fake_dataset_name_prefix%s' % active_replica['id']) access_rules = ['foo_rule', 'bar_rule'] old_repl_snapshot_tag = ( self.driver._get_replication_snapshot_prefix( active_replica) + 'foo') snap_tag_prefix = self.driver._get_replication_snapshot_prefix( replica) self.driver.private_storage.update( active_replica['id'], {'dataset_name': src_dataset_name, 'ssh_cmd': 'fake_src_ssh_cmd', 'repl_snapshot_tag': old_repl_snapshot_tag} ) self.driver.private_storage.update( replica['id'], {'dataset_name': dst_dataset_name, 'ssh_cmd': 
'fake_dst_ssh_cmd', 'repl_snapshot_tag': old_repl_snapshot_tag} ) self.mock_object( self.driver, 'execute', mock.Mock(side_effect=[('a', 'b'), ('c', 'd'), ('e', 'f')])) self.mock_object(self.driver, 'execute_with_retry', mock.Mock(side_effect=[('g', 'h')])) self.mock_object(self.driver, 'zfs', mock.Mock(side_effect=[('j', 'k'), ('l', 'm')])) self.mock_object( self.driver, 'parse_zfs_answer', mock.Mock(side_effect=[ ({'NAME': dst_dataset_name + '@' + old_repl_snapshot_tag}, {'NAME': dst_dataset_name + '@%s_time_some_time' % snap_tag_prefix}, {'NAME': 'other/dataset/name1@' + old_repl_snapshot_tag}), ({'NAME': src_dataset_name + '@' + old_repl_snapshot_tag}, {'NAME': src_dataset_name + '@' + snap_tag_prefix + 'quuz'}, {'NAME': 'other/dataset/name2@' + old_repl_snapshot_tag}), ]) ) mock_helper = self.mock_object(self.driver, '_get_share_helper') self.configuration.zfs_dataset_name_prefix = 'fake_dataset_name_prefix' mock_utcnow = self.mock_object(zfs_driver.timeutils, 'utcnow') mock_utcnow.return_value.isoformat.return_value = 'some_time' mock_delete_snapshot = self.mock_object( self.driver, '_delete_dataset_or_snapshot_with_retry') result = self.driver.update_replica_state( 'fake_context', replica_list, replica, access_rules, replica_snapshots) self.assertEqual(zfs_driver.constants.REPLICA_STATE_IN_SYNC, result) mock_helper.assert_called_once_with('NFS') mock_helper.return_value.update_access.assert_called_once_with( dst_dataset_name, access_rules, add_rules=[], delete_rules=[], make_all_ro=True) self.driver.execute_with_retry.assert_called_once_with( 'ssh', 'fake_src_ssh_cmd', 'sudo', 'zfs', 'destroy', '-f', src_dataset_name + '@' + snap_tag_prefix + 'quuz') self.driver.execute.assert_has_calls([ mock.call( 'ssh', 'fake_src_ssh_cmd', 'sudo', 'zfs', 'snapshot', src_dataset_name + '@' + self.driver._get_replication_snapshot_tag(replica)), mock.call( 'ssh', 'fake_src_ssh_cmd', 'sudo', 'zfs', 'send', '-vDRI', old_repl_snapshot_tag, src_dataset_name + '@%s' % snap_tag_prefix + '_time_some_time', '|', 'ssh', 'fake_dst_ssh_cmd', 'sudo', 'zfs', 'receive', '-vF', dst_dataset_name), mock.call( 'ssh', 'fake_src_ssh_cmd', 'sudo', 'zfs', 'list', '-r', '-t', 'snapshot', 'bar'), ]) mock_delete_snapshot.assert_called_once_with( dst_dataset_name + '@' + old_repl_snapshot_tag) self.driver.parse_zfs_answer.assert_has_calls( [mock.call('l'), mock.call('e')]) def test_promote_replica_active_available(self): active_replica = { 'id': 'fake_active_replica_id', 'host': 'hostname1@backend_name1#foo', 'size': 5, 'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE, } replica = { 'id': 'fake_first_replica_id', 'host': 'hostname2@backend_name2#bar', 'share_proto': 'NFS', 'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC, } second_replica = { 'id': 'fake_second_replica_id', 'host': 'hostname3@backend_name3#quuz', 'share_proto': 'NFS', 'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC, } replica_list = [replica, active_replica, second_replica] dst_dataset_name = ( 'bar/subbar/fake_dataset_name_prefix%s' % replica['id']) src_dataset_name = ( 'bar/subbar/fake_dataset_name_prefix%s' % active_replica['id']) access_rules = ['foo_rule', 'bar_rule'] old_repl_snapshot_tag = ( self.driver._get_replication_snapshot_prefix( active_replica) + 'foo') snap_tag_prefix = self.driver._get_replication_snapshot_prefix( active_replica) + '_time_some_time' self.driver.private_storage.update( active_replica['id'], {'dataset_name': src_dataset_name, 'ssh_cmd': 'fake_src_ssh_cmd', 'repl_snapshot_tag': 
old_repl_snapshot_tag} ) for repl in (replica, second_replica): self.driver.private_storage.update( repl['id'], {'dataset_name': ( 'bar/subbar/fake_dataset_name_prefix%s' % repl['id']), 'ssh_cmd': 'fake_dst_ssh_cmd', 'repl_snapshot_tag': old_repl_snapshot_tag} ) self.mock_object( self.driver, 'execute', mock.Mock(side_effect=[ ('a', 'b'), ('c', 'd'), ('e', 'f'), exception.ProcessExecutionError('Second replica sync failure'), ])) self.mock_object(self.driver, 'zfs', mock.Mock(side_effect=[('g', 'h')])) mock_helper = self.mock_object(self.driver, '_get_share_helper') self.configuration.zfs_dataset_name_prefix = 'fake_dataset_name_prefix' mock_utcnow = self.mock_object(zfs_driver.timeutils, 'utcnow') mock_utcnow.return_value.isoformat.return_value = 'some_time' mock_delete_snapshot = self.mock_object( self.driver, '_delete_dataset_or_snapshot_with_retry') result = self.driver.promote_replica( 'fake_context', replica_list, replica, access_rules) expected = [ {'access_rules_status': zfs_driver.constants.STATUS_OUT_OF_SYNC, 'id': 'fake_active_replica_id', 'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC}, {'access_rules_status': zfs_driver.constants.STATUS_ACTIVE, 'id': 'fake_first_replica_id', 'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE}, {'access_rules_status': zfs_driver.constants.STATUS_OUT_OF_SYNC, 'id': 'fake_second_replica_id', 'replica_state': zfs_driver.constants.REPLICA_STATE_OUT_OF_SYNC}, ] for repl in expected: self.assertIn(repl, result) self.assertEqual(3, len(result)) mock_helper.assert_called_once_with('NFS') mock_helper.return_value.update_access.assert_called_once_with( dst_dataset_name, access_rules, add_rules=[], delete_rules=[]) self.driver.zfs.assert_called_once_with( 'set', 'readonly=off', dst_dataset_name) self.assertEqual(0, mock_delete_snapshot.call_count) for repl in (active_replica, replica): self.assertEqual( snap_tag_prefix, self.driver.private_storage.get( repl['id'], 'repl_snapshot_tag')) self.assertEqual( old_repl_snapshot_tag, self.driver.private_storage.get( second_replica['id'], 'repl_snapshot_tag')) def test_promote_replica_active_not_available(self): active_replica = { 'id': 'fake_active_replica_id', 'host': 'hostname1@backend_name1#foo', 'size': 5, 'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE, } replica = { 'id': 'fake_first_replica_id', 'host': 'hostname2@backend_name2#bar', 'share_proto': 'NFS', 'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC, } second_replica = { 'id': 'fake_second_replica_id', 'host': 'hostname3@backend_name3#quuz', 'share_proto': 'NFS', 'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC, } third_replica = { 'id': 'fake_third_replica_id', 'host': 'hostname4@backend_name4#fff', 'share_proto': 'NFS', 'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC, } replica_list = [replica, active_replica, second_replica, third_replica] dst_dataset_name = ( 'bar/subbar/fake_dataset_name_prefix%s' % replica['id']) src_dataset_name = ( 'bar/subbar/fake_dataset_name_prefix%s' % active_replica['id']) access_rules = ['foo_rule', 'bar_rule'] old_repl_snapshot_tag = ( self.driver._get_replication_snapshot_prefix( active_replica) + 'foo') snap_tag_prefix = self.driver._get_replication_snapshot_prefix( replica) + '_time_some_time' self.driver.private_storage.update( active_replica['id'], {'dataset_name': src_dataset_name, 'ssh_cmd': 'fake_src_ssh_cmd', 'repl_snapshot_tag': old_repl_snapshot_tag} ) for repl in (replica, second_replica, third_replica): self.driver.private_storage.update( 
repl['id'], {'dataset_name': ( 'bar/subbar/fake_dataset_name_prefix%s' % repl['id']), 'ssh_cmd': 'fake_dst_ssh_cmd', 'repl_snapshot_tag': old_repl_snapshot_tag} ) self.mock_object( self.driver, 'execute', mock.Mock(side_effect=[ exception.ProcessExecutionError('Active replica failure'), ('a', 'b'), exception.ProcessExecutionError('Second replica sync failure'), ('c', 'd'), ])) self.mock_object(self.driver, 'zfs', mock.Mock(side_effect=[('g', 'h'), ('i', 'j')])) mock_helper = self.mock_object(self.driver, '_get_share_helper') self.configuration.zfs_dataset_name_prefix = 'fake_dataset_name_prefix' mock_utcnow = self.mock_object(zfs_driver.timeutils, 'utcnow') mock_utcnow.return_value.isoformat.return_value = 'some_time' mock_delete_snapshot = self.mock_object( self.driver, '_delete_dataset_or_snapshot_with_retry') result = self.driver.promote_replica( 'fake_context', replica_list, replica, access_rules) expected = [ {'access_rules_status': zfs_driver.constants.STATUS_OUT_OF_SYNC, 'id': 'fake_active_replica_id', 'replica_state': zfs_driver.constants.REPLICA_STATE_OUT_OF_SYNC}, {'access_rules_status': zfs_driver.constants.STATUS_ACTIVE, 'id': 'fake_first_replica_id', 'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE}, {'access_rules_status': zfs_driver.constants.STATUS_OUT_OF_SYNC, 'id': 'fake_second_replica_id'}, {'access_rules_status': zfs_driver.constants.STATUS_OUT_OF_SYNC, 'id': 'fake_third_replica_id', 'replica_state': zfs_driver.constants.REPLICA_STATE_OUT_OF_SYNC}, ] for repl in expected: self.assertIn(repl, result) self.assertEqual(4, len(result)) mock_helper.assert_called_once_with('NFS') mock_helper.return_value.update_access.assert_called_once_with( dst_dataset_name, access_rules, add_rules=[], delete_rules=[]) self.driver.zfs.assert_has_calls([ mock.call('snapshot', dst_dataset_name + '@' + snap_tag_prefix), mock.call('set', 'readonly=off', dst_dataset_name), ]) self.assertEqual(0, mock_delete_snapshot.call_count) for repl in (second_replica, replica): self.assertEqual( snap_tag_prefix, self.driver.private_storage.get( repl['id'], 'repl_snapshot_tag')) for repl in (active_replica, third_replica): self.assertEqual( old_repl_snapshot_tag, self.driver.private_storage.get( repl['id'], 'repl_snapshot_tag')) def test_create_replicated_snapshot(self): active_replica = { 'id': 'fake_active_replica_id', 'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE, } replica = { 'id': 'fake_first_replica_id', 'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC, } second_replica = { 'id': 'fake_second_replica_id', 'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC, } replica_list = [replica, active_replica, second_replica] snapshot_instances = [ {'id': 'si_%s' % r['id'], 'share_instance_id': r['id'], 'snapshot_id': 'some_snapshot_id'} for r in replica_list ] src_dataset_name = ( 'bar/subbar/fake_dataset_name_prefix%s' % active_replica['id']) old_repl_snapshot_tag = ( self.driver._get_replication_snapshot_prefix( active_replica) + 'foo') self.driver.private_storage.update( active_replica['id'], {'dataset_name': src_dataset_name, 'ssh_cmd': 'fake_src_ssh_cmd', 'repl_snapshot_tag': old_repl_snapshot_tag} ) for repl in (replica, second_replica): self.driver.private_storage.update( repl['id'], {'dataset_name': ( 'bar/subbar/fake_dataset_name_prefix%s' % repl['id']), 'ssh_cmd': 'fake_dst_ssh_cmd', 'repl_snapshot_tag': old_repl_snapshot_tag} ) self.mock_object( self.driver, 'execute', mock.Mock(side_effect=[ ('a', 'b'), ('c', 'd'), ('e', 'f'), 
exception.ProcessExecutionError('Second replica sync failure'), ])) self.configuration.zfs_dataset_name_prefix = 'fake_dataset_name_prefix' self.configuration.zfs_dataset_snapshot_name_prefix = ( 'fake_dataset_snapshot_name_prefix') snap_tag_prefix = ( self.configuration.zfs_dataset_snapshot_name_prefix + 'si_%s' % active_replica['id']) repl_snap_tag = 'fake_repl_tag' self.mock_object( self.driver, '_get_replication_snapshot_tag', mock.Mock(return_value=repl_snap_tag)) result = self.driver.create_replicated_snapshot( 'fake_context', replica_list, snapshot_instances) expected = [ {'id': 'si_fake_active_replica_id', 'status': zfs_driver.constants.STATUS_AVAILABLE}, {'id': 'si_fake_first_replica_id', 'status': zfs_driver.constants.STATUS_AVAILABLE}, {'id': 'si_fake_second_replica_id', 'status': zfs_driver.constants.STATUS_ERROR}, ] for repl in expected: self.assertIn(repl, result) self.assertEqual(3, len(result)) for repl in (active_replica, replica): self.assertEqual( repl_snap_tag, self.driver.private_storage.get( repl['id'], 'repl_snapshot_tag')) self.assertEqual( old_repl_snapshot_tag, self.driver.private_storage.get( second_replica['id'], 'repl_snapshot_tag')) self.assertEqual( snap_tag_prefix, self.driver.private_storage.get( snapshot_instances[0]['snapshot_id'], 'snapshot_tag')) self.driver._get_replication_snapshot_tag.assert_called_once_with( active_replica) def test_delete_replicated_snapshot(self): active_replica = { 'id': 'fake_active_replica_id', 'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE, } replica = { 'id': 'fake_first_replica_id', 'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC, } second_replica = { 'id': 'fake_second_replica_id', 'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC, } replica_list = [replica, active_replica, second_replica] active_snapshot_instance = { 'id': 'si_%s' % active_replica['id'], 'share_instance_id': active_replica['id'], 'snapshot_id': 'some_snapshot_id', 'share_id': 'some_share_id', } snapshot_instances = [ {'id': 'si_%s' % r['id'], 'share_instance_id': r['id'], 'snapshot_id': active_snapshot_instance['snapshot_id'], 'share_id': active_snapshot_instance['share_id']} for r in (replica, second_replica) ] snapshot_instances.append(active_snapshot_instance) for si in snapshot_instances: self.driver.private_storage.update( si['id'], {'snapshot_name': 'fake_snap_name_%s' % si['id']}) src_dataset_name = ( 'bar/subbar/fake_dataset_name_prefix%s' % active_replica['id']) old_repl_snapshot_tag = ( self.driver._get_replication_snapshot_prefix( active_replica) + 'foo') self.driver.private_storage.update( active_replica['id'], {'dataset_name': src_dataset_name, 'ssh_cmd': 'fake_src_ssh_cmd', 'repl_snapshot_tag': old_repl_snapshot_tag} ) for replica in (replica, second_replica): self.driver.private_storage.update( replica['id'], {'dataset_name': 'some_dataset_name', 'ssh_cmd': 'fake_ssh_cmd'} ) self.driver.private_storage.update( snapshot_instances[0]['snapshot_id'], {'snapshot_tag': 'foo_snapshot_tag'} ) snap_name = 'fake_snap_name' self.mock_object(self.driver, '_delete_snapshot') self.mock_object( self.driver, '_get_saved_snapshot_name', mock.Mock(return_value=snap_name)) self.mock_object(self.driver, 'execute_with_retry') self.mock_object( self.driver, 'execute', mock.Mock(side_effect=[ ('a', 'b'), ('c', 'd'), exception.ProcessExecutionError('Second replica sync failure'), ])) self.mock_object( self.driver, 'parse_zfs_answer', mock.Mock(side_effect=[ ({'NAME': 'foo'}, {'NAME': snap_name}), ({'NAME': 'bar'}, {'NAME': 
snap_name}), ])) expected = sorted([ {'id': si['id'], 'status': 'deleted'} for si in snapshot_instances ], key=lambda item: item['id']) result = self.driver.delete_replicated_snapshot( 'fake_context', replica_list, snapshot_instances) self.driver._get_saved_snapshot_name.assert_has_calls([ mock.call(si) for si in snapshot_instances ]) self.driver._delete_snapshot.assert_called_once_with( 'fake_context', active_snapshot_instance) self.driver.execute.assert_has_calls([ mock.call('ssh', 'fake_ssh_cmd', 'sudo', 'zfs', 'list', '-r', '-t', 'snapshot', snap_name) for i in (0, 1) ]) self.driver.execute_with_retry.assert_has_calls([ mock.call('ssh', 'fake_ssh_cmd', 'sudo', 'zfs', 'destroy', '-f', snap_name) for i in (0, 1) ]) self.assertIsInstance(result, list) self.assertEqual(3, len(result)) self.assertEqual(expected, sorted(result, key=lambda item: item['id'])) @ddt.data( ({'NAME': 'fake'}, zfs_driver.constants.STATUS_ERROR), ({'NAME': 'fake_snap_name'}, zfs_driver.constants.STATUS_AVAILABLE), ) @ddt.unpack def test_update_replicated_snapshot(self, parse_answer, expected_status): snap_name = 'fake_snap_name' self.mock_object(self.driver, '_update_replica_state') self.mock_object( self.driver, '_get_saved_snapshot_name', mock.Mock(return_value=snap_name)) self.mock_object( self.driver, 'zfs', mock.Mock(side_effect=[('a', 'b')])) self.mock_object( self.driver, 'parse_zfs_answer', mock.Mock(side_effect=[ [parse_answer] ])) fake_context = 'fake_context' replica_list = ['foo', 'bar'] share_replica = 'quuz' snapshot_instance = {'id': 'fake_snapshot_instance_id'} snapshot_instances = ['q', 'w', 'e', 'r', 't', 'y'] result = self.driver.update_replicated_snapshot( fake_context, replica_list, share_replica, snapshot_instances, snapshot_instance) self.driver._update_replica_state.assert_called_once_with( fake_context, replica_list, share_replica) self.driver._get_saved_snapshot_name.assert_called_once_with( snapshot_instance) self.driver.zfs.assert_called_once_with( 'list', '-r', '-t', 'snapshot', snap_name) self.driver.parse_zfs_answer.assert_called_once_with('a') self.assertIsInstance(result, dict) self.assertEqual(2, len(result)) self.assertIn('status', result) self.assertIn('id', result) self.assertEqual(expected_status, result['status']) self.assertEqual(snapshot_instance['id'], result['id']) manila-2.0.0/manila/tests/share/drivers/zfsonlinux/__init__.py0000664000567000056710000000000012701407107025613 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/zfsonlinux/test_utils.py0000664000567000056710000004416112701407107026273 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
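
# ---------------------------------------------------------------------------
# Illustrative sketch (assumed helper name, not part of the module under
# test): the ExecuteMixin tests below assert that parse_zfs_answer() turns
# tabular `zfs`/`zpool` output (a header row followed by value rows) into a
# list of dicts keyed by the header columns.  A minimal, self-contained
# version of that behaviour could look like this:
def _sketch_parse_zfs_answer(output):
    """Parse whitespace-separated `zfs list`/`zfs get` output into dicts."""
    lines = [line.strip() for line in output.splitlines() if line.strip()]
    if len(lines) < 2:
        return []
    keys = lines[0].split()
    return [dict(zip(keys, line.split())) for line in lines[1:]]
# For example, feeding it "NAME PROPERTY VALUE SOURCE\nfoo opt bar local"
# yields [{'NAME': 'foo', 'PROPERTY': 'opt', 'VALUE': 'bar',
#          'SOURCE': 'local'}].
# ---------------------------------------------------------------------------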
import time import ddt import mock from oslo_config import cfg from manila import exception from manila.share.drivers.ganesha import utils as ganesha_utils from manila.share.drivers.zfsonlinux import utils as zfs_utils from manila import test CONF = cfg.CONF def get_fake_configuration(*args, **kwargs): fake_config_options = { "zfs_use_ssh": kwargs.get("zfs_use_ssh", False), "zfs_share_export_ip": kwargs.get( "zfs_share_export_ip", "240.241.242.243"), "zfs_service_ip": kwargs.get("zfs_service_ip", "240.241.242.244"), "ssh_conn_timeout": kwargs.get("ssh_conn_timeout", 123), "zfs_ssh_username": kwargs.get( "zfs_ssh_username", 'fake_username'), "zfs_ssh_user_password": kwargs.get( "zfs_ssh_user_password", 'fake_pass'), "zfs_ssh_private_key_path": kwargs.get( "zfs_ssh_private_key_path", '/fake/path'), "append_config_values": mock.Mock(), } return type("FakeConfig", (object, ), fake_config_options) class FakeShareDriver(zfs_utils.ExecuteMixin): def __init__(self, *args, **kwargs): self.configuration = get_fake_configuration(*args, **kwargs) self.init_execute_mixin(*args, **kwargs) @ddt.ddt class ExecuteMixinTestCase(test.TestCase): def setUp(self): super(self.__class__, self).setUp() self.ssh_executor = self.mock_object(ganesha_utils, 'SSHExecutor') self.driver = FakeShareDriver() def test_init(self): self.assertIsNone(self.driver.ssh_executor) self.assertEqual(0, self.ssh_executor.call_count) def test_init_ssh(self): driver = FakeShareDriver(zfs_use_ssh=True) self.assertIsNotNone(driver.ssh_executor) self.ssh_executor.assert_called_once_with( ip=driver.configuration.zfs_service_ip, port=22, conn_timeout=driver.configuration.ssh_conn_timeout, login=driver.configuration.zfs_ssh_username, password=driver.configuration.zfs_ssh_user_password, privatekey=driver.configuration.zfs_ssh_private_key_path, max_size=10, ) def test_local_shell_execute(self): self.mock_object(self.driver, '_execute') self.driver.execute('fake', '--foo', '--bar') self.assertEqual(0, self.ssh_executor.call_count) self.driver._execute.assert_called_once_with( 'fake', '--foo', '--bar') def test_local_shell_execute_with_sudo(self): self.mock_object(self.driver, '_execute') self.driver.execute('sudo', 'fake', '--foo', '--bar') self.assertEqual(0, self.ssh_executor.call_count) self.driver._execute.assert_called_once_with( 'fake', '--foo', '--bar', run_as_root=True) def test_ssh_execute(self): driver = FakeShareDriver(zfs_use_ssh=True) self.mock_object(driver, '_execute') driver.execute('fake', '--foo', '--bar') self.assertEqual(0, driver._execute.call_count) self.ssh_executor.return_value.assert_called_once_with( 'fake', '--foo', '--bar') def test_ssh_execute_with_sudo(self): driver = FakeShareDriver(zfs_use_ssh=True) self.mock_object(driver, '_execute') driver.execute('sudo', 'fake', '--foo', '--bar') self.assertEqual(0, driver._execute.call_count) self.ssh_executor.return_value.assert_called_once_with( 'fake', '--foo', '--bar', run_as_root=True) def test_execute_with_retry(self): self.mock_object(time, 'sleep') self.mock_object(self.driver, 'execute', mock.Mock( side_effect=[exception.ProcessExecutionError('FAKE'), None])) self.driver.execute_with_retry('foo', 'bar') self.assertEqual(2, self.driver.execute.call_count) self.driver.execute.assert_has_calls( [mock.call('foo', 'bar'), mock.call('foo', 'bar')]) def test_execute_with_retry_exceeded(self): self.mock_object(time, 'sleep') self.mock_object(self.driver, 'execute', mock.Mock( side_effect=exception.ProcessExecutionError('FAKE'))) self.assertRaises( 
            exception.ProcessExecutionError,
            self.driver.execute_with_retry,
            'foo', 'bar',
        )
        self.assertEqual(36, self.driver.execute.call_count)

    @ddt.data(True, False)
    def test__get_option(self, pool_level):
        out = """NAME PROPERTY VALUE SOURCE\n
foo_resource_name bar_option_name some_value local"""
        self.mock_object(
            self.driver, '_execute', mock.Mock(return_value=(out, '')))
        res_name = 'foo_resource_name'
        opt_name = 'bar_option_name'

        result = self.driver._get_option(
            res_name, opt_name, pool_level=pool_level)

        self.assertEqual('some_value', result)
        self.driver._execute.assert_called_once_with(
            'zpool' if pool_level else 'zfs', 'get', opt_name, res_name,
            run_as_root=True)

    def test_parse_zfs_answer(self):
        not_parsed_str = """NAME PROPERTY VALUE SOURCE\n
foo_res opt_1 bar local
foo_res opt_2 foo default
foo_res opt_3 some_value local"""
        expected = [
            {'NAME': 'foo_res', 'PROPERTY': 'opt_1', 'VALUE': 'bar',
             'SOURCE': 'local'},
            {'NAME': 'foo_res', 'PROPERTY': 'opt_2', 'VALUE': 'foo',
             'SOURCE': 'default'},
            {'NAME': 'foo_res', 'PROPERTY': 'opt_3', 'VALUE': 'some_value',
             'SOURCE': 'local'},
        ]

        result = self.driver.parse_zfs_answer(not_parsed_str)

        self.assertEqual(expected, result)

    def test_parse_zfs_answer_empty(self):
        result = self.driver.parse_zfs_answer('')

        self.assertEqual([], result)

    def test_get_zpool_option(self):
        self.mock_object(self.driver, '_get_option')
        zpool_name = 'foo_resource_name'
        opt_name = 'bar_option_name'

        result = self.driver.get_zpool_option(zpool_name, opt_name)

        self.assertEqual(self.driver._get_option.return_value, result)
        self.driver._get_option.assert_called_once_with(
            zpool_name, opt_name, True)

    def test_get_zfs_option(self):
        self.mock_object(self.driver, '_get_option')
        dataset_name = 'foo_resource_name'
        opt_name = 'bar_option_name'

        result = self.driver.get_zfs_option(dataset_name, opt_name)

        self.assertEqual(self.driver._get_option.return_value, result)
        self.driver._get_option.assert_called_once_with(
            dataset_name, opt_name, False)

    def test_zfs(self):
        self.mock_object(self.driver, 'execute')
        self.mock_object(self.driver, 'execute_with_retry')

        self.driver.zfs('foo', 'bar')

        self.assertEqual(0, self.driver.execute_with_retry.call_count)
        self.driver.execute.assert_called_once_with(
            'sudo', 'zfs', 'foo', 'bar')


@ddt.ddt
class NFSviaZFSHelperTestCase(test.TestCase):

    def setUp(self):
        super(NFSviaZFSHelperTestCase, self).setUp()
        configuration = get_fake_configuration()
        self.out = "fake_out"
        self.mock_object(
            zfs_utils.utils, "execute",
            mock.Mock(return_value=(self.out, "")))
        self.helper = zfs_utils.NFSviaZFSHelper(configuration)

    def test_init(self):
        zfs_utils.utils.execute.assert_has_calls([
            mock.call("which", "exportfs"),
            mock.call("exportfs", run_as_root=True),
        ])

    def test_verify_setup_exportfs_not_installed(self):
        zfs_utils.utils.execute.reset_mock()
        zfs_utils.utils.execute.side_effect = [('', '')]

        self.assertRaises(
            exception.ZFSonLinuxException, self.helper.verify_setup)

        zfs_utils.utils.execute.assert_called_once_with("which", "exportfs")

    def test_verify_setup_error_calling_exportfs(self):
        zfs_utils.utils.execute.reset_mock()
        zfs_utils.utils.execute.side_effect = [
            ('fake_out', ''), exception.ProcessExecutionError('Fake')]

        self.assertRaises(
            exception.ProcessExecutionError, self.helper.verify_setup)

        zfs_utils.utils.execute.assert_has_calls([
            mock.call("which", "exportfs"),
            mock.call("exportfs", run_as_root=True),
        ])

    def test_is_kernel_version_true(self):
        zfs_utils.utils.execute.reset_mock()

        self.assertTrue(self.helper.is_kernel_version)
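        # is_kernel_version is expected to be derived from probing
        # "modinfo zfs"; this test and the two that follow cover the truthy
        # case, the falsey case when the probe fails, and a repeated access.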
zfs_utils.utils.execute.assert_has_calls([ mock.call("modinfo", "zfs"), ]) def test_is_kernel_version_false(self): zfs_utils.utils.execute.reset_mock() zfs_utils.utils.execute.side_effect = ( exception.ProcessExecutionError('Fake')) self.assertFalse(self.helper.is_kernel_version) zfs_utils.utils.execute.assert_has_calls([ mock.call("modinfo", "zfs"), ]) def test_is_kernel_version_second_call(self): zfs_utils.utils.execute.reset_mock() self.assertTrue(self.helper.is_kernel_version) self.assertTrue(self.helper.is_kernel_version) zfs_utils.utils.execute.assert_has_calls([ mock.call("modinfo", "zfs"), ]) def test_create_exports(self): self.mock_object(self.helper, 'get_exports') result = self.helper.create_exports('foo') self.assertEqual( self.helper.get_exports.return_value, result) def test_get_exports(self): self.mock_object( self.helper, 'get_zfs_option', mock.Mock(return_value='fake_mp')) expected = [ { "path": "%s:fake_mp" % ip, "metadata": {}, "is_admin_only": is_admin_only, } for ip, is_admin_only in ( (self.helper.configuration.zfs_share_export_ip, False), (self.helper.configuration.zfs_service_ip, True)) ] result = self.helper.get_exports('foo') self.assertEqual(expected, result) self.helper.get_zfs_option.assert_called_once_with('foo', 'mountpoint') def test_remove_exports(self): zfs_utils.utils.execute.reset_mock() self.mock_object( self.helper, 'get_zfs_option', mock.Mock(return_value='bar')) self.helper.remove_exports('foo') self.helper.get_zfs_option.assert_called_once_with('foo', 'sharenfs') zfs_utils.utils.execute.assert_called_once_with( 'zfs', 'set', 'sharenfs=off', 'foo', run_as_root=True) def test_remove_exports_that_absent(self): zfs_utils.utils.execute.reset_mock() self.mock_object( self.helper, 'get_zfs_option', mock.Mock(return_value='off')) self.helper.remove_exports('foo') self.helper.get_zfs_option.assert_called_once_with('foo', 'sharenfs') self.assertEqual(0, zfs_utils.utils.execute.call_count) @ddt.data( (('fake_modinfo_result', ''), ('sharenfs=rw=1.1.1.1:3.3.3.0/255.255.255.0,no_root_squash,' 'ro=2.2.2.2,no_root_squash'), False), (('fake_modinfo_result', ''), ('sharenfs=ro=1.1.1.1:2.2.2.2:3.3.3.0/255.255.255.0,no_root_squash'), True), (exception.ProcessExecutionError('Fake'), ('sharenfs=1.1.1.1:rw,no_root_squash 3.3.3.0/255.255.255.0:rw,' 'no_root_squash 2.2.2.2:ro,no_root_squash'), False), (exception.ProcessExecutionError('Fake'), ('sharenfs=1.1.1.1:ro,no_root_squash 2.2.2.2:ro,' 'no_root_squash 3.3.3.0/255.255.255.0:ro,no_root_squash'), True), ) @ddt.unpack def test_update_access_rw_and_ro(self, modinfo_response, access_str, make_all_ro): zfs_utils.utils.execute.reset_mock() dataset_name = 'zpoolz/foo_dataset_name/fake' zfs_utils.utils.execute.side_effect = [ modinfo_response, ("""NAME USED AVAIL REFER MOUNTPOINT\n %(dn)s 2.58M 14.8G 27.5K /%(dn)s\n %(dn)s_some_other 3.58M 15.8G 28.5K /%(dn)s\n """ % {'dn': dataset_name}, ''), ('fake_set_opt_result', ''), ("""NAME PROPERTY VALUE SOURCE\n %s mountpoint /%s default\n """ % (dataset_name, dataset_name), ''), ('fake_1_result', ''), ('fake_2_result', ''), ('fake_3_result', ''), ('fake_4_result', ''), ('fake_5_result', ''), ] access_rules = [ {'access_type': 'ip', 'access_level': 'rw', 'access_to': '1.1.1.1'}, {'access_type': 'ip', 'access_level': 'ro', 'access_to': '2.2.2.2'}, {'access_type': 'ip', 'access_level': 'rw', 'access_to': '3.3.3.0/24'}, ] delete_rules = [ {'access_type': 'ip', 'access_level': 'rw', 'access_to': '4.4.4.4'}, {'access_type': 'ip', 'access_level': 'ro', 'access_to': '5.5.5.5/32'}, 
{'access_type': 'ip', 'access_level': 'ro', 'access_to': '5.5.5.6/16'}, {'access_type': 'ip', 'access_level': 'ro', 'access_to': '5.5.5.7/0'}, {'access_type': 'user', 'access_level': 'rw', 'access_to': '6.6.6.6'}, {'access_type': 'user', 'access_level': 'ro', 'access_to': '7.7.7.7'}, ] self.helper.update_access( dataset_name, access_rules, [], delete_rules, make_all_ro=make_all_ro) zfs_utils.utils.execute.assert_has_calls([ mock.call('modinfo', 'zfs'), mock.call('zfs', 'list', '-r', 'zpoolz', run_as_root=True), mock.call( 'zfs', 'set', access_str, dataset_name, run_as_root=True), mock.call( 'zfs', 'get', 'mountpoint', dataset_name, run_as_root=True), mock.call( 'exportfs', '-u', '4.4.4.4:/%s' % dataset_name, run_as_root=True), mock.call( 'exportfs', '-u', '5.5.5.5:/%s' % dataset_name, run_as_root=True), mock.call( 'exportfs', '-u', '5.5.5.6/255.255.0.0:/%s' % dataset_name, run_as_root=True), mock.call( 'exportfs', '-u', '5.5.5.7/0.0.0.0:/%s' % dataset_name, run_as_root=True), ]) def test_update_access_dataset_not_found(self): self.mock_object(zfs_utils.LOG, 'warning') zfs_utils.utils.execute.reset_mock() dataset_name = 'zpoolz/foo_dataset_name/fake' zfs_utils.utils.execute.side_effect = [ ('fake_modinfo_result', ''), ('fake_dataset_not_found_result', ''), ('fake_set_opt_result', ''), ] access_rules = [ {'access_type': 'ip', 'access_level': 'rw', 'access_to': '1.1.1.1'}, {'access_type': 'ip', 'access_level': 'ro', 'access_to': '1.1.1.2'}, ] self.helper.update_access(dataset_name, access_rules, [], []) zfs_utils.utils.execute.assert_has_calls([ mock.call('modinfo', 'zfs'), mock.call('zfs', 'list', '-r', 'zpoolz', run_as_root=True), ]) zfs_utils.LOG.warning.assert_called_once_with( mock.ANY, {'name': dataset_name}) @ddt.data(exception.ProcessExecutionError('Fake'), ('Ok', '')) def test_update_access_no_rules(self, first_execute_result): zfs_utils.utils.execute.reset_mock() dataset_name = 'zpoolz/foo_dataset_name/fake' zfs_utils.utils.execute.side_effect = [ ('fake_modinfo_result', ''), ("""NAME USED AVAIL REFER MOUNTPOINT\n %s 2.58M 14.8G 27.5K /%s\n """ % (dataset_name, dataset_name), ''), ('fake_set_opt_result', ''), ] self.helper.update_access(dataset_name, [], [], []) zfs_utils.utils.execute.assert_has_calls([ mock.call('modinfo', 'zfs'), mock.call('zfs', 'list', '-r', 'zpoolz', run_as_root=True), mock.call('zfs', 'set', 'sharenfs=off', dataset_name, run_as_root=True), ]) @ddt.data('user', 'cert', 'cephx', '', 'fake', 'i', 'p') def test_update_access_not_ip_access_type(self, access_type): zfs_utils.utils.execute.reset_mock() dataset_name = 'zpoolz/foo_dataset_name/fake' access_rules = [ {'access_type': access_type, 'access_level': 'rw', 'access_to': '1.1.1.1'}, {'access_type': 'ip', 'access_level': 'ro', 'access_to': '1.1.1.2'}, ] self.assertRaises( exception.InvalidShareAccess, self.helper.update_access, dataset_name, access_rules, access_rules, [], ) self.assertEqual(0, zfs_utils.utils.execute.call_count) @ddt.data('', 'r', 'o', 'w', 'fake', 'su') def test_update_access_neither_rw_nor_ro_access_level(self, access_level): zfs_utils.utils.execute.reset_mock() dataset_name = 'zpoolz/foo_dataset_name/fake' access_rules = [ {'access_type': 'ip', 'access_level': access_level, 'access_to': '1.1.1.1'}, {'access_type': 'ip', 'access_level': 'ro', 'access_to': '1.1.1.2'}, ] self.assertRaises( exception.InvalidShareAccess, self.helper.update_access, dataset_name, access_rules, access_rules, [], ) self.assertEqual(0, zfs_utils.utils.execute.call_count) 
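

# ---------------------------------------------------------------------------
# Illustrative sketch (assumed helper name, not the driver's actual code):
# the update_access tests above expect NFS access rules to be flattened into
# a single "sharenfs=" dataset option.  For the non-kernel ("fuse") case the
# expected strings take the shape '<host>:<level>,no_root_squash' joined by
# spaces, with rw entries grouped before ro entries and CIDR prefixes
# rewritten as dotted netmasks; the grouping and netmask rewriting are
# omitted here for brevity.
def _sketch_sharenfs_value(ip_rules):
    """Render ip access rules into a fuse-style sharenfs value (sketch)."""
    parts = []
    for rule in ip_rules:
        if rule['access_type'] != 'ip':
            # Only ip rules are meaningful for NFS exports in these tests.
            raise ValueError('unsupported access type: %s'
                             % rule['access_type'])
        parts.append('%s:%s,no_root_squash'
                     % (rule['access_to'], rule['access_level']))
    return ('sharenfs=' + ' '.join(parts)) if parts else 'sharenfs=off'
# For example, a single rw rule for 1.1.1.1 renders as
# 'sharenfs=1.1.1.1:rw,no_root_squash', and an empty rule list renders as
# 'sharenfs=off', matching test_update_access_no_rules above.
# ---------------------------------------------------------------------------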
manila-2.0.0/manila/tests/share/drivers/huawei/0000775000567000056710000000000012701407265022564 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/huawei/__init__.py0000664000567000056710000000000012701407107024656 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/huawei/test_huawei_nas.py0000664000567000056710000044661412701407107026332 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the Huawei nas driver module.""" import os import shutil import tempfile import time import xml.dom.minidom import ddt import mock from oslo_serialization import jsonutils from manila import context from manila.data import utils as data_utils from manila import db from manila import exception from manila.share import configuration as conf from manila.share.drivers.huawei import constants from manila.share.drivers.huawei import huawei_nas from manila.share.drivers.huawei.v3 import connection from manila.share.drivers.huawei.v3 import helper from manila.share.drivers.huawei.v3 import smartx from manila import test from manila import utils def fake_sleep(time): pass def data_session(url): if url == "/xx/sessions": data = """{"error":{"code":0}, "data":{"username":"admin", "iBaseToken":"2001031430", "deviceid":"210235G7J20000000000"}}""" if url == "sessions": data = '{"error":{"code":0},"data":{"ID":11}}' return data def filesystem(method, data, fs_status_flag): extend_share_flag = False shrink_share_flag = False if method == "PUT": if data == """{"CAPACITY": 10485760}""": data = """{"error":{"code":0}, "data":{"ID":"4", "CAPACITY":"8388608"}}""" extend_share_flag = True elif data == """{"CAPACITY": 2097152}""": data = """{"error":{"code":0}, "data":{"ID":"4", "CAPACITY":"2097152"}}""" shrink_share_flag = True elif data == """{"NAME": "share_fake_manage_uuid"}""": data = """{"error":{"code":0}, "data":{"ID":"4", "CAPACITY":"8388608"}}""" elif data == jsonutils.dumps({"ENABLEDEDUP": True, "ENABLECOMPRESSION": True}): data = """{"error":{"code":0}, "data":{"ID":"4", "CAPACITY":"8388608"}}""" elif data == jsonutils.dumps({"ENABLEDEDUP": False, "ENABLECOMPRESSION": False}): data = """{"error":{"code":0}, "data":{"ID":"4", "CAPACITY":"8388608"}}""" elif data == """{"IOPRIORITY": "3"}""": data = """{"error":{"code":0}}""" elif method == "DELETE": data = """{"error":{"code":0}}""" elif method == "GET": if fs_status_flag: data = """{"error":{"code":0}, "data":{"HEALTHSTATUS":"1", "RUNNINGSTATUS":"27", "ALLOCTYPE":"1", "CAPACITY":"8388608", "PARENTNAME":"OpenStack_Pool", "ENABLECOMPRESSION":"false", "ENABLEDEDUP":"false", "CACHEPARTITIONID":"", "SMARTCACHEPARTITIONID":"", "IOCLASSID":"11"}}""" else: data = """{"error":{"code":0}, "data":{"HEALTHSTATUS":"0", "RUNNINGSTATUS":"27", "ALLOCTYPE":"0", "CAPACITY":"8388608", "PARENTNAME":"OpenStack_Pool", "ENABLECOMPRESSION":"false", "ENABLEDEDUP":"false", "CACHEPARTITIONID":"", "SMARTCACHEPARTITIONID":"", 
"IOCLASSID":"11"}}""" else: data = '{"error":{"code":31755596}}' return (data, extend_share_flag, shrink_share_flag) def filesystem_thick(method, data, fs_status_flag): extend_share_flag = False shrink_share_flag = False if method == "PUT": if data == """{"CAPACITY": 10485760}""": data = """{"error":{"code":0}, "data":{"ID":"5", "CAPACITY":"8388608"}}""" extend_share_flag = True elif data == """{"CAPACITY": 2097152}""": data = """{"error":{"code":0}, "data":{"ID":"5", "CAPACITY":"2097152"}}""" shrink_share_flag = True elif data == """{"NAME": "share_fake_uuid_thickfs"}""": data = """{"error":{"code":0}, "data":{"ID":"5", "CAPACITY":"8388608"}}""" elif data == jsonutils.dumps({"ENABLEDEDUP": False, "ENABLECOMPRESSION": False}): data = """{"error":{"code":0}, "data":{"ID":"5", "CAPACITY":"8388608"}}""" elif method == "DELETE": data = """{"error":{"code":0}}""" elif method == "GET": if fs_status_flag: data = """{"error":{"code":0}, "data":{"HEALTHSTATUS":"1", "RUNNINGSTATUS":"27", "ALLOCTYPE":"0", "CAPACITY":"8388608", "PARENTNAME":"OpenStack_Pool_Thick", "ENABLECOMPRESSION":"false", "ENABLEDEDUP":"false", "CACHEPARTITIONID":"", "SMARTCACHEPARTITIONID":"", "IOCLASSID":"11"}}""" else: data = """{"error":{"code":0}, "data":{"HEALTHSTATUS":"0", "RUNNINGSTATUS":"27", "ALLOCTYPE":"0", "CAPACITY":"8388608", "PARENTNAME":"OpenStack_Pool_Thick", "ENABLECOMPRESSION":"false", "ENABLEDEDUP":"false", "CACHEPARTITIONID":"", "SMARTCACHEPARTITIONID":"", "IOCLASSID":"11"}}""" else: data = '{"error":{"code":31755596}}' return (data, extend_share_flag, shrink_share_flag) def filesystem_inpartition(method, data, fs_status_flag): extend_share_flag = False shrink_share_flag = False if method == "PUT": if data == """{"CAPACITY": 10485760}""": data = """{"error":{"code":0}, "data":{"ID":"6", "CAPACITY":"8388608"}}""" extend_share_flag = True elif data == """{"CAPACITY": 2097152}""": data = """{"error":{"code":0}, "data":{"ID":"6", "CAPACITY":"2097152"}}""" shrink_share_flag = True elif data == """{"NAME": "share_fake_manage_uuid"}""": data = """{"error":{"code":0}, "data":{"ID":"6", "CAPACITY":"8388608"}}""" elif data == """{"NAME": "share_fake_uuid_inpartition"}""": data = """{"error":{"code":0}, "data":{"ID":"6", "CAPACITY":"8388608"}}""" elif data == jsonutils.dumps({"ENABLEDEDUP": True, "ENABLECOMPRESSION": True}): data = """{"error":{"code":0}, "data":{"ID":"6", "CAPACITY":"8388608"}}""" elif data == jsonutils.dumps({"ENABLEDEDUP": False, "ENABLECOMPRESSION": False}): data = """{"error":{"code":0}, "data":{"ID":"6", "CAPACITY":"8388608"}}""" elif method == "DELETE": data = """{"error":{"code":0}}""" elif method == "GET": if fs_status_flag: data = """{"error":{"code":0}, "data":{"HEALTHSTATUS":"1", "RUNNINGSTATUS":"27", "ALLOCTYPE":"1", "CAPACITY":"8388608", "PARENTNAME":"OpenStack_Pool", "ENABLECOMPRESSION":"false", "ENABLEDEDUP":"false", "CACHEPARTITIONID":"1", "SMARTCACHEPARTITIONID":"1", "IOCLASSID":"11"}}""" else: data = """{"error":{"code":0}, "data":{"HEALTHSTATUS":"0", "RUNNINGSTATUS":"27", "ALLOCTYPE":"0", "CAPACITY":"8388608", "PARENTNAME":"OpenStack_Pool", "ENABLECOMPRESSION":"false", "ENABLEDEDUP":"false", "CACHEPARTITIONID":"1", "SMARTCACHEPARTITIONID":"1", "IOCLASSID":"11"}}""" else: data = '{"error":{"code":31755596}}' return (data, extend_share_flag, shrink_share_flag) def allow_access(type, method, data): allow_ro_flag = False allow_rw_flag = False request_data = jsonutils.loads(data) success_data = """{"error":{"code":0}}""" fail_data = """{"error":{"code":1077939723}}""" ret = None if type == 
"NFS": if request_data['ACCESSVAL'] == '0': allow_ro_flag = True ret = success_data elif request_data['ACCESSVAL'] == '1': allow_rw_flag = True ret = success_data elif type == "CIFS": if request_data['PERMISSION'] == '0': allow_ro_flag = True ret = success_data elif request_data['PERMISSION'] == '1': allow_rw_flag = True ret = success_data # Group name should start with '@'. if ('group' in request_data['NAME'] and not request_data['NAME'].startswith('@')): ret = fail_data if ret is None: ret = fail_data return (ret, allow_ro_flag, allow_rw_flag) def dec_driver_handles_share_servers(func): def wrapper(*args, **kw): self = args[0] self.configuration.driver_handles_share_servers = True self.recreate_fake_conf_file(logical_port='CTE0.A.H0') self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.driver.plugin.helper.login() return func(*args, **kw) return wrapper def QoS_response(method): if method == "GET": data = """{"error":{"code":0}, "data":{"NAME": "OpenStack_Fake_QoS", "MAXIOPS": "100", "FSLIST": "4", "LUNLIST": "", "RUNNINGSTATUS": "2"}}""" elif method == "PUT": data = """{"error":{"code":0}}""" else: data = """{"error":{"code":0}, "data":{"ID": "11"}}""" return data class FakeHuaweiNasHelper(helper.RestHelper): def __init__(self, *args, **kwargs): helper.RestHelper.__init__(self, *args, **kwargs) self.test_normal = True self.deviceid = None self.delete_flag = False self.allow_flag = False self.deny_flag = False self.create_snapflag = False self.setupserver_flag = False self.fs_status_flag = True self.create_share_flag = False self.snapshot_flag = True self.service_status_flag = True self.share_exist = True self.service_nfs_status_flag = True self.create_share_data_flag = False self.allow_ro_flag = False self.allow_rw_flag = False self.extend_share_flag = False self.shrink_share_flag = False self.add_fs_to_partition_flag = False self.add_fs_to_cache_flag = False self.test_multi_url_flag = 0 self.cache_exist = True self.partition_exist = True self.alloc_type = None def _change_file_mode(self, filepath): pass def do_call(self, url, data=None, method=None, calltimeout=4): url = url.replace('http://100.115.10.69:8082/deviceManager/rest', '') url = url.replace('/210235G7J20000000000/', '') if self.test_normal: if self.test_multi_url_flag == 1: data = '{"error":{"code":-403}}' res_json = jsonutils.loads(data) return res_json elif self.test_multi_url_flag == 2: if ('http://100.115.10.70:8082/deviceManager/rest/xx/' 'sessions' == url): self.url = url data = data_session("/xx/sessions") res_json = jsonutils.loads(data) return res_json elif (('/xx/sessions' == url) or (self.url is not None and 'http://100.115.10.69:8082/deviceManager/rest' in self.url)): data = '{"error":{"code":-403}}' res_json = jsonutils.loads(data) return res_json if url == "/xx/sessions" or url == "/sessions": data = data_session(url) if url == "/storagepool": data = """{"error":{"code":0}, "data":[{"USERFREECAPACITY":"2097152", "ID":"1", "NAME":"OpenStack_Pool", "USERTOTALCAPACITY":"4194304", "USAGETYPE":"2", "USERCONSUMEDCAPACITY":"2097152"}, {"USERFREECAPACITY":"2097152", "ID":"2", "NAME":"OpenStack_Pool_Thick", "USERTOTALCAPACITY":"4194304", "USAGETYPE":"2", "USERCONSUMEDCAPACITY":"2097152"}]}""" if url == "/filesystem": request_data = jsonutils.loads(data) self.alloc_type = request_data.get('ALLOCTYPE') data = """{"error":{"code":0},"data":{ "ID":"4"}}""" if url == "/system/": data = """{"error":{"code":0}, "data":{"PRODUCTVERSION": "V300R003C10"}}""" if url == "/ioclass" or url == 
"/ioclass/11": data = QoS_response(method) if url == "/ioclass/active/11": data = """{"error":{"code":0}, "data":[{"ID": "11", "MAXIOPS": "100", "FSLIST": ""}]}""" if url == "/NFSHARE" or url == "/CIFSHARE": if self.create_share_flag: data = '{"error":{"code":31755596}}' elif self.create_share_data_flag: data = '{"error":{"code":0}}' else: data = """{"error":{"code":0},"data":{ "ID":"10"}}""" if url == "/NFSHARE?range=[100-200]": if self.share_exist: data = """{"error":{"code":0}, "data":[{"ID":"1", "FSID":"4", "NAME":"test", "SHAREPATH":"/share_fake_uuid/"}, {"ID":"2", "FSID":"5", "NAME":"test", "SHAREPATH":"/share_fake_uuid_thickfs/"}, {"ID":"3", "FSID":"6", "NAME":"test", "SHAREPATH":"/share_fake_uuid_inpartition/"}]}""" else: data = """{"error":{"code":0}, "data":[{"ID":"1", "FSID":"4", "NAME":"test", "SHAREPATH":"/share_fake_uuid_fail/"}]}""" if url == "/CIFSHARE?range=[100-200]": data = """{"error":{"code":0}, "data":[{"ID":"2", "FSID":"4", "NAME":"test", "SHAREPATH":"/share_fake_uuid/"}]}""" if url == "/NFSHARE?range=[0-100]": data = """{"error":{"code":0}, "data":[{"ID":"1", "FSID":"4", "NAME":"test_fail", "SHAREPATH":"/share_fake_uuid_fail/"}]}""" if url == "/CIFSHARE?range=[0-100]": data = """{"error":{"code":0}, "data":[{"ID":"2", "FSID":"4", "NAME":"test_fail", "SHAREPATH":"/share_fake_uuid_fail/"}]}""" if url == "/NFSHARE/1" or url == "/CIFSHARE/2": data = """{"error":{"code":0}}""" self.delete_flag = True if url == "/FSSNAPSHOT": data = """{"error":{"code":0},"data":{ "ID":"3"}}""" self.create_snapflag = True if url == "/FSSNAPSHOT/4@share_snapshot_fake_snapshot_uuid": if self.snapshot_flag: data = """{"error":{"code":0},"data":{"ID":"3"}}""" else: data = '{"error":{"code":1073754118}}' self.delete_flag = True if url == "/FSSNAPSHOT/3": data = """{"error":{"code":0}}""" self.delete_flag = True if url == "/NFS_SHARE_AUTH_CLIENT": data, self.allow_ro_flag, self.allow_rw_flag = \ allow_access('NFS', method, data) self.allow_flag = True if url == "/CIFS_SHARE_AUTH_CLIENT": data, self.allow_ro_flag, self.allow_rw_flag = \ allow_access('CIFS', method, data) self.allow_flag = True if url == "/FSSNAPSHOT?TYPE=48&PARENTID=4"\ "&&sortby=TIMESTAMP,d&range=[0-2000]": data = """{"error":{"code":0}, "data":[{"ID":"3", "NAME":"share_snapshot_fake_snapshot_uuid"}]}""" self.delete_flag = True if url == "/NFS_SHARE_AUTH_CLIENT?"\ "filter=PARENTID::1&range=[0-100]": data = """{"error":{"code":0}, "data":[{"ID":"0", "NAME":"100.112.0.1_fail"}]}""" if url == "/CIFS_SHARE_AUTH_CLIENT?"\ "filter=PARENTID::2&range=[0-100]": data = """{"error":{"code":0}, "data":[{"ID":"0", "NAME":"user_name_fail"}]}""" if url == "/NFS_SHARE_AUTH_CLIENT?"\ "filter=PARENTID::1&range=[100-200]": data = """{"error":{"code":0}, "data":[{"ID":"5", "NAME":"100.112.0.2"}]}""" if url == "/CIFS_SHARE_AUTH_CLIENT?"\ "filter=PARENTID::2&range=[100-200]": data = """{"error":{"code":0}, "data":[{"ID":"6", "NAME":"user_exist"}]}""" if url in ("/NFS_SHARE_AUTH_CLIENT/0", "/NFS_SHARE_AUTH_CLIENT/5", "/CIFS_SHARE_AUTH_CLIENT/0", "/CIFS_SHARE_AUTH_CLIENT/6"): if method == "DELETE": data = """{"error":{"code":0}}""" self.deny_flag = True elif method == "GET": if 'CIFS' in url: data = """{"error":{"code":0}, "data":{"'PERMISSION'":"0"}}""" else: data = """{"error":{"code":0}, "data":{"ACCESSVAL":"0"}}""" else: data = """{"error":{"code":0}}""" self.allow_rw_flagg = True if url == "/NFSHARE/count" or url == "/CIFSHARE/count": data = """{"error":{"code":0},"data":{ "COUNT":"196"}}""" if url == 
"/NFS_SHARE_AUTH_CLIENT/count?filter=PARENTID::1"\ or url == "/CIFS_SHARE_AUTH_CLIENT/count?filter="\ "PARENTID::2": data = """{"error":{"code":0},"data":{ "COUNT":"196"}}""" if url == "/CIFSSERVICE": if self.service_status_flag: data = """{"error":{"code":0},"data":{ "RUNNINGSTATUS":"2"}}""" else: data = """{"error":{"code":0},"data":{ "RUNNINGSTATUS":"1"}}""" if url == "/NFSSERVICE": if self.service_nfs_status_flag: data = """{"error":{"code":0}, "data":{"RUNNINGSTATUS":"2", "SUPPORTV3":"true", "SUPPORTV4":"true"}}""" else: data = """{"error":{"code":0}, "data":{"RUNNINGSTATUS":"1", "SUPPORTV3":"true", "SUPPORTV4":"true"}}""" self.setupserver_flag = True if url == "/FILESYSTEM?range=[0-8191]": data = """{"error":{"code":0}, "data":[{"ID":"4", "NAME":"share_fake_uuid"}]}""" if url == "/filesystem/4": data, self.extend_share_flag, self.shrink_share_flag = ( filesystem(method, data, self.fs_status_flag)) self.delete_flag = True if url == "/filesystem/5": data, self.extend_share_flag, self.shrink_share_flag = ( filesystem_thick(method, data, self.fs_status_flag)) self.delete_flag = True if url == "/filesystem/6": data, self.extend_share_flag, self.shrink_share_flag = ( filesystem_inpartition(method, data, self.fs_status_flag)) self.delete_flag = True if url == "/cachepartition": if self.partition_exist: data = """{"error":{"code":0}, "data":[{"ID":"7", "NAME":"test_partition_name"}]}""" else: data = """{"error":{"code":0}, "data":[{"ID":"7", "NAME":"test_partition_name_fail"}]}""" if url == "/cachepartition/1": if self.partition_exist: data = """{"error":{"code":0}, "data":{"ID":"7", "NAME":"test_partition_name"}}""" else: data = """{"error":{"code":0}, "data":{"ID":"7", "NAME":"test_partition_name_fail"}}""" if url == "/SMARTCACHEPARTITION": if self.cache_exist: data = """{"error":{"code":0}, "data":[{"ID":"8", "NAME":"test_cache_name"}]}""" else: data = """{"error":{"code":0}, "data":[{"ID":"8", "NAME":"test_cache_name_fail"}]}""" if url == "/SMARTCACHEPARTITION/1": if self.cache_exist: data = """{"error":{"code":0}, "data":{"ID":"8", "NAME":"test_cache_name"}}""" else: data = """{"error":{"code":0}, "data":{"ID":"8", "NAME":"test_cache_name_fail"}}""" if url == "/filesystem/associate/cachepartition": data = """{"error":{"code":0}}""" self.add_fs_to_partition_flag = True if url == "/SMARTCACHEPARTITION/CREATE_ASSOCIATE": data = """{"error":{"code":0}}""" self.add_fs_to_cache_flag = True if url == "/SMARTCACHEPARTITION/REMOVE_ASSOCIATE": data = """{"error":{"code":0}}""" if url == "/smartPartition/removeFs": data = """{"error":{"code":0}}""" if url == "/ETH_PORT": data = """{"error":{"code":0}, "data":[{"ID": "4", "LOCATION":"CTE0.A.H0", "IPV4ADDR":"", "BONDNAME":"", "BONDID":"", "RUNNINGSTATUS":"10"}, {"ID": "6", "LOCATION":"CTE0.A.H1", "IPV4ADDR":"", "BONDNAME":"fake_bond", "BONDID":"5", "RUNNINGSTATUS":"10"}]}""" if url == "/ETH_PORT/6": data = """{"error":{"code":0}, "data":{"ID": "6", "LOCATION":"CTE0.A.H1", "IPV4ADDR":"", "BONDNAME":"fake_bond", "BONDID":"5", "RUNNINGSTATUS":"10"}}""" if url == "/BOND_PORT": data = "{\"error\":{\"code\":0},\ \"data\":[{\"ID\": \"5\",\ \"NAME\":\"fake_bond\",\ \"PORTIDLIST\": \"[\\\"6\\\"]\",\ \"RUNNINGSTATUS\":\"10\"}]}" if url == "/vlan": if method == "GET": data = """{"error":{"code":0}}""" else: data = """{"error":{"code":0},"data":{ "ID":"4"}}""" if url == "/LIF": if method == "GET": data = """{"error":{"code":0}}""" else: data = """{"error":{"code":0},"data":{ "ID":"4"}}""" if url == "/DNS_Server": if method == "GET": data = 
"{\"error\":{\"code\":0},\"data\":{\ \"ADDRESS\":\"[\\\"\\\"]\"}}" else: data = """{"error":{"code":0}}""" if url == "/AD_CONFIG": if method == "GET": data = """{"error":{"code":0},"data":{ "DOMAINSTATUS":"1", "FULLDOMAINNAME":"huawei.com"}}""" else: data = """{"error":{"code":0}}""" if url == "/LDAP_CONFIG": if method == "GET": data = """{"error":{"code":0},"data":{ "BASEDN":"dc=huawei,dc=com", "LDAPSERVER": "100.97.5.87"}}""" else: data = """{"error":{"code":0}}""" else: data = '{"error":{"code":31755596}}' res_json = jsonutils.loads(data) return res_json class FakeHuaweiNasDriver(huawei_nas.HuaweiNasDriver): """Fake HuaweiNasDriver.""" def __init__(self, *args, **kwargs): huawei_nas.HuaweiNasDriver.__init__(self, *args, **kwargs) self.plugin = FakeV3StorageConnection(self.configuration) class FakeV3StorageConnection(connection.V3StorageConnection): """Fake V3StorageConnection.""" def __init__(self, configuration): connection.V3StorageConnection.__init__(self, configuration) self.configuration = configuration self.helper = FakeHuaweiNasHelper(self.configuration) @ddt.ddt class HuaweiShareDriverTestCase(test.TestCase): """Tests GenericShareDriver.""" def setUp(self): super(HuaweiShareDriverTestCase, self).setUp() self._context = context.get_admin_context() def _safe_get(opt): return getattr(self.configuration, opt) self.configuration = mock.Mock(spec=conf.Configuration) self.configuration.safe_get = mock.Mock(side_effect=_safe_get) self.configuration.network_config_group = 'fake_network_config_group' self.configuration.admin_network_config_group = ( 'fake_admin_network_config_group') self.configuration.share_backend_name = 'fake_share_backend_name' self.configuration.huawei_share_backend = 'V3' self.configuration.max_over_subscription_ratio = 1 self.configuration.driver_handles_share_servers = False self.configuration.replication_domain = None self.tmp_dir = tempfile.mkdtemp() self.fake_conf_file = self.tmp_dir + '/manila_huawei_conf.xml' self.addCleanup(shutil.rmtree, self.tmp_dir) self.create_fake_conf_file(self.fake_conf_file) self.addCleanup(os.remove, self.fake_conf_file) self.configuration.manila_huawei_conf_file = self.fake_conf_file self._helper_fake = mock.Mock() self.mock_object(huawei_nas.importutils, 'import_object', mock.Mock(return_value=self._helper_fake)) self.mock_object(time, 'sleep', fake_sleep) self.driver = FakeHuaweiNasDriver(configuration=self.configuration) self.driver.plugin.helper.test_normal = True self.share_nfs = { 'id': 'fake_uuid', 'share_id': 'fake_uuid', 'project_id': 'fake_tenant_id', 'display_name': 'fake', 'name': 'share-fake-uuid', 'size': 1, 'share_proto': 'NFS', 'share_network_id': 'fake_net_id', 'share_server_id': 'fake-share-srv-id', 'host': 'fake_host@fake_backend#OpenStack_Pool', 'export_locations': [ {'path': '100.115.10.68:/share_fake_uuid'}, ], 'host': 'fake_host@fake_backend#OpenStack_Pool', 'share_type_id': 'fake_id', } self.share_nfs_thick = { 'id': 'fake_uuid', 'project_id': 'fake_tenant_id', 'display_name': 'fake', 'name': 'share-fake-uuid', 'size': 1, 'share_proto': 'NFS', 'share_network_id': 'fake_net_id', 'share_server_id': 'fake-share-srv-id', 'host': 'fake_host@fake_backend#OpenStack_Pool_Thick', 'export_locations': [ {'path': '100.115.10.68:/share_fake_uuid'}, ], 'share_type_id': 'fake_id', } self.share_nfs_thickfs = { 'id': 'fake_uuid', 'project_id': 'fake_tenant_id', 'display_name': 'fake', 'name': 'share-fake-uuid-thickfs', 'size': 1, 'share_proto': 'NFS', 'share_network_id': 'fake_net_id', 'share_server_id': 'fake-share-srv-id', 
'host': 'fake_host@fake_backend#OpenStack_Pool', 'export_locations': [ {'path': '100.115.10.68:/share_fake_uuid_thickfs'}, ], 'share_type_id': 'fake_id', } self.share_nfs_thick_thickfs = { 'id': 'fake_uuid', 'project_id': 'fake_tenant_id', 'display_name': 'fake', 'name': 'share-fake-uuid-thickfs', 'size': 1, 'share_proto': 'NFS', 'share_network_id': 'fake_net_id', 'share_server_id': 'fake-share-srv-id', 'host': 'fake_host@fake_backend#OpenStack_Pool_Thick', 'export_locations': [ {'path': '100.115.10.68:/share_fake_uuid_thickfs'}, ], 'share_type_id': 'fake_id', } self.share_nfs_inpartition = { 'id': 'fake_uuid', 'project_id': 'fake_tenant_id', 'display_name': 'fake', 'name': 'share-fake-uuid-inpartition', 'size': 1, 'share_proto': 'NFS', 'share_network_id': 'fake_net_id', 'share_server_id': 'fake-share-srv-id', 'host': 'fake_host@fake_backend#OpenStack_Pool', 'export_locations': [ {'path': '100.115.10.68:/share_fake_uuid_inpartition'}, ], 'share_type_id': 'fake_id', } self.share_manage_nfs = { 'id': 'fake_uuid', 'project_id': 'fake_tenant_id', 'display_name': 'fake', 'name': 'share-fake-manage-uuid', 'size': 1, 'share_proto': 'NFS', 'share_network_id': 'fake_net_id', 'share_server_id': 'fake-share-srv-id', 'export_locations': [ {'path': '100.115.10.68:/share_fake_uuid'}, ], 'host': 'fake_host@fake_backend#OpenStack_Pool', 'share_type_id': 'fake_id', } self.share_pool_name_not_match = { 'id': 'fake_uuid', 'project_id': 'fake_tenant_id', 'display_name': 'fake', 'name': 'share-fake-manage-uuid', 'size': 1, 'share_proto': 'NFS', 'share_network_id': 'fake_net_id', 'share_server_id': 'fake-share-srv-id', 'export_locations': [ {'path': '100.115.10.68:/share_fake_uuid'}, ], 'host': 'fake_host@fake_backend#OpenStack_Pool_not_match', 'share_type_id': 'fake_id', } self.share_proto_fail = { 'id': 'fake_uuid', 'project_id': 'fake_tenant_id', 'display_name': 'fake', 'name': 'share-fake-uuid', 'size': 1, 'share_proto': 'proto_fail', 'share_network_id': 'fake_net_id', 'share_server_id': 'fake-share-srv-id', 'host': 'fake_host@fake_backend#OpenStack_Pool', } self.share_cifs = { 'id': 'fake_uuid', 'share_id': 'fake_uuid', 'project_id': 'fake_tenant_id', 'display_name': 'fake', 'name': 'share-fake-uuid', 'size': 1, 'share_proto': 'CIFS', 'share_network_id': 'fake_net_id', 'share_server_id': 'fake-share-srv-id', 'export_locations': [ {'path': 'share_fake_uuid'}, ], 'host': 'fake_host@fake_backend#OpenStack_Pool', 'share_type_id': 'fake_id', } self.share_manage_cifs = { 'id': 'fake_uuid', 'project_id': 'fake_tenant_id', 'display_name': 'fake', 'name': 'share-fake-manage-uuid', 'size': 1, 'share_proto': 'CIFS', 'share_network_id': 'fake_net_id', 'share_server_id': 'fake-share-srv-id', 'export_locations': [ {'path': '\\\\100.115.10.68\\share_fake_uuid'}, ], 'host': 'fake_host@fake_backend#OpenStack_Pool', 'share_type_id': 'fake_id', } self.nfs_snapshot = { 'id': 'fake_snapshot_uuid', 'snapshot_id': 'fake_snapshot_uuid', 'display_name': 'snapshot', 'name': 'fake_snapshot_name', 'size': 1, 'share_name': 'share_fake_uuid', 'share_id': 'fake_uuid', 'share': { 'share_name': 'share_fake_uuid', 'share_id': 'fake_uuid', 'share_size': 1, 'share_proto': 'NFS', }, } self.cifs_snapshot = { 'id': 'fake_snapshot_uuid', 'snapshot_id': 'fake_snapshot_uuid', 'display_name': 'snapshot', 'name': 'fake_snapshot_name', 'size': 1, 'share_name': 'share_fake_uuid', 'share_id': 'fake_uuid', 'share': { 'share_name': 'share_fake_uuid', 'share_id': 'fake_uuid', 'share_size': 1, 'share_proto': 'CIFS', }, } self.security_service = { 'id': 
'fake_id', 'domain': 'FAKE', 'server': 'fake_server', 'user': 'fake_user', 'password': 'fake_password', } self.access_ip = { 'access_type': 'ip', 'access_to': '100.112.0.1', 'access_level': 'rw', } self.access_ip_exist = { 'access_type': 'ip', 'access_to': '100.112.0.2', 'access_level': 'rw', } self.access_user = { 'access_type': 'user', 'access_to': 'user_name', 'access_level': 'rw', } self.access_user_exist = { 'access_type': 'user', 'access_to': 'user_exist', 'access_level': 'rw', } self.access_group = { 'access_type': 'user', 'access_to': 'group_name', 'access_level': 'rw', } self.access_cert = { 'access_type': 'cert', 'access_to': 'fake_cert', 'access_level': 'rw', } self.driver_options = { 'volume_id': 'fake', } self.share_server = None self.driver._licenses = ['fake'] self.fake_network_allocations = [{ 'id': 'fake_network_allocation_id', 'ip_address': '111.111.111.109', }] self.fake_network_info = { 'server_id': '0', 'segmentation_id': '2', 'cidr': '111.111.111.0/24', 'neutron_net_id': 'fake_neutron_net_id', 'neutron_subnet_id': 'fake_neutron_subnet_id', 'nova_net_id': '', 'security_services': '', 'network_allocations': self.fake_network_allocations, 'network_type': 'vlan', } self.fake_active_directory = { 'type': 'active_directory', 'dns_ip': '100.97.5.5', 'user': 'ad_user', 'password': 'ad_password', 'domain': 'huawei.com' } self.fake_ldap = { 'type': 'ldap', 'server': '100.97.5.87', 'domain': 'dc=huawei,dc=com' } fake_share_type_id_not_extra = 'fake_id' self.fake_type_not_extra = { 'test_with_extra': { 'created_at': 'fake_time', 'deleted': '0', 'deleted_at': None, 'extra_specs': {}, 'required_extra_specs': {}, 'id': fake_share_type_id_not_extra, 'name': 'test_with_extra', 'updated_at': None } } fake_extra_specs = { 'capabilities:dedupe': ' True', 'capabilities:compression': ' True', 'capabilities:huawei_smartcache': ' True', 'huawei_smartcache:cachename': 'test_cache_name', 'capabilities:huawei_smartpartition': ' True', 'huawei_smartpartition:partitionname': 'test_partition_name', 'capabilities:thin_provisioning': ' True', 'test:test:test': 'test', } fake_share_type_id = 'fooid-2' self.fake_type_w_extra = { 'test_with_extra': { 'created_at': 'fake_time', 'deleted': '0', 'deleted_at': None, 'extra_specs': fake_extra_specs, 'required_extra_specs': {}, 'id': fake_share_type_id, 'name': 'test_with_extra', 'updated_at': None } } fake_extra_specs = { 'capabilities:dedupe': ' True', 'capabilities:compression': ' True', 'capabilities:huawei_smartcache': ' False', 'huawei_smartcache:cachename': None, 'capabilities:huawei_smartpartition': ' False', 'huawei_smartpartition:partitionname': None, 'capabilities:thin_provisioning': ' True', 'test:test:test': 'test', } fake_share_type_id = 'fooid-3' self.fake_type_fake_extra = { 'test_with_extra': { 'created_at': 'fake_time', 'deleted': '0', 'deleted_at': None, 'extra_specs': fake_extra_specs, 'required_extra_specs': {}, 'id': fake_share_type_id, 'name': 'test_with_extra', 'updated_at': None } } fake_extra_specs = { 'capabilities:dedupe': ' True', 'capabilities:compression': ' True', 'capabilities:huawei_smartcache': ' False', 'huawei_smartcache:cachename': None, 'capabilities:huawei_smartpartition': ' False', 'huawei_smartpartition:partitionname': None, 'capabilities:thin_provisioning': ' False', 'test:test:test': 'test', } fake_share_type_id = 'fooid-4' self.fake_type_thin_extra = { 'test_with_extra': { 'created_at': 'fake_time', 'deleted': '0', 'deleted_at': None, 'extra_specs': fake_extra_specs, 'required_extra_specs': {}, 'id': 
fake_share_type_id, 'name': 'test_with_extra', 'updated_at': None } } self.share_nfs_host_not_exist = { 'id': 'fake_uuid', 'project_id': 'fake_tenant_id', 'display_name': 'fake', 'name': 'share-fake-uuid', 'size': 1, 'share_proto': 'NFS', 'share_network_id': 'fake_net_id', 'share_server_id': 'fake-share-srv-id', 'host': 'fake_host@fake_backend#', } self.share_nfs_storagepool_fail = { 'id': 'fake_uuid', 'project_id': 'fake_tenant_id', 'display_name': 'fake', 'name': 'share-fake-uuid', 'size': 1, 'share_proto': 'NFS', 'share_network_id': 'fake_net_id', 'share_server_id': 'fake-share-srv-id', 'host': 'fake_host@fake_backend#OpenStack_Pool2', } fake_extra_specs = { 'driver_handles_share_servers': 'False', } fake_share_type_id = 'fake_id' self.fake_type_extra = { 'test_with_extra': { 'created_at': 'fake_time', 'deleted': '0', 'deleted_at': None, 'extra_specs': fake_extra_specs, 'required_extra_specs': {}, 'id': fake_share_type_id, 'name': 'test_with_extra', 'updated_at': None } } def _get_share_by_proto(self, share_proto): if share_proto == "NFS": share = self.share_nfs elif share_proto == "CIFS": share = self.share_cifs else: share = None return share def test_conf_product_fail(self): self.recreate_fake_conf_file(product_flag=False) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.assertRaises(exception.InvalidInput, self.driver.plugin.check_conf_file) def test_conf_pool_node_fail(self): self.recreate_fake_conf_file(pool_node_flag=False) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.assertRaises(exception.InvalidInput, self.driver.plugin.check_conf_file) def test_conf_username_fail(self): self.recreate_fake_conf_file(username_flag=False) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.assertRaises(exception.InvalidInput, self.driver.plugin.check_conf_file) def test_conf_timeout_fail(self): self.recreate_fake_conf_file(timeout_flag=False) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) timeout = self.driver.plugin._get_timeout() self.assertEqual(60, timeout) def test_conf_wait_interval_fail(self): self.recreate_fake_conf_file(wait_interval_flag=False) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) wait_interval = self.driver.plugin._get_wait_interval() self.assertEqual(3, wait_interval) def test_conf_logical_ip_fail(self): self.configuration.driver_handles_share_servers = True self.recreate_fake_conf_file(logical_port="fake_port") self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.configuration.driver_handles_share_servers = False self.assertRaises(exception.InvalidInput, self.driver.plugin.check_conf_file) def test_get_backend_driver_fail(self): test_fake_conf_file = None self.driver.plugin.configuration.manila_huawei_conf_file = ( test_fake_conf_file) self.assertRaises(exception.InvalidInput, self.driver.get_backend_driver) def test_get_backend_driver_fail_driver_none(self): self.recreate_fake_conf_file(product_flag=False) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.assertRaises(exception.InvalidInput, self.driver.get_backend_driver) def test_create_share_alloctype_fail(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.recreate_fake_conf_file(alloctype_value='alloctype_fail') self.driver.plugin.configuration.manila_huawei_conf_file = ( 
self.fake_conf_file) self.driver.plugin.helper.login() self.assertRaises(exception.InvalidShare, self.driver.create_share, self._context, self.share_nfs, self.share_server) def test_create_share_storagepool_not_exist(self): self.driver.plugin.helper.login() self.assertRaises(exception.InvalidHost, self.driver.create_share, self._context, self.share_nfs_host_not_exist, self.share_server) def test_create_share_nfs_storagepool_fail(self): self.driver.plugin.helper.login() self.assertRaises(exception.InvalidHost, self.driver.create_share, self._context, self.share_nfs_storagepool_fail, self.share_server) def test_create_share_nfs_no_data_fail(self): self.driver.plugin.helper.create_share_data_flag = True self.driver.plugin.helper.login() self.assertRaises(exception.InvalidShare, self.driver.create_share, self._context, self.share_nfs, self.share_server) def test_read_xml_fail(self): test_fake_conf_file = None self.driver.plugin.configuration.manila_huawei_conf_file = ( test_fake_conf_file) self.assertRaises(exception.InvalidInput, self.driver.plugin.helper._read_xml) def test_connect_fail(self): self.driver.plugin.configuration = None self.assertRaises(exception.InvalidInput, self.driver.plugin.connect) def test_login_success(self): deviceid = self.driver.plugin.helper.login() self.assertEqual("210235G7J20000000000", deviceid) def test_check_for_setup_success(self): self.driver.plugin.helper.login() self.driver.check_for_setup_error() def test_check_for_setup_service_down(self): self.driver.plugin.helper.service_status_flag = False self.driver.plugin.helper.login() self.driver.check_for_setup_error() def test_check_for_setup_nfs_down(self): self.driver.plugin.helper.service_nfs_status_flag = False self.driver.plugin.helper.login() self.driver.check_for_setup_error() def test_check_for_setup_service_false(self): self.driver.plugin.helper.login() self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.check_for_setup_error) def test_create_share_alloctype_thin_success(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() self.recreate_fake_conf_file(alloctype_value='Thin') self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.driver.plugin.helper.login() location = self.driver.create_share(self._context, self.share_nfs, self.share_server) self.assertEqual("100.115.10.68:/share_fake_uuid", location) self.assertEqual(constants.ALLOC_TYPE_THIN_FLAG, self.driver.plugin.helper.alloc_type) def test_create_share_alloctype_thick_success(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() self.recreate_fake_conf_file(alloctype_value='Thick') self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.driver.plugin.helper.login() location = self.driver.create_share(self._context, self.share_nfs, self.share_server) self.assertEqual("100.115.10.68:/share_fake_uuid", location) self.assertEqual(constants.ALLOC_TYPE_THICK_FLAG, self.driver.plugin.helper.alloc_type) def test_create_share_no_alloctype_no_extra(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() self.recreate_fake_conf_file(alloctype_value=None) 
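        # With AllocType removed from the driver XML and no extra specs on
        # the share type, the driver is expected to default to thick
        # provisioning.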
self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.driver.plugin.helper.login() location = self.driver.create_share(self._context, self.share_nfs, self.share_server) self.assertEqual("100.115.10.68:/share_fake_uuid", location) self.assertEqual(constants.ALLOC_TYPE_THICK_FLAG, self.driver.plugin.helper.alloc_type) def test_create_share_with_extra_thin(self): share_type = { 'extra_specs': { 'capabilities:thin_provisioning': ' True' }, } self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() location = self.driver.create_share(self._context, self.share_nfs, self.share_server) self.assertEqual("100.115.10.68:/share_fake_uuid", location) self.assertEqual(constants.ALLOC_TYPE_THIN_FLAG, self.driver.plugin.helper.alloc_type) def test_create_share_with_extra_thick(self): share_type = { 'extra_specs': { 'capabilities:thin_provisioning': ' False' }, } self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() location = self.driver.create_share(self._context, self.share_nfs, self.share_server) self.assertEqual("100.115.10.68:/share_fake_uuid", location) self.assertEqual(constants.ALLOC_TYPE_THICK_FLAG, self.driver.plugin.helper.alloc_type) def test_shrink_share_success(self): self.driver.plugin.helper.shrink_share_flag = False self.driver.plugin.helper.login() self.driver.shrink_share(self.share_nfs, 1, self.share_server) self.assertTrue(self.driver.plugin.helper.shrink_share_flag) def test_shrink_share_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.shrink_share, self.share_nfs, 1, self.share_server) def test_shrink_share_size_fail(self): self.driver.plugin.helper.login() self.assertRaises(exception.InvalidShare, self.driver.shrink_share, self.share_nfs, 5, self.share_server) def test_shrink_share_alloctype_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.fs_status_flag = False self.assertRaises(exception.InvalidShare, self.driver.shrink_share, self.share_nfs, 1, self.share_server) def test_shrink_share_not_exist(self): self.driver.plugin.helper.login() self.driver.plugin.helper.share_exist = False self.assertRaises(exception.InvalidShare, self.driver.shrink_share, self.share_nfs, 1, self.share_server) def test_extend_share_success(self): self.driver.plugin.helper.extend_share_flag = False self.driver.plugin.helper.login() self.driver.extend_share(self.share_nfs, 5, self.share_server) self.assertTrue(self.driver.plugin.helper.extend_share_flag) def test_extend_share_fail(self): self.driver.plugin.helper.login() self.assertRaises(exception.InvalidInput, self.driver.extend_share, self.share_nfs, 3, self.share_server) def test_extend_share_not_exist(self): self.driver.plugin.helper.login() self.driver.plugin.helper.share_exist = False self.assertRaises(exception.InvalidShareAccess, self.driver.extend_share, self.share_nfs, 4, self.share_server) def test_create_share_nfs_success(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() location = self.driver.create_share(self._context, self.share_nfs, self.share_server) self.assertEqual("100.115.10.68:/share_fake_uuid", location) def test_create_share_cifs_success(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', 
mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() location = self.driver.create_share(self._context, self.share_cifs, self.share_server) self.assertEqual("\\\\100.115.10.68\\share_fake_uuid", location) def test_create_share_with_extra(self): self.driver.plugin.helper.add_fs_to_partition_flag = False self.driver.plugin.helper.add_fs_to_cache_flag = False share_type = self.fake_type_w_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.recreate_fake_conf_file(alloctype_value='Thin') self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.driver.plugin.helper.login() location = self.driver.create_share(self._context, self.share_nfs, self.share_server) self.assertEqual("100.115.10.68:/share_fake_uuid", location) self.assertTrue(self.driver.plugin.helper.add_fs_to_partition_flag) self.assertTrue(self.driver.plugin.helper.add_fs_to_cache_flag) @ddt.data({'capabilities:dedupe': ' True', 'capabilities:thin_provisioning': ' False'}, {'capabilities:dedupe': ' True', 'capabilities:compression': ' True', 'capabilities:thin_provisioning': ' False'}, {'capabilities:huawei_smartcache': ' True', 'huawei_smartcache:cachename': None}, {'capabilities:huawei_smartpartition': ' True', 'huawei_smartpartition:partitionname': None}, {'capabilities:huawei_smartcache': ' True'}, {'capabilities:huawei_smartpartition': ' True'}) def test_create_share_with_extra_error(self, fake_extra_specs): fake_share_type_id = 'fooid-2' fake_type_error_extra = { 'test_with_extra': { 'created_at': 'fake_time', 'deleted': '0', 'deleted_at': None, 'extra_specs': fake_extra_specs, 'required_extra_specs': {}, 'id': fake_share_type_id, 'name': 'test_with_extra', 'updated_at': None } } share_type = fake_type_error_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() self.assertRaises(exception.InvalidShare, self.driver.create_share, self._context, self.share_nfs_thick, self.share_server) @ddt.data({"fake_extra_specs_qos": {"qos:maxIOPS": "100", "qos:maxBandWidth": "50", "qos:IOType": "0"}, "fake_qos_info": {"MAXIOPS": "100", "MAXBANDWIDTH": "50", "IOTYPE": "0", "LATENCY": "0", "NAME": "OpenStack_fake_qos"}}, {"fake_extra_specs_qos": {"qos:maxIOPS": "100", "qos:IOType": "1"}, "fake_qos_info": {"NAME": "fake_qos", "MAXIOPS": "100", "IOTYPE": "1", "LATENCY": "0"}}, {"fake_extra_specs_qos": {"qos:minIOPS": "100", "qos:minBandWidth": "50", 'qos:latency': "50", "qos:IOType": "0"}, "fake_qos_info": {"MINIOPS": "100", "MINBANDWIDTH": "50", "IOTYPE": "0", "LATENCY": "50", "NAME": "OpenStack_fake_qos"}}) @ddt.unpack def test_create_share_with_qos(self, fake_extra_specs_qos, fake_qos_info): fake_share_type_id = 'fooid-2' fake_extra_specs = {"capabilities:qos": " True"} fake_extra_specs.update(fake_extra_specs_qos) fake_type_error_extra = { 'test_with_extra': { 'created_at': 'fake_time', 'deleted': '0', 'deleted_at': None, 'extra_specs': fake_extra_specs, 'required_extra_specs': {}, 'id': fake_share_type_id, 'name': 'test_with_extra', 'updated_at': None } } fake_qos_info_respons = { "error": { "code": 0 }, "data": [{ "ID": "11", "FSLIST": u'["1", "2", "3", "4"]', "LUNLIST": '[""]', "RUNNINGSTATUS": "2", }] } fake_qos_info_respons["data"][0].update(fake_qos_info) share_type = fake_type_error_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.mock_object(helper.RestHelper, 'get_qos', 
mock.Mock(return_value=fake_qos_info_respons)) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.driver.plugin.helper.login() location = self.driver.create_share(self._context, self.share_nfs, self.share_server) self.assertEqual("100.115.10.68:/share_fake_uuid", location) @ddt.data({'capabilities:qos': ' True', 'qos:maxIOPS': -1}, {'capabilities:qos': ' True', 'qos:IOTYPE': 4}, {'capabilities:qos': ' True', 'qos:IOTYPE': 100}, {'capabilities:qos': ' True', 'qos:maxIOPS': 0}, {'capabilities:qos': ' True', 'qos:minIOPS': 0}, {'capabilities:qos': ' True', 'qos:minBandWidth': 0}, {'capabilities:qos': ' True', 'qos:maxBandWidth': 0}, {'capabilities:qos': ' True', 'qos:latency': 0}, {'capabilities:qos': ' True', 'qos:maxIOPS': 100}, {'capabilities:qos': ' True', 'qos:maxIOPS': 100, 'qos:minBandWidth': 100, 'qos:IOType': '0'}) def test_create_share_with_invalid_qos(self, fake_extra_specs): fake_share_type_id = 'fooid-2' fake_type_error_extra = { 'test_with_extra': { 'created_at': 'fake_time', 'deleted': '0', 'deleted_at': None, 'extra_specs': fake_extra_specs, 'required_extra_specs': {}, 'id': fake_share_type_id, 'name': 'test_with_extra', 'updated_at': None } } share_type = fake_type_error_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.driver.plugin.helper.login() self.assertRaises(exception.InvalidShare, self.driver.create_share, self._context, self.share_nfs, self.share_server) def test_create_share_cache_not_exist(self): self.driver.plugin.helper.cache_exist = False share_type = self.fake_type_w_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() self.assertRaises(exception.InvalidShare, self.driver.create_share, self._context, self.share_nfs, self.share_server) def test_add_share_to_cache_fail(self): opts = dict( huawei_smartcache='true', cachename=None, ) fsid = 4 smartcache = smartx.SmartCache(self.driver.plugin.helper) self.assertRaises(exception.InvalidInput, smartcache.add, opts, fsid) def test_create_share_partition_not_exist(self): self.driver.plugin.helper.partition_exist = False share_type = self.fake_type_w_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() self.assertRaises(exception.InvalidShare, self.driver.create_share, self._context, self.share_nfs, self.share_server) def test_add_share_to_partition_fail(self): opts = dict( huawei_smartpartition='true', partitionname=None, ) fsid = 4 smartpartition = smartx.SmartPartition(self.driver.plugin.helper) self.assertRaises(exception.InvalidInput, smartpartition.add, opts, fsid) def test_login_fail(self): self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.plugin.helper.login) def test_create_share_nfs_fs_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.create_share, self._context, self.share_nfs, self.share_server) def test_create_share_nfs_status_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.fs_status_flag = False self.assertRaises(exception.InvalidShare, self.driver.create_share, self._context, self.share_nfs, self.share_server) def test_create_share_cifs_fs_fail(self): self.driver.plugin.helper.login() 
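        # Flip the fake helper into its error path so filesystem creation
        # for the CIFS share fails.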
self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.create_share, self._context, self.share_cifs, self.share_server) def test_create_share_cifs_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.create_share_flag = True self.assertRaises(exception.InvalidShare, self.driver.create_share, self._context, self.share_cifs, self.share_server) def test_create_share_nfs_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.create_share_flag = True self.assertRaises(exception.InvalidShare, self.driver.create_share, self._context, self.share_nfs, self.share_server) @ddt.data({"share_proto": "NFS", "fake_qos_info_respons": {"ID": "11", "MAXIOPS": "100", "IOType": "2", "FSLIST": u'["0", "1", "4"]'}}, {"share_proto": "CIFS", "fake_qos_info_respons": {"ID": "11", "MAXIOPS": "100", "IOType": "2", "FSLIST": u'["4"]', "RUNNINGSTATUS": "2"}}) @ddt.unpack def test_delete_share_success(self, share_proto, fake_qos_info_respons): self.driver.plugin.helper.login() self.driver.plugin.helper.delete_flag = False if share_proto == 'NFS': share = self.share_nfs else: share = self.share_cifs with mock.patch.object(helper.RestHelper, 'get_qos_info', return_value=fake_qos_info_respons): self.driver.delete_share(self._context, share, self.share_server) self.assertTrue(self.driver.plugin.helper.delete_flag) def test_delete_share_withoutqos_success(self): self.driver.plugin.helper.login() self.driver.plugin.helper.delete_flag = False self.driver.plugin.qos_support = True self.driver.delete_share(self._context, self.share_nfs, self.share_server) self.assertTrue(self.driver.plugin.helper.delete_flag) def test_check_snapshot_id_exist_fail(self): snapshot_id = "4" self.driver.plugin.helper.login() self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.plugin.helper._check_snapshot_id_exist, snapshot_id) def test_delete_share_nfs_fail_not_exist(self): self.driver.plugin.helper.login() self.driver.plugin.helper.delete_flag = False self.driver.plugin.helper.share_exist = False self.driver.delete_share(self._context, self.share_nfs, self.share_server) self.assertTrue(self.driver.plugin.helper.delete_flag) def test_delete_share_cifs_success(self): self.driver.plugin.helper.delete_flag = False fake_qos_info_respons = { "ID": "11", "FSLIST": u'["1", "2", "3", "4"]', "LUNLIST": '[""]', "RUNNINGSTATUS": "2", } self.mock_object(helper.RestHelper, 'get_qos_info', mock.Mock(return_value=fake_qos_info_respons)) self.driver.plugin.helper.login() self.driver.delete_share(self._context, self.share_cifs, self.share_server) self.assertTrue(self.driver.plugin.helper.delete_flag) def test_get_network_allocations_number_dhss_true(self): self.configuration.driver_handles_share_servers = True number = self.driver.get_network_allocations_number() self.assertEqual(1, number) def test_get_network_allocations_number_dhss_false(self): self.configuration.driver_handles_share_servers = False number = self.driver.get_network_allocations_number() self.assertEqual(0, number) def test_create_nfsshare_from_nfssnapshot_success(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.mock_object(self.driver.plugin, 'mount_share_to_host', mock.Mock(return_value={})) self.mock_object(self.driver.plugin, 'copy_snapshot_data', mock.Mock(return_value=True)) self.mock_object(self.driver.plugin, 'umount_share_from_host', mock.Mock(return_value={})) 
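        # Creating a share from a snapshot should mount both shares, copy
        # the snapshot data, and unmount them again; the assertions below
        # check each step.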
self.driver.plugin.helper.login() self.driver.plugin.helper.snapshot_flag = True location = self.driver.create_share_from_snapshot(self._context, self.share_nfs, self.nfs_snapshot, self.share_server) self.assertTrue(db.share_type_get.called) self.assertEqual(2, self.driver.plugin. mount_share_to_host.call_count) self.assertTrue(self.driver.plugin. copy_snapshot_data.called) self.assertEqual(2, self.driver.plugin. umount_share_from_host.call_count) self.assertEqual("100.115.10.68:/share_fake_uuid", location) def test_create_cifsshare_from_cifssnapshot_success(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.mock_object(self.driver.plugin, 'mount_share_to_host', mock.Mock(return_value={})) self.mock_object(self.driver.plugin, 'copy_snapshot_data', mock.Mock(return_value=True)) self.mock_object(self.driver.plugin, 'umount_share_from_host', mock.Mock(return_value={})) self.driver.plugin.helper.login() self.driver.plugin.helper.snapshot_flag = True location = self.driver.create_share_from_snapshot(self._context, self.share_cifs, self.cifs_snapshot, self.share_server) self.assertTrue(db.share_type_get.called) self.assertEqual(2, self.driver.plugin. mount_share_to_host.call_count) self.assertTrue(self.driver.plugin. copy_snapshot_data.called) self.assertEqual(2, self.driver.plugin. umount_share_from_host.call_count) self.assertEqual("\\\\100.115.10.68\\share_fake_uuid", location) def test_create_nfsshare_from_cifssnapshot_success(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.mock_object(self.driver.plugin, '_get_access_id', mock.Mock(return_value={})) self.mock_object(self.driver.plugin, 'mount_share_to_host', mock.Mock(return_value={})) self.mock_object(self.driver.plugin, 'copy_snapshot_data', mock.Mock(return_value=True)) self.mock_object(self.driver.plugin, 'umount_share_from_host', mock.Mock(return_value={})) self.driver.plugin.helper.login() self.driver.plugin.helper.access_id = None self.driver.plugin.helper.snapshot_flag = True location = self.driver.create_share_from_snapshot(self._context, self.share_nfs, self.cifs_snapshot, self.share_server) self.assertTrue(db.share_type_get.called) self.assertTrue(self.driver.plugin. _get_access_id.called) self.assertEqual(2, self.driver.plugin. mount_share_to_host.call_count) self.assertTrue(self.driver.plugin. copy_snapshot_data.called) self.assertEqual(2, self.driver.plugin. umount_share_from_host.call_count) self.assertEqual("100.115.10.68:/share_fake_uuid", location) def test_create_cifsshare_from_nfssnapshot_success(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.mock_object(self.driver.plugin, '_get_access_id', mock.Mock(return_value={})) self.mock_object(utils, 'execute', mock.Mock(return_value=("", ""))) self.driver.plugin.helper.login() self.driver.plugin.helper.snapshot_flag = True location = self.driver.create_share_from_snapshot(self._context, self.share_cifs, self.nfs_snapshot, self.share_server) self.assertTrue(db.share_type_get.called) self.assertTrue(self.driver.plugin. 
_get_access_id.called) self.assertEqual(7, utils.execute.call_count) self.assertEqual("\\\\100.115.10.68\\share_fake_uuid", location) def test_create_share_from_snapshot_nonefs(self): self.driver.plugin.helper.login() self.mock_object(self.driver.plugin.helper, '_get_fsid_by_name', mock.Mock(return_value={})) self.assertRaises(exception.StorageResourceNotFound, self.driver.create_share_from_snapshot, self._context, self.share_nfs, self.nfs_snapshot, self.share_server) self.assertTrue(self.driver.plugin.helper. _get_fsid_by_name.called) def test_create_share_from_notexistingsnapshot_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.snapshot_flag = False self.assertRaises(exception.ShareSnapshotNotFound, self.driver.create_share_from_snapshot, self._context, self.share_nfs, self.nfs_snapshot, self.share_server) def test_create_share_from_share_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.snapshot_flag = True self.mock_object(self.driver.plugin, 'check_fs_status', mock.Mock(return_value={})) self.assertRaises(exception.StorageResourceException, self.driver.create_share_from_snapshot, self._context, self.share_nfs, self.nfs_snapshot, self.share_server) self.assertTrue(self.driver.plugin.check_fs_status.called) def test_create_share_from_snapshot_share_error(self): self.mock_object(self.driver.plugin, '_get_share_proto', mock.Mock(return_value={})) self.driver.plugin.helper.login() self.driver.plugin.helper.snapshot_flag = True self.assertRaises(exception.ShareResourceNotFound, self.driver.create_share_from_snapshot, self._context, self.share_nfs, self.nfs_snapshot, self.share_server) self.assertTrue(self.driver.plugin. _get_share_proto.called) def test_create_share_from_snapshot_allow_oldaccess_fail(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.mock_object(self.driver.plugin, '_get_share_proto', mock.Mock(return_value='NFS')) self.mock_object(self.driver.plugin, '_get_access_id', mock.Mock(return_value={})) self.mock_object(self.driver.plugin.helper, '_get_share_by_name', mock.Mock(return_value={})) self.driver.plugin.helper.login() self.driver.plugin.helper.snapshot_flag = True self.assertRaises(exception.InvalidShareAccess, self.driver.create_share_from_snapshot, self._context, self.share_nfs, self.nfs_snapshot, self.share_server) self.assertTrue(db.share_type_get.called) self.assertTrue(self.driver.plugin._get_share_proto.called) self.assertTrue(self.driver.plugin._get_access_id.called) self.assertTrue(self.driver.plugin.helper._get_share_by_name.called) def test_create_share_from_snapshot_mountshare_fail(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.mock_object(self.driver.plugin, 'mount_share_to_host', mock.Mock(side_effect=exception. ShareMountException('err'))) self.driver.plugin.helper.login() self.driver.plugin.helper.snapshot_flag = True self.assertRaises(exception.ShareMountException, self.driver.create_share_from_snapshot, self._context, self.share_nfs, self.nfs_snapshot, self.share_server) self.assertTrue(db.share_type_get.called) self.assertEqual(1, self.driver.plugin. 
mount_share_to_host.call_count) def test_create_share_from_snapshot_allow_newaccess_fail(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.mock_object(self.driver.plugin, '_get_share_proto', mock.Mock(return_value='NFS')) self.mock_object(self.driver.plugin, '_get_access_id', mock.Mock(return_value='5')) self.mock_object(self.driver.plugin, 'mount_share_to_host', mock.Mock(return_value={})) self.mock_object(self.driver.plugin.helper, '_get_share_by_name', mock.Mock(return_value={})) self.mock_object(self.driver.plugin, 'umount_share_from_host', mock.Mock(return_value={})) self.driver.plugin.helper.login() self.driver.plugin.helper.snapshot_flag = True self.assertRaises(exception.InvalidShareAccess, self.driver.create_share_from_snapshot, self._context, self.share_nfs, self.nfs_snapshot, self.share_server) self.assertTrue(db.share_type_get.called) self.assertTrue(self.driver.plugin._get_share_proto.called) self.assertTrue(self.driver.plugin._get_access_id.called) self.assertEqual(1, self.driver.plugin. mount_share_to_host.call_count) self.assertTrue(self.driver.plugin.helper. _get_share_by_name.called) self.assertEqual(1, self.driver.plugin. umount_share_from_host.call_count) def test_create_nfsshare_from_nfssnapshot_copydata_fail(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.mock_object(self.driver.plugin, 'mount_share_to_host', mock.Mock(return_value={})) self.mock_object(data_utils, 'Copy', mock.Mock(side_effect=Exception('err'))) self.mock_object(utils, 'execute', mock.Mock(return_value={})) self.driver.plugin.helper.login() self.driver.plugin.helper.snapshot_flag = True self.assertRaises(exception.ShareCopyDataException, self.driver.create_share_from_snapshot, self._context, self.share_nfs, self.nfs_snapshot, self.share_server) self.assertTrue(db.share_type_get.called) self.assertEqual(2, self.driver.plugin. mount_share_to_host.call_count) self.assertTrue(data_utils.Copy.called) self.assertEqual(2, utils.execute.call_count) def test_create_nfsshare_from_nfssnapshot_umountshare_fail(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.mock_object(self.driver.plugin, 'mount_share_to_host', mock.Mock(return_value={})) self.mock_object(self.driver.plugin, 'copy_snapshot_data', mock.Mock(return_value=True)) self.mock_object(self.driver.plugin, 'umount_share_from_host', mock.Mock(side_effect=exception. ShareUmountException('err'))) self.mock_object(os, 'rmdir', mock.Mock(side_effect=Exception('err'))) self.driver.plugin.helper.login() self.driver.plugin.helper.snapshot_flag = True location = self.driver.create_share_from_snapshot(self._context, self.share_nfs, self.cifs_snapshot, self.share_server) self.assertTrue(db.share_type_get.called) self.assertEqual(2, self.driver.plugin. mount_share_to_host.call_count) self.assertTrue(self.driver.plugin.copy_snapshot_data.called) self.assertEqual(2, self.driver.plugin. 
umount_share_from_host.call_count) self.assertTrue(os.rmdir.called) self.assertEqual("100.115.10.68:/share_fake_uuid", location) def test_get_share_stats_refresh_pool_not_exist(self): self.driver.plugin.helper.login() self.recreate_fake_conf_file(pool_node_flag=False) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.assertRaises(exception.InvalidInput, self.driver._update_share_stats) def test_get_share_stats_refresh(self): self.driver.plugin.helper.login() self.driver._update_share_stats() expected = {} expected["share_backend_name"] = "fake_share_backend_name" expected["driver_handles_share_servers"] = False expected["vendor_name"] = 'Huawei' expected["driver_version"] = '1.2' expected["storage_protocol"] = 'NFS_CIFS' expected['reserved_percentage'] = 0 expected['total_capacity_gb'] = 0.0 expected['free_capacity_gb'] = 0.0 expected['qos'] = True expected["snapshot_support"] = True expected['replication_domain'] = None expected["pools"] = [] pool = dict( pool_name='OpenStack_Pool', total_capacity_gb=2.0, free_capacity_gb=1.0, allocated_capacity_gb=1.0, qos=True, reserved_percentage=0, compression=[True, False], dedupe=[True, False], max_over_subscription_ratio=1, provisioned_capacity_gb=1.0, thin_provisioning=[True, False], huawei_smartcache=[True, False], huawei_smartpartition=[True, False], ) expected["pools"].append(pool) self.assertEqual(expected, self.driver._stats) def test_allow_access_proto_fail(self): self.driver.plugin.helper.login() self.assertRaises(exception.InvalidInput, self.driver.allow_access, self._context, self.share_proto_fail, self.access_ip, self.share_server) def test_allow_access_ip_rw_success(self): self.driver.plugin.helper.login() self.allow_flag = False self.allow_rw_flag = False self.driver.allow_access(self._context, self.share_nfs, self.access_ip, self.share_server) self.assertTrue(self.driver.plugin.helper.allow_flag) self.assertTrue(self.driver.plugin.helper.allow_rw_flag) def test_allow_access_ip_ro_success(self): access_ro = { 'access_type': 'ip', 'access_to': '1.2.3.4', 'access_level': 'ro', } self.driver.plugin.helper.login() self.allow_flag = False self.allow_ro_flag = False self.driver.allow_access(self._context, self.share_nfs, access_ro, self.share_server) self.assertTrue(self.driver.plugin.helper.allow_flag) self.assertTrue(self.driver.plugin.helper.allow_ro_flag) def test_allow_access_nfs_user_success(self): self.driver.plugin.helper.login() self.allow_flag = False self.allow_rw_flag = False self.driver.allow_access(self._context, self.share_nfs, self.access_user, self.share_server) self.assertTrue(self.driver.plugin.helper.allow_flag) self.assertTrue(self.driver.plugin.helper.allow_rw_flag) @ddt.data( { 'access_type': 'user', 'access_to': 'user_name', 'access_level': 'rw', }, { 'access_type': 'user', 'access_to': 'group_name', 'access_level': 'rw', }, { 'access_type': 'user', 'access_to': 'domain\\user_name', 'access_level': 'rw', }, { 'access_type': 'user', 'access_to': 'domain\\group_name', 'access_level': 'rw', }, ) def test_allow_access_cifs_rw_success(self, access_user): self.driver.plugin.helper.login() self.allow_flag = False self.allow_rw_flag = False self.driver.allow_access(self._context, self.share_cifs, access_user, self.share_server) self.assertTrue(self.driver.plugin.helper.allow_flag) self.assertTrue(self.driver.plugin.helper.allow_rw_flag) def test_allow_access_cifs_user_ro_success(self): access_ro = { 'access_type': 'user', 'access_to': 'user_name', 'access_level': 'ro', } 
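        # A CIFS user rule with the 'ro' level should be applied through
        # the helper's read-only path.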
self.driver.plugin.helper.login() self.allow_flag = False self.allow_ro_flag = False self.driver.allow_access(self._context, self.share_cifs, access_ro, self.share_server) self.assertTrue(self.driver.plugin.helper.allow_flag) self.assertTrue(self.driver.plugin.helper.allow_ro_flag) def test_allow_access_level_fail(self): access_fail = { 'access_type': 'user', 'access_to': 'user_name', 'access_level': 'fail', } self.driver.plugin.helper.login() self.assertRaises(exception.InvalidShareAccess, self.driver.allow_access, self._context, self.share_cifs, access_fail, self.share_server) def test_update_access_add_delete(self): self.driver.plugin.helper.login() self.allow_flag = False self.allow_rw_flag = False self.deny_flag = False add_rules = [self.access_ip] delete_rules = [self.access_ip_exist] self.driver.update_access(self._context, self.share_nfs, None, add_rules, delete_rules, self.share_server) self.assertTrue(self.driver.plugin.helper.allow_flag) self.assertTrue(self.driver.plugin.helper.allow_rw_flag) self.assertTrue(self.driver.plugin.helper.deny_flag) def test_update_access_nfs(self): self.driver.plugin.helper.login() self.allow_flag = False self.allow_rw_flag = False rules = [self.access_ip, self.access_ip_exist] self.driver.update_access(self._context, self.share_nfs, rules, None, None, self.share_server) self.assertTrue(self.driver.plugin.helper.allow_flag) self.assertTrue(self.driver.plugin.helper.allow_rw_flag) def test_update_access_cifs(self): self.driver.plugin.helper.login() self.allow_flag = False self.allow_rw_flag = False rules = [self.access_user, self.access_user_exist] self.driver.update_access(self._context, self.share_cifs, rules, None, None, self.share_server) self.assertTrue(self.driver.plugin.helper.allow_flag) self.assertTrue(self.driver.plugin.helper.allow_rw_flag) def test_update_access_rules_share_not_exist(self): self.driver.plugin.helper.login() rules = [self.access_ip] self.driver.plugin.helper.share_exist = False self.assertRaises(exception.InvalidShareAccess, self.driver.update_access, self._context, self.share_nfs, rules, None, None, self.share_server) def test_get_share_client_type_fail(self): share_proto = 'fake_proto' self.assertRaises(exception.InvalidInput, self.driver.plugin.helper._get_share_client_type, share_proto) @ddt.data("NFS", "CIFS") def test_get_share_url_type(self, share_proto): share_url_type = self.driver.plugin.helper._get_share_url_type( share_proto) self.assertEqual(share_proto + 'HARE', share_url_type) def test_get_location_path_fail(self): share_name = 'share-fake-uuid' share_proto = 'fake_proto' self.assertRaises(exception.InvalidShareAccess, self.driver.plugin._get_location_path, share_name, share_proto) def test_allow_access_nfs_fail(self): self.driver.plugin.helper.login() self.assertRaises(exception.InvalidShareAccess, self.driver.allow_access, self._context, self.share_nfs, self.access_cert, self.share_server) def test_allow_access_cifs_fail(self): self.driver.plugin.helper.login() self.assertRaises(exception.InvalidShareAccess, self.driver.allow_access, self._context, self.share_cifs, self.access_ip, self.share_server) def test_deny_access_nfs_fail(self): self.driver.plugin.helper.login() result = self.driver.deny_access(self._context, self.share_nfs, self.access_cert, self.share_server) self.assertIsNone(result) def test_deny_access_not_exist_fail(self): self.driver.plugin.helper.login() access_ip_not_exist = { 'access_type': 'ip', 'access_to': '100.112.0.99', 'access_level': 'rw', } result = 
self.driver.deny_access(self._context, self.share_nfs, access_ip_not_exist, self.share_server) self.assertIsNone(result) def test_deny_access_cifs_fail(self): self.driver.plugin.helper.login() result = self.driver.deny_access(self._context, self.share_cifs, self.access_ip, self.share_server) self.assertIsNone(result) def test_allow_access_ip_share_not_exist(self): self.driver.plugin.helper.login() self.driver.plugin.helper.share_exist = False self.assertRaises(exception.InvalidShareAccess, self.driver.allow_access, self._context, self.share_nfs, self.access_ip, self.share_server) def test_deny_access_ip_share_not_exist(self): self.driver.plugin.helper.login() self.driver.plugin.helper.share_exist = False self.driver.deny_access(self._context, self.share_nfs, self.access_ip, self.share_server) def test_allow_access_ip_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.allow_access, self._context, self.share_nfs, self.access_ip, self.share_server) def test_allow_access_user_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.allow_access, self._context, self.share_cifs, self.access_user, self.share_server) def test_deny_access_ip_success(self): self.driver.plugin.helper.login() self.deny_flag = False self.driver.deny_access(self._context, self.share_nfs, self.access_ip_exist, self.share_server) self.assertTrue(self.driver.plugin.helper.deny_flag) def test_deny_access_user_success(self): self.driver.plugin.helper.login() self.deny_flag = False self.driver.deny_access(self._context, self.share_cifs, self.access_user_exist, self.share_server) self.assertTrue(self.driver.plugin.helper.deny_flag) def test_deny_access_ip_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.deny_access, self._context, self.share_nfs, self.access_ip, self.share_server) def test_deny_access_user_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.deny_access, self._context, self.share_cifs, self.access_user, self.share_server) def test_create_nfs_snapshot_success(self): self.driver.plugin.helper.login() self.driver.plugin.helper.create_snapflag = False self.driver.create_snapshot(self._context, self.nfs_snapshot, self.share_server) self.assertTrue(self.driver.plugin.helper.create_snapflag) def test_create_nfs_snapshot_share_not_exist(self): self.driver.plugin.helper.login() self.driver.plugin.helper.share_exist = False self.assertRaises(exception.InvalidInput, self.driver.create_snapshot, self._context, self.nfs_snapshot, self.share_server) def test_create_cifs_snapshot_success(self): self.driver.plugin.helper.login() self.driver.plugin.helper.create_snapflag = False self.driver.create_snapshot(self._context, self.cifs_snapshot, self.share_server) self.assertTrue(self.driver.plugin.helper.create_snapflag) def test_delete_snapshot_success(self): self.driver.plugin.helper.login() self.driver.plugin.helper.delete_flag = False self.driver.plugin.helper.snapshot_flag = True self.driver.delete_snapshot(self._context, self.nfs_snapshot, self.share_server) self.assertTrue(self.driver.plugin.helper.delete_flag) def test_delete_snapshot_not_exist_success(self): self.driver.plugin.helper.login() self.driver.plugin.helper.delete_flag = False 
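        # A snapshot that is already gone from the array should still be
        # treated as successfully deleted.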
self.driver.plugin.helper.snapshot_flag = False self.driver.delete_snapshot(self._context, self.nfs_snapshot, self.share_server) self.assertTrue(self.driver.plugin.helper.delete_flag) def test_create_nfs_snapshot_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.create_snapshot, self._context, self.nfs_snapshot, self.share_server) def test_create_cifs_snapshot_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.create_snapshot, self._context, self.cifs_snapshot, self.share_server) def test_delete_nfs_snapshot_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.delete_snapshot, self._context, self.nfs_snapshot, self.share_server) def test_delete_cifs_snapshot_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.delete_snapshot, self._context, self.cifs_snapshot, self.share_server) @ddt.data({"share_proto": "NFS", "path": ["100.115.10.68:/share_fake_manage_uuid"]}, {"share_proto": "CIFS", "path": ["\\\\100.115.10.68\\share_fake_manage_uuid"]}) @ddt.unpack def test_manage_share_nfs_success(self, share_proto, path): if share_proto == "NFS": share = self.share_manage_nfs elif share_proto == "CIFS": share = self.share_manage_cifs share_type = self.fake_type_w_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() share_info = self.driver.manage_existing(share, self.driver_options) self.assertEqual(4, share_info["size"]) self.assertEqual(path, share_info["export_locations"]) @ddt.data({"fs_alloctype": "THIN", "path": ["100.115.10.68:/share_fake_manage_uuid"]}, {"fs_alloctype": "THICK", "path": ["100.115.10.68:/share_fake_uuid_thickfs"]}) @ddt.unpack def test_manage_share_with_default_type(self, fs_alloctype, path): if fs_alloctype == "THIN": share = self.share_manage_nfs elif fs_alloctype == "THICK": share = self.share_nfs_thick_thickfs share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() share_info = self.driver.manage_existing(share, self.driver_options) self.assertEqual(4, share_info["size"]) self.assertEqual(path, share_info["export_locations"]) @ddt.data({"path": ["100.115.10.68:/share_fake_uuid_inpartition"]}) @ddt.unpack def test_manage_share_remove_from_partition(self, path): share = self.share_nfs_inpartition share_type = self.fake_type_fake_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() share_info = self.driver.manage_existing(share, self.driver_options) self.assertEqual(4, share_info["size"]) self.assertEqual(path, share_info["export_locations"]) @ddt.data({"flag": "share_not_exist", "exc": exception.InvalidShare}, {"flag": "fs_status_error", "exc": exception.InvalidShare}, {"flag": "poolname_not_match", "exc": exception.InvalidHost}) @ddt.unpack def test_manage_share_fail(self, flag, exc): share = None if flag == "share_not_exist": self.driver.plugin.helper.share_exist = False share = self.share_nfs elif flag == "fs_status_error": self.driver.plugin.helper.fs_status_flag = False share = self.share_nfs elif flag == "poolname_not_match": share 
= self.share_pool_name_not_match self.driver.plugin.helper.login() share_type = self.fake_type_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.assertRaises(exc, self.driver.manage_existing, share, self.driver_options) def test_manage_share_thickfs_set_dedupe_fail(self): share = self.share_nfs_thick_thickfs self.driver.plugin.helper.login() share_type = self.fake_type_thin_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.driver.plugin.helper.login() self.assertRaises(exception.InvalidInput, self.driver.manage_existing, share, self.driver_options) def test_manage_share_thickfs_not_match_thinpool_fail(self): share = self.share_nfs_thickfs self.driver.plugin.helper.login() share_type = self.fake_type_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.driver.plugin.helper.login() self.assertRaises(exception.InvalidHost, self.driver.manage_existing, share, self.driver_options) @ddt.data({"flag": "old_cache_id", "exc": exception.InvalidInput}, {"flag": "not_old_cache_id", "exc": exception.InvalidInput}) @ddt.unpack def test_manage_share_cache_not_exist(self, flag, exc): share = None if flag == "old_cache_id": share = self.share_nfs_inpartition elif flag == "not_old_cache_id": share = self.share_nfs self.driver.plugin.helper.cache_exist = False share_type = self.fake_type_w_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() self.assertRaises(exc, self.driver.manage_existing, share, self.share_server) def test_manage_add_share_to_cache_fail(self): opts = dict( huawei_smartcache='true', huawei_smartpartition='true', cachename='test_cache_name_fake', partitionname='test_partition_name_fake', ) fs = dict( SMARTCACHEID='6', SMARTPARTITIONID=None, ) poolinfo = dict( type='Thin', ) self.assertRaises(exception.InvalidInput, self.driver.plugin.check_retype_change_opts, opts, poolinfo, fs) def test_manage_notsetcache_fail(self): opts = dict( huawei_smartcache='true', huawei_smartpartition='true', cachename=None, partitionname='test_partition_name_fake', ) fs = dict( SMARTCACHEID='6', SMARTPARTITIONID='6', ) poolinfo = dict( type='Thin', ) self.assertRaises(exception.InvalidInput, self.driver.plugin.check_retype_change_opts, opts, poolinfo, fs) @ddt.data({"flag": "old_partition_id", "exc": exception.InvalidInput}, {"flag": "not_old_partition_id", "exc": exception.InvalidInput}) @ddt.unpack def test_manage_share_partition_not_exist(self, flag, exc): share = None if flag == "old_partition_id": share = self.share_nfs_inpartition elif flag == "not_old_partition_id": share = self.share_nfs self.driver.plugin.helper.partition_exist = False share_type = self.fake_type_w_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() self.assertRaises(exc, self.driver.manage_existing, share, self.share_server) def test_manage_add_share_to_partition_fail(self): opts = dict( huawei_smartcache='true', huawei_smartpartition='true', cachename='test_cache_name_fake', partitionname='test_partition_name_fake', ) fs = dict( SMARTCACHEID=None, SMARTPARTITIONID='6', ) poolinfo = dict( type='Thin', ) self.assertRaises(exception.InvalidInput, 
self.driver.plugin.check_retype_change_opts, opts, poolinfo, fs) def test_manage_notset_partition_fail(self): opts = dict( huawei_smartcache='true', huawei_smartpartition='true', cachename='test_cache_name_fake', partitionname=None, ) fs = dict( SMARTCACHEID=None, SMARTPARTITIONID='6', ) poolinfo = dict( type='Thin', ) self.assertRaises(exception.InvalidInput, self.driver.plugin.check_retype_change_opts, opts, poolinfo, fs) @ddt.data({"share_proto": "NFS", "export_path": "fake_ip:/share_fake_uuid"}, {"share_proto": "NFS", "export_path": "fake_ip:/"}, {"share_proto": "NFS", "export_path": "100.112.0.1://share_fake_uuid"}, {"share_proto": "NFS", "export_path": None}, {"share_proto": "NFS", "export_path": "\\share_fake_uuid"}, {"share_proto": "CIFS", "export_path": "\\\\fake_ip\\share_fake_uuid"}, {"share_proto": "CIFS", "export_path": "\\dd\\100.115.10.68\\share_fake_uuid"}) @ddt.unpack def test_manage_export_path_fail(self, share_proto, export_path): share_manage_nfs_export_path_fail = { 'id': 'fake_uuid', 'project_id': 'fake_tenant_id', 'display_name': 'fake', 'name': 'share-fake-manage-uuid', 'size': 1, 'share_proto': share_proto, 'share_network_id': 'fake_net_id', 'share_server_id': 'fake-share-srv-id', 'export_locations': [ {'path': export_path}, ], 'host': 'fake_host@fake_backend#OpenStack_Pool', 'share_type_id': 'fake_id' } share_type = self.fake_type_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() self.assertRaises(exception.InvalidInput, self.driver.manage_existing, share_manage_nfs_export_path_fail, self.driver_options) def test_manage_logical_port_ip_fail(self): self.recreate_fake_conf_file(logical_port="") self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.driver.plugin.helper.login() share_type = self.fake_type_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.assertRaises(exception.InvalidInput, self.driver.manage_existing, self.share_nfs, self.driver_options) def test_get_pool_success(self): self.driver.plugin.helper.login() pool_name = self.driver.get_pool(self.share_nfs_host_not_exist) self.assertEqual('OpenStack_Pool', pool_name) def test_get_pool_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.share_exist = False pool_name = self.driver.get_pool(self.share_nfs_host_not_exist) self.assertIsNone(pool_name) def test_multi_resturls_success(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.recreate_fake_conf_file(multi_url=True) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.driver.plugin.helper.login() self.driver.plugin.helper.test_multi_url_flag = 2 location = self.driver.create_share(self._context, self.share_nfs, self.share_server) self.assertEqual("100.115.10.68:/share_fake_uuid", location) def test_multi_resturls_fail(self): self.recreate_fake_conf_file(multi_url=True) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.driver.plugin.helper.login() self.driver.plugin.helper.test_multi_url_flag = 1 self.assertRaises(exception.InvalidShare, self.driver.create_share, self._context, self.share_nfs, self.share_server) @dec_driver_handles_share_servers def test_setup_server_success(self): backend_details = self.driver.setup_server(self.fake_network_info) fake_share_server = { 'backend_details': backend_details } 
share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) location = self.driver.create_share(self._context, self.share_nfs, fake_share_server) self.assertTrue(db.share_type_get.called) self.assertEqual((self.fake_network_allocations[0]['ip_address'] + ":/share_fake_uuid"), location) @dec_driver_handles_share_servers def test_setup_server_with_bond_port_success(self): self.recreate_fake_conf_file(logical_port='fake_bond') self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) backend_details = self.driver.setup_server(self.fake_network_info) fake_share_server = { 'backend_details': backend_details } share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) location = self.driver.create_share(self._context, self.share_nfs, fake_share_server) self.assertTrue(db.share_type_get.called) self.assertEqual((self.fake_network_allocations[0]['ip_address'] + ":/share_fake_uuid"), location) @dec_driver_handles_share_servers def test_setup_server_logical_port_exist(self): def call_logical_port_exist(*args, **kwargs): url = args[0] method = args[2] if url == "/LIF" and method == "GET": data = """{"error":{"code":0},"data":[{ "ID":"4", "HOMEPORTID":"4", "IPV4ADDR":"111.111.111.109", "IPV4MASK":"255.255.255.0", "OPERATIONALSTATUS":"false"}]}""" elif url == "/LIF/4" and method == "PUT": data = """{"error":{"code":0}}""" else: return self.driver.plugin.helper.do_call(*args, **kwargs) res_json = jsonutils.loads(data) return res_json self.mock_object(self.driver.plugin.helper, "create_logical_port") with mock.patch.object(self.driver.plugin.helper, 'call') as mock_call: mock_call.side_effect = call_logical_port_exist backend_details = self.driver.setup_server(self.fake_network_info) self.assertEqual(backend_details['ip'], self.fake_network_allocations[0]['ip_address']) self.assertEqual( 0, self.driver.plugin.helper.create_logical_port.call_count) @dec_driver_handles_share_servers def test_setup_server_vlan_exist(self): def call_vlan_exist(*args, **kwargs): url = args[0] method = args[2] if url == "/vlan" and method == "GET": data = """{"error":{"code":0},"data":[{ "ID":"4", "NAME":"fake_vlan", "PORTID":"4", "TAG":"2"}]}""" else: return self.driver.plugin.helper.do_call(*args, **kwargs) res_json = jsonutils.loads(data) return res_json self.mock_object(self.driver.plugin.helper, "create_vlan") with mock.patch.object(self.driver.plugin.helper, 'call') as mock_call: mock_call.side_effect = call_vlan_exist backend_details = self.driver.setup_server(self.fake_network_info) self.assertEqual(backend_details['ip'], self.fake_network_allocations[0]['ip_address']) self.assertEqual( 0, self.driver.plugin.helper.create_vlan.call_count) def test_setup_server_invalid_ipv4(self): netwot_info_invali_ipv4 = self.fake_network_info netwot_info_invali_ipv4['network_allocations'][0]['ip_address'] =\ "::1/128" self.assertRaises(exception.InvalidInput, self.driver._setup_server, netwot_info_invali_ipv4) @dec_driver_handles_share_servers def test_setup_server_network_type_error(self): vxlan_netwotk_info = self.fake_network_info vxlan_netwotk_info['network_type'] = 'vxlan' self.assertRaises(exception.NetworkBadConfigurationException, self.driver.setup_server, vxlan_netwotk_info) @dec_driver_handles_share_servers def test_setup_server_port_conf_miss(self): self.recreate_fake_conf_file(logical_port='') self.driver.plugin.configuration.manila_huawei_conf_file = ( 
self.fake_conf_file) backend_details = self.driver.setup_server(self.fake_network_info) self.assertEqual(self.fake_network_allocations[0]['ip_address'], backend_details['ip']) @dec_driver_handles_share_servers def test_setup_server_port_offline_error(self): self.mock_object(self.driver.plugin, '_get_online_port', mock.Mock(return_value=(None, None))) self.assertRaises(exception.InvalidInput, self.driver.setup_server, self.fake_network_info) self.assertTrue(self.driver.plugin._get_online_port.called) @dec_driver_handles_share_servers def test_setup_server_port_not_exist(self): self.mock_object(self.driver.plugin.helper, 'get_port_id', mock.Mock(return_value=None)) self.assertRaises(exception.InvalidInput, self.driver.setup_server, self.fake_network_info) self.assertTrue(self.driver.plugin.helper.get_port_id.called) @dec_driver_handles_share_servers def test_setup_server_port_type_not_exist(self): self.mock_object(self.driver.plugin, '_get_optimal_port', mock.Mock(return_value=('CTE0.A.H2', '8'))) self.assertRaises(exception.InvalidInput, self.driver.setup_server, self.fake_network_info) self.assertTrue(self.driver.plugin._get_optimal_port.called) @dec_driver_handles_share_servers def test_setup_server_choose_eth_port(self): self.recreate_fake_conf_file(logical_port='CTE0.A.H0;fake_bond') self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.mock_object(self.driver.plugin.helper, 'get_all_vlan', mock.Mock(return_value=[{'NAME': 'fake_bond.10'}])) fake_network_info = self.fake_network_info backend_details = self.driver.setup_server(fake_network_info) self.assertTrue(self.driver.plugin.helper.get_all_vlan.called) self.assertEqual(self.fake_network_allocations[0]['ip_address'], backend_details['ip']) @dec_driver_handles_share_servers def test_setup_server_choose_bond_port(self): self.recreate_fake_conf_file(logical_port='CTE0.A.H0;fake_bond') self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.mock_object(self.driver.plugin.helper, 'get_all_vlan', mock.Mock(return_value=[{'NAME': 'CTE0.A.H0.10'}])) fake_network_info = self.fake_network_info backend_details = self.driver.setup_server(fake_network_info) self.assertTrue(self.driver.plugin.helper.get_all_vlan.called) self.assertEqual(self.fake_network_allocations[0]['ip_address'], backend_details['ip']) @dec_driver_handles_share_servers def test_setup_server_choose_least_logic_port(self): self.recreate_fake_conf_file( logical_port='CTE0.A.H0;CTE0.A.H2;CTE0.B.H0;BOND0') self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) fake_network_info = { 'server_id': '0', 'segmentation_id': None, 'cidr': '111.111.111.0/24', 'network_allocations': self.fake_network_allocations, 'network_type': None, } self.mock_object(self.driver.plugin, '_get_online_port', mock.Mock(return_value=(['CTE0.A.H0', 'CTE0.A.H2', 'CTE0.B.H0'], ['BOND0']))) self.mock_object(self.driver.plugin.helper, 'get_all_logical_port', mock.Mock(return_value=[ {'HOMEPORTTYPE': constants.PORT_TYPE_ETH, 'HOMEPORTNAME': 'CTE0.A.H0'}, {'HOMEPORTTYPE': constants.PORT_TYPE_VLAN, 'HOMEPORTNAME': 'CTE0.B.H0.10'}, {'HOMEPORTTYPE': constants.PORT_TYPE_BOND, 'HOMEPORTNAME': 'BOND0'}])) self.mock_object(self.driver.plugin.helper, 'get_port_id', mock.Mock(return_value=4)) backend_details = self.driver.setup_server(fake_network_info) self.assertEqual(self.fake_network_allocations[0]['ip_address'], backend_details['ip']) self.driver.plugin._get_online_port.assert_called_once_with( ['CTE0.A.H0', 'CTE0.A.H2', 
'CTE0.B.H0', 'BOND0']) self.assertTrue(self.driver.plugin.helper.get_all_logical_port.called) self.driver.plugin.helper.get_port_id.assert_called_once_with( 'CTE0.A.H2', constants.PORT_TYPE_ETH) @dec_driver_handles_share_servers def test_setup_server_create_vlan_fail(self): def call_create_vlan_fail(*args, **kwargs): url = args[0] method = args[2] if url == "/vlan" and method == "POST": data = """{"error":{"code":1}}""" res_json = jsonutils.loads(data) return res_json else: return self.driver.plugin.helper.do_call(*args, **kwargs) with mock.patch.object(self.driver.plugin.helper, 'call') as mock_call: mock_call.side_effect = call_create_vlan_fail self.assertRaises(exception.InvalidShare, self.driver.setup_server, self.fake_network_info) @dec_driver_handles_share_servers def test_setup_server_create_logical_port_fail(self): def call_create_logical_port_fail(*args, **kwargs): url = args[0] method = args[2] if url == "/LIF" and method == "POST": data = """{"error":{"code":1}}""" res_json = jsonutils.loads(data) return res_json else: return self.driver.plugin.helper.do_call(*args, **kwargs) fake_network_info = self.fake_network_info fake_network_info['security_services'] = [ self.fake_active_directory, self.fake_ldap] self.mock_object(self.driver.plugin.helper, "delete_vlan") self.mock_object(self.driver.plugin.helper, "delete_AD_config") self.mock_object(self.driver.plugin.helper, "delete_LDAP_config") self.mock_object(self.driver.plugin.helper, "get_AD_config", mock.Mock(side_effect=[None, {'DOMAINSTATUS': '1'}, {'DOMAINSTATUS': '0'}])) self.mock_object( self.driver.plugin.helper, "get_LDAP_config", mock.Mock( side_effect=[None, {'BASEDN': 'dc=huawei,dc=com'}])) with mock.patch.object(self.driver.plugin.helper, 'call') as mock_call: mock_call.side_effect = call_create_logical_port_fail self.assertRaises(exception.InvalidShare, self.driver.setup_server, fake_network_info) self.assertTrue(self.driver.plugin.helper.get_AD_config.called) self.assertTrue(self.driver.plugin.helper.get_LDAP_config.called) self.assertEqual( 1, self.driver.plugin.helper.delete_vlan.call_count) self.assertEqual( 1, self.driver.plugin.helper.delete_AD_config.call_count) self.assertEqual( 1, self.driver.plugin.helper.delete_LDAP_config.call_count) @dec_driver_handles_share_servers def test_setup_server_with_ad_domain_success(self): fake_network_info = self.fake_network_info fake_network_info['security_services'] = [self.fake_active_directory] self.mock_object(self.driver.plugin.helper, "get_AD_config", mock.Mock( side_effect=[None, {'DOMAINSTATUS': '0', 'FULLDOMAINNAME': 'huawei.com'}, {'DOMAINSTATUS': '1', 'FULLDOMAINNAME': 'huawei.com'}])) backend_details = self.driver.setup_server(fake_network_info) self.assertEqual(self.fake_network_allocations[0]['ip_address'], backend_details['ip']) self.assertTrue(self.driver.plugin.helper.get_AD_config.called) @ddt.data( "100.97.5.87", "100.97.5.87,100.97.5.88", "100.97.5.87,100.97.5.88,100.97.5.89" ) @dec_driver_handles_share_servers def test_setup_server_with_ldap_domain_success(self, server_ips): fake_network_info = self.fake_network_info fake_network_info['security_services'] = [self.fake_ldap] fake_network_info['security_services'][0]['server'] = server_ips self.mock_object( self.driver.plugin.helper, "get_LDAP_config", mock.Mock( side_effect=[None, {'BASEDN': 'dc=huawei,dc=com'}])) backend_details = self.driver.setup_server(fake_network_info) self.assertEqual(self.fake_network_allocations[0]['ip_address'], backend_details['ip']) 
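# get_LDAP_config is mocked to report no existing configuration on the first
# call, so setup_server() is expected to have queried it while applying the
# LDAP security service.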
self.assertTrue(self.driver.plugin.helper.get_LDAP_config.called) @dec_driver_handles_share_servers def test_setup_server_with_ldap_domain_fail(self): server_ips = "100.97.5.87,100.97.5.88,100.97.5.89,100.97.5.86" fake_network_info = self.fake_network_info fake_network_info['security_services'] = [self.fake_ldap] fake_network_info['security_services'][0]['server'] = server_ips self.mock_object( self.driver.plugin.helper, "get_LDAP_config", mock.Mock( side_effect=[None, {'BASEDN': 'dc=huawei,dc=com'}])) self.assertRaises(exception.InvalidInput, self.driver.setup_server, fake_network_info) self.assertTrue(self.driver.plugin.helper.get_LDAP_config.called) @ddt.data( {'type': 'fake_unsupport'}, {'type': 'active_directory', 'dns_ip': '', 'user': '', 'password': '', 'domain': ''}, {'type': 'ldap', 'server': '', 'domain': ''}, ) @dec_driver_handles_share_servers def test_setup_server_with_security_service_invalid(self, data): fake_network_info = self.fake_network_info fake_network_info['security_services'] = [data] self.assertRaises(exception.InvalidInput, self.driver.setup_server, fake_network_info) @dec_driver_handles_share_servers def test_setup_server_with_security_service_number_invalid(self): fake_network_info = self.fake_network_info ss = [ {'type': 'fake_unsupport'}, {'type': 'active_directory', 'dns_ip': '', 'user': '', 'password': '', 'domain': ''}, {'type': 'ldap', 'server': '', 'domain': ''}, ] fake_network_info['security_services'] = ss self.assertRaises(exception.InvalidInput, self.driver.setup_server, fake_network_info) @dec_driver_handles_share_servers def test_setup_server_dns_exist_error(self): fake_network_info = self.fake_network_info fake_network_info['security_services'] = [self.fake_active_directory] self.mock_object(self.driver.plugin.helper, "get_DNS_ip_address", mock.Mock(return_value=['100.97.5.85'])) self.assertRaises(exception.InvalidInput, self.driver.setup_server, fake_network_info) self.assertTrue(self.driver.plugin.helper.get_DNS_ip_address.called) @dec_driver_handles_share_servers def test_setup_server_ad_exist_error(self): fake_network_info = self.fake_network_info fake_network_info['security_services'] = [self.fake_active_directory] self.mock_object(self.driver.plugin.helper, "get_AD_config", mock.Mock( return_value={'DOMAINSTATUS': '1', 'FULLDOMAINNAME': 'huawei.com'})) self.assertRaises(exception.InvalidInput, self.driver.setup_server, fake_network_info) self.assertTrue(self.driver.plugin.helper.get_AD_config.called) @dec_driver_handles_share_servers def test_setup_server_ldap_exist_error(self): fake_network_info = self.fake_network_info fake_network_info['security_services'] = [self.fake_ldap] self.mock_object(self.driver.plugin.helper, "get_LDAP_config", mock.Mock( return_value={'LDAPSERVER': '100.97.5.87'})) self.assertRaises(exception.InvalidInput, self.driver.setup_server, fake_network_info) self.assertTrue(self.driver.plugin.helper.get_LDAP_config.called) @dec_driver_handles_share_servers def test_setup_server_with_dns_fail(self): fake_network_info = self.fake_network_info fake_active_directory = self.fake_active_directory ip_list = "100.97.5.5,100.97.5.6,100.97.5.7,100.97.5.8" fake_active_directory['dns_ip'] = ip_list fake_network_info['security_services'] = [fake_active_directory] self.mock_object( self.driver.plugin.helper, "get_AD_config", mock.Mock(side_effect=[None, {'DOMAINSTATUS': '1'}])) self.assertRaises(exception.InvalidInput, self.driver.setup_server, fake_network_info) self.assertTrue(self.driver.plugin.helper.get_AD_config.called) 
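# The AD join failure case below shortens the wait interval and timeout to 1s
# so the DOMAINSTATUS poll gives up quickly; the domain never reaches status
# '1', which is expected to surface as InvalidShare, with DNS being set and
# then reset (hence the two set_DNS_ip_address calls asserted).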
@dec_driver_handles_share_servers def test_setup_server_with_ad_domain_fail(self): fake_network_info = self.fake_network_info fake_network_info['security_services'] = [self.fake_active_directory] self.mock_object(self.driver.plugin, '_get_wait_interval', mock.Mock(return_value=1)) self.mock_object(self.driver.plugin, '_get_timeout', mock.Mock(return_value=1)) self.mock_object( self.driver.plugin.helper, "get_AD_config", mock.Mock(side_effect=[None, {'DOMAINSTATUS': '0', 'FULLDOMAINNAME': 'huawei.com'}])) self.mock_object(self.driver.plugin.helper, "set_DNS_ip_address") self.assertRaises(exception.InvalidShare, self.driver.setup_server, fake_network_info) self.assertTrue(self.driver.plugin.helper.get_AD_config.called) self.assertTrue(self.driver.plugin._get_wait_interval.called) self.assertTrue(self.driver.plugin._get_timeout.called) self.assertEqual( 2, self.driver.plugin.helper.set_DNS_ip_address.call_count) def test_teardown_server_success(self): server_details = { "logical_port_id": "1", "vlan_id": "2", "ad_created": "1", "ldap_created": "1", } security_services = [ self.fake_ldap, self.fake_active_directory ] self.logical_port_deleted = False self.vlan_deleted = False self.ad_deleted = False self.ldap_deleted = False self.dns_deleted = False def fake_teardown_call(*args, **kwargs): url = args[0] method = args[2] if url.startswith("/LIF"): if method == "GET": data = """{"error":{"code":0},"data":[{ "ID":"1"}]}""" elif method == "DELETE": data = """{"error":{"code":0}}""" self.logical_port_deleted = True elif url.startswith("/vlan"): if method == "GET": data = """{"error":{"code":0},"data":[{ "ID":"2"}]}""" elif method == "DELETE": data = """{"error":{"code":1073813505}}""" self.vlan_deleted = True elif url == "/AD_CONFIG": if method == "PUT": data = """{"error":{"code":0}}""" self.ad_deleted = True elif method == "GET": if self.ad_deleted: data = """{"error":{"code":0},"data":{ "DOMAINSTATUS":"0"}}""" else: data = """{"error":{"code":0},"data":{ "DOMAINSTATUS":"1", "FULLDOMAINNAME":"huawei.com"}}""" else: data = """{"error":{"code":0}}""" elif url == "/LDAP_CONFIG": if method == "DELETE": data = """{"error":{"code":0}}""" self.ldap_deleted = True elif method == "GET": if self.ldap_deleted: data = """{"error":{"code":0}}""" else: data = """{"error":{"code":0},"data":{ "LDAPSERVER":"100.97.5.87", "BASEDN":"dc=huawei,dc=com"}}""" else: data = """{"error":{"code":0}}""" elif url == "/DNS_Server": if method == "GET": data = "{\"error\":{\"code\":0},\"data\":{\ \"ADDRESS\":\"[\\\"100.97.5.5\\\",\\\"\\\"]\"}}" elif method == "PUT": data = """{"error":{"code":0}}""" self.dns_deleted = True else: data = """{"error":{"code":0}}""" else: return self.driver.plugin.helper.do_call(*args, **kwargs) res_json = jsonutils.loads(data) return res_json with mock.patch.object(self.driver.plugin.helper, 'call') as mock_call: mock_call.side_effect = fake_teardown_call self.driver._teardown_server(server_details, security_services) self.assertTrue(self.logical_port_deleted) self.assertTrue(self.vlan_deleted) self.assertTrue(self.ad_deleted) self.assertTrue(self.ldap_deleted) self.assertTrue(self.dns_deleted) def test_teardown_server_with_already_deleted(self): server_details = { "logical_port_id": "1", "vlan_id": "2", "ad_created": "1", "ldap_created": "1", } security_services = [ self.fake_ldap, self.fake_active_directory ] self.mock_object(self.driver.plugin.helper, "check_logical_port_exists_by_id", mock.Mock(return_value=False)) self.mock_object(self.driver.plugin.helper, "check_vlan_exists_by_id", 
mock.Mock(return_value=False)) self.mock_object(self.driver.plugin.helper, "get_DNS_ip_address", mock.Mock(return_value=None)) self.mock_object(self.driver.plugin.helper, "get_AD_domain_name", mock.Mock(return_value=(False, None))) self.mock_object(self.driver.plugin.helper, "get_LDAP_domain_server", mock.Mock(return_value=(False, None))) self.driver._teardown_server(server_details, security_services) self.assertEqual(1, (self.driver.plugin.helper. check_logical_port_exists_by_id.call_count)) self.assertEqual(1, (self.driver.plugin.helper. check_vlan_exists_by_id.call_count)) self.assertEqual(1, (self.driver.plugin.helper. get_DNS_ip_address.call_count)) self.assertEqual(1, (self.driver.plugin.helper. get_AD_domain_name.call_count)) self.assertEqual(1, (self.driver.plugin.helper. get_LDAP_domain_server.call_count)) def test_teardown_server_with_vlan_logical_port_deleted(self): server_details = { "logical_port_id": "1", "vlan_id": "2", } self.mock_object(self.driver.plugin.helper, 'get_all_logical_port', mock.Mock(return_value=[{'ID': '4'}])) self.mock_object(self.driver.plugin.helper, 'get_all_vlan', mock.Mock(return_value=[{'ID': '4'}])) self.driver._teardown_server(server_details, None) self.assertEqual(1, (self.driver.plugin.helper. get_all_logical_port.call_count)) self.assertEqual(1, (self.driver.plugin.helper. get_all_vlan.call_count)) def test_teardown_server_with_empty_detail(self): server_details = {} with mock.patch.object(connection.LOG, 'debug') as mock_debug: self.driver._teardown_server(server_details, None) mock_debug.assert_called_with('Server details are empty.') @ddt.data({"share_proto": "NFS", "path": ["100.115.10.68:/share_fake_uuid"]}, {"share_proto": "CIFS", "path": ["\\\\100.115.10.68\\share_fake_uuid"]}) @ddt.unpack def test_ensure_share_sucess(self, share_proto, path): share = self._get_share_by_proto(share_proto) self.driver.plugin.helper.login() location = self.driver.ensure_share(self._context, share, self.share_server) self.assertEqual(path, location) @ddt.data({"share_proto": "NFS", "path": ["111.111.111.109:/share_fake_uuid"]}, {"share_proto": "CIFS", "path": ["\\\\111.111.111.109\\share_fake_uuid"]}) @ddt.unpack @dec_driver_handles_share_servers def test_ensure_share_with_share_server_sucess(self, share_proto, path): share = self._get_share_by_proto(share_proto) backend_details = self.driver.setup_server(self.fake_network_info) fake_share_server = {'backend_details': backend_details} self.driver.plugin.helper.login() location = self.driver.ensure_share(self._context, share, fake_share_server) self.assertEqual(path, location) @ddt.data({"share_proto": "NFS"}, {"share_proto": "CIFS"}) @ddt.unpack def test_ensure_share_get_share_fail(self, share_proto): share = self._get_share_by_proto(share_proto) self.mock_object(self.driver.plugin.helper, '_get_share_by_name', mock.Mock(return_value={})) self.driver.plugin.helper.login() self.assertRaises(exception.ShareResourceNotFound, self.driver.ensure_share, self._context, share, self.share_server) def test_ensure_share_get_filesystem_status_fail(self): self.driver.plugin.helper.fs_status_flag = False share = self.share_nfs_thickfs self.driver.plugin.helper.login() self.assertRaises(exception.StorageResourceException, self.driver.ensure_share, self._context, share, self.share_server) def create_fake_conf_file(self, fake_conf_file, product_flag=True, username_flag=True, pool_node_flag=True, timeout_flag=True, wait_interval_flag=True, alloctype_value='Thick', multi_url=False, logical_port='100.115.10.68'): doc = 
xml.dom.minidom.Document() config = doc.createElement('Config') doc.appendChild(config) storage = doc.createElement('Storage') config.appendChild(storage) if self.configuration.driver_handles_share_servers: port0 = doc.createElement('Port') port0_text = doc.createTextNode(logical_port) port0.appendChild(port0_text) storage.appendChild(port0) else: controllerip0 = doc.createElement('LogicalPortIP') controllerip0_text = doc.createTextNode(logical_port) controllerip0.appendChild(controllerip0_text) storage.appendChild(controllerip0) if product_flag: product_text = doc.createTextNode('V3') else: product_text = doc.createTextNode('V3_fail') product = doc.createElement('Product') product.appendChild(product_text) storage.appendChild(product) if username_flag: username_text = doc.createTextNode('admin') else: username_text = doc.createTextNode('') username = doc.createElement('UserName') username.appendChild(username_text) storage.appendChild(username) userpassword = doc.createElement('UserPassword') userpassword_text = doc.createTextNode('Admin@storage') userpassword.appendChild(userpassword_text) storage.appendChild(userpassword) url = doc.createElement('RestURL') if multi_url: url_text = doc.createTextNode('http://100.115.10.69:8082/' 'deviceManager/rest/;' 'http://100.115.10.70:8082/' 'deviceManager/rest/') else: url_text = doc.createTextNode('http://100.115.10.69:8082/' 'deviceManager/rest/') url.appendChild(url_text) storage.appendChild(url) lun = doc.createElement('Filesystem') config.appendChild(lun) storagepool = doc.createElement('StoragePool') if pool_node_flag: pool_text = doc.createTextNode('OpenStack_Pool;OpenStack_Pool2; ;') else: pool_text = doc.createTextNode('') storagepool.appendChild(pool_text) timeout = doc.createElement('Timeout') if timeout_flag: timeout_text = doc.createTextNode('60') else: timeout_text = doc.createTextNode('') timeout.appendChild(timeout_text) waitinterval = doc.createElement('WaitInterval') if wait_interval_flag: waitinterval_text = doc.createTextNode('3') else: waitinterval_text = doc.createTextNode('') waitinterval.appendChild(waitinterval_text) NFSClient = doc.createElement('NFSClient') virtualip = doc.createElement('IP') virtualip_text = doc.createTextNode('100.112.0.1') virtualip.appendChild(virtualip_text) NFSClient.appendChild(virtualip) CIFSClient = doc.createElement('CIFSClient') username = doc.createElement('UserName') username_text = doc.createTextNode('user_name') username.appendChild(username_text) CIFSClient.appendChild(username) userpassword = doc.createElement('UserPassword') userpassword_text = doc.createTextNode('user_password') userpassword.appendChild(userpassword_text) CIFSClient.appendChild(userpassword) lun.appendChild(NFSClient) lun.appendChild(CIFSClient) lun.appendChild(timeout) lun.appendChild(waitinterval) lun.appendChild(storagepool) if alloctype_value: alloctype = doc.createElement('AllocType') alloctype_text = doc.createTextNode(alloctype_value) alloctype.appendChild(alloctype_text) lun.appendChild(alloctype) prefetch = doc.createElement('Prefetch') prefetch.setAttribute('Type', '0') prefetch.setAttribute('Value', '0') lun.appendChild(prefetch) fakefile = open(fake_conf_file, 'w') fakefile.write(doc.toprettyxml(indent='')) fakefile.close() def recreate_fake_conf_file(self, product_flag=True, username_flag=True, pool_node_flag=True, timeout_flag=True, wait_interval_flag=True, alloctype_value='Thick', multi_url=False, logical_port='100.115.10.68'): self.tmp_dir = tempfile.mkdtemp() self.fake_conf_file = self.tmp_dir + 
'/manila_huawei_conf.xml' self.addCleanup(shutil.rmtree, self.tmp_dir) self.create_fake_conf_file(self.fake_conf_file, product_flag, username_flag, pool_node_flag, timeout_flag, wait_interval_flag, alloctype_value, multi_url, logical_port) self.addCleanup(os.remove, self.fake_conf_file) manila-2.0.0/manila/tests/share/drivers/test_service_instance.py0000664000567000056710000033322012701407107026235 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 NetApp, Inc. # Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the instance module.""" import os import time import ddt import mock import netaddr from oslo_config import cfg from oslo_utils import importutils import six from manila import exception from manila.share import configuration from manila.share import driver # noqa from manila.share.drivers import service_instance from manila import test from manila.tests import fake_compute from manila.tests import fake_network from manila.tests import utils as test_utils CONF = cfg.CONF def fake_get_config_option(key): if key == 'driver_handles_share_servers': return True elif key == 'service_instance_password': return None elif key == 'service_instance_user': return 'fake_user' elif key == 'service_network_name': return 'fake_service_network_name' elif key == 'service_instance_flavor_id': return 100 elif key == 'service_instance_name_template': return 'fake_manila_service_instance_%s' elif key == 'service_image_name': return 'fake_service_image_name' elif key == 'manila_service_keypair_name': return 'fake_manila_service_keypair_name' elif key == 'path_to_private_key': return 'fake_path_to_private_key' elif key == 'path_to_public_key': return 'fake_path_to_public_key' elif key == 'max_time_to_build_instance': return 500 elif key == 'connect_share_server_to_tenant_network': return False elif key == 'service_network_cidr': return '99.254.0.0/24' elif key == 'service_network_division_mask': return 27 elif key == 'service_instance_network_helper_type': return service_instance.NEUTRON_NAME elif key == 'service_network_name': return 'fake_service_network_name' elif key == 'interface_driver': return 'i.am.fake.VifDriver' elif key == 'admin_network_id': return None elif key == 'admin_subnet_id': return None else: return mock.Mock() class FakeServiceInstance(object): def __init__(self, driver_config=None): super(FakeServiceInstance, self).__init__() self.compute_api = service_instance.compute.API() self.admin_context = service_instance.context.get_admin_context() self.driver_config = driver_config def get_config_option(self, key): return fake_get_config_option(key) class FakeNetworkHelper(service_instance.BaseNetworkhelper): @property def NAME(self): return self.get_config_option("service_instance_network_helper_type") def __init__(self, service_instance_manager): self.get_config_option = service_instance_manager.get_config_option def get_network_name(self, network_info): """Return name of network.""" return 'fake_network_name' def 
setup_connectivity_with_service_instances(self): """Nothing to do in fake network helper.""" def setup_network(self, network_info): """Combine fake network data.""" return dict() def teardown_network(self, server_details): """Nothing to do in fake network helper.""" @ddt.ddt class ServiceInstanceManagerTestCase(test.TestCase): """Test suite for service instance manager.""" def setUp(self): super(ServiceInstanceManagerTestCase, self).setUp() self.instance_id = 'fake_instance_id' self.config = configuration.Configuration(None) self.config.safe_get = mock.Mock(side_effect=fake_get_config_option) self.mock_object(service_instance.compute, 'API', fake_compute.API) self.mock_object( service_instance.os.path, 'exists', mock.Mock(return_value=True)) self.mock_object(service_instance, 'NeutronNetworkHelper', mock.Mock(side_effect=FakeNetworkHelper)) self.mock_object(service_instance, 'NovaNetworkHelper', mock.Mock(side_effect=FakeNetworkHelper)) self._manager = service_instance.ServiceInstanceManager(self.config) self._manager._execute = mock.Mock(return_value=('', '')) self.mock_object(time, 'sleep') def test_get_config_option_from_driver_config(self): username1 = 'fake_username_1_%s' % self.id() username2 = 'fake_username_2_%s' % self.id() config_data = dict( DEFAULT=dict(service_instance_user=username1), CUSTOM=dict(service_instance_user=username2)) with test_utils.create_temp_config_with_opts(config_data): self.config = configuration.Configuration( service_instance.common_opts, config_group='CUSTOM') self._manager = service_instance.ServiceInstanceManager( self.config) result = self._manager.get_config_option('service_instance_user') self.assertEqual(username2, result) def test_get_config_option_from_common_config(self): username = 'fake_username_%s' % self.id() config_data = dict(DEFAULT=dict(service_instance_user=username)) with test_utils.create_temp_config_with_opts(config_data): self._manager = service_instance.ServiceInstanceManager() result = self._manager.get_config_option('service_instance_user') self.assertEqual(username, result) def test_get_nova_network_helper(self): # Mock it again, because one of these was called in setUp method. self.mock_object(service_instance, 'NeutronNetworkHelper') self.mock_object(service_instance, 'NovaNetworkHelper') config_data = dict(DEFAULT=dict( service_instance_user='fake_username', driver_handles_share_servers=True, service_instance_network_helper_type=service_instance.NOVA_NAME)) with test_utils.create_temp_config_with_opts(config_data): self._manager = service_instance.ServiceInstanceManager() self._manager.network_helper service_instance.NovaNetworkHelper.assert_called_once_with( self._manager) self.assertFalse(service_instance.NeutronNetworkHelper.called) def test_get_neutron_network_helper(self): # Mock it again, because one of these was called in setUp method. 
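# As in the Nova variant above, only the helper class matching
# service_instance_network_helper_type is expected to be instantiated.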
self.mock_object(service_instance, 'NeutronNetworkHelper') self.mock_object(service_instance, 'NovaNetworkHelper') config_data = dict(DEFAULT=dict( service_instance_user='fake_username', driver_handles_share_servers=True, service_instance_network_helper_type=service_instance.NEUTRON_NAME) ) with test_utils.create_temp_config_with_opts(config_data): self._manager = service_instance.ServiceInstanceManager() self._manager.network_helper service_instance.NeutronNetworkHelper.assert_called_once_with( self._manager) self.assertFalse(service_instance.NovaNetworkHelper.called) @ddt.data( None, '', 'fake', service_instance.NOVA_NAME + '_as_prefix', service_instance.NEUTRON_NAME + '_as_prefix', 'as_suffix_' + service_instance.NOVA_NAME, 'as_suffix_' + service_instance.NEUTRON_NAME) def test_get_fake_network_helper(self, value): # Mock it again, because one of these was called in setUp method. self.mock_object(service_instance, 'NeutronNetworkHelper') self.mock_object(service_instance, 'NovaNetworkHelper') config_data = dict(DEFAULT=dict( service_instance_user='fake_username', driver_handles_share_servers=True, service_instance_network_helper_type=value)) with test_utils.create_temp_config_with_opts(config_data): manager = service_instance.ServiceInstanceManager() self.assertRaises(exception.ManilaException, lambda: manager.network_helper) self.assertFalse(service_instance.NeutronNetworkHelper.called) self.assertFalse(service_instance.NovaNetworkHelper.called) def test_init_with_driver_config_and_handling_of_share_servers(self): self.mock_object(service_instance, 'NeutronNetworkHelper') self.mock_object(service_instance, 'NovaNetworkHelper') config_data = dict(CUSTOM=dict( driver_handles_share_servers=True, service_instance_user='fake_user', service_instance_network_helper_type=service_instance.NOVA_NAME)) opts = service_instance.common_opts + driver.share_opts with test_utils.create_temp_config_with_opts(config_data): self.config = configuration.Configuration(opts, 'CUSTOM') self._manager = service_instance.ServiceInstanceManager( self.config) self.assertEqual( True, self._manager.get_config_option("driver_handles_share_servers")) self.assertIsNotNone(self._manager.driver_config) self.assertTrue(hasattr(self._manager, 'network_helper')) self.assertTrue(service_instance.NovaNetworkHelper.called) self.assertFalse(service_instance.NeutronNetworkHelper.called) def test_init_with_driver_config_and_wo_handling_of_share_servers(self): self.mock_object(service_instance, 'NeutronNetworkHelper') self.mock_object(service_instance, 'NovaNetworkHelper') config_data = dict(CUSTOM=dict( driver_handles_share_servers=False, service_instance_user='fake_user')) opts = service_instance.common_opts + driver.share_opts with test_utils.create_temp_config_with_opts(config_data): self.config = configuration.Configuration(opts, 'CUSTOM') self._manager = service_instance.ServiceInstanceManager( self.config) self.assertIsNotNone(self._manager.driver_config) self.assertFalse(hasattr(self._manager, 'network_helper')) self.assertFalse(service_instance.NovaNetworkHelper.called) self.assertFalse(service_instance.NeutronNetworkHelper.called) def test_init_with_common_config_and_handling_of_share_servers(self): self.mock_object(service_instance, 'NeutronNetworkHelper') self.mock_object(service_instance, 'NovaNetworkHelper') config_data = dict(DEFAULT=dict( service_instance_user='fake_username', driver_handles_share_servers=True, service_instance_network_helper_type=service_instance.NOVA_NAME)) with 
test_utils.create_temp_config_with_opts(config_data): self._manager = service_instance.ServiceInstanceManager() self.assertEqual( True, self._manager.get_config_option("driver_handles_share_servers")) self.assertIsNone(self._manager.driver_config) self.assertTrue(hasattr(self._manager, 'network_helper')) self.assertTrue(service_instance.NovaNetworkHelper.called) self.assertFalse(service_instance.NeutronNetworkHelper.called) def test_init_with_common_config_and_wo_handling_of_share_servers(self): self.mock_object(service_instance, 'NeutronNetworkHelper') self.mock_object(service_instance, 'NovaNetworkHelper') config_data = dict(DEFAULT=dict( service_instance_user='fake_username', driver_handles_share_servers=False)) with test_utils.create_temp_config_with_opts(config_data): self._manager = service_instance.ServiceInstanceManager() self.assertEqual( False, self._manager.get_config_option("driver_handles_share_servers")) self.assertIsNone(self._manager.driver_config) self.assertFalse(hasattr(self._manager, 'network_helper')) self.assertFalse(service_instance.NovaNetworkHelper.called) self.assertFalse(service_instance.NeutronNetworkHelper.called) def test_no_service_user_defined(self): group_name = 'GROUP_%s' % self.id() config_data = {group_name: dict()} with test_utils.create_temp_config_with_opts(config_data): config = configuration.Configuration( service_instance.common_opts, config_group=group_name) self.assertRaises( exception.ServiceInstanceException, service_instance.ServiceInstanceManager, config) def test_get_service_instance_name_using_driver_config(self): fake_server_id = 'fake_share_server_id_%s' % self.id() self.mock_object(service_instance, 'NeutronNetworkHelper') self.mock_object(service_instance, 'NovaNetworkHelper') config_data = dict(CUSTOM=dict( driver_handles_share_servers=True, service_instance_user='fake_user', service_instance_network_helper_type=service_instance.NOVA_NAME)) opts = service_instance.common_opts + driver.share_opts with test_utils.create_temp_config_with_opts(config_data): self.config = configuration.Configuration(opts, 'CUSTOM') self._manager = service_instance.ServiceInstanceManager( self.config) result = self._manager._get_service_instance_name(fake_server_id) self.assertIsNotNone(self._manager.driver_config) self.assertEqual( self._manager.get_config_option( "service_instance_name_template") % "%s_%s" % ( self._manager.driver_config.config_group, fake_server_id), result) self.assertEqual( True, self._manager.get_config_option("driver_handles_share_servers")) self.assertTrue(hasattr(self._manager, 'network_helper')) self.assertTrue(service_instance.NovaNetworkHelper.called) self.assertFalse(service_instance.NeutronNetworkHelper.called) def test_get_service_instance_name_using_default_config(self): fake_server_id = 'fake_share_server_id_%s' % self.id() config_data = dict(CUSTOM=dict( service_instance_user='fake_user', service_instance_network_helper_type=service_instance.NOVA_NAME)) with test_utils.create_temp_config_with_opts(config_data): self._manager = service_instance.ServiceInstanceManager() result = self._manager._get_service_instance_name(fake_server_id) self.assertIsNone(self._manager.driver_config) self.assertEqual( self._manager.get_config_option( "service_instance_name_template") % fake_server_id, result) def test__check_server_availability_available_from_start(self): fake_server = dict(id='fake_server', ip='127.0.0.1') self.mock_object(service_instance.socket.socket, 'connect') self.mock_object(service_instance.time, 'sleep') 
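# _check_server_availability probes the service VM over TCP port 22; with
# socket.connect mocked to succeed, the very first attempt should report the
# server as available.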
self.mock_object(service_instance.time, 'time', mock.Mock(return_value=0)) result = self._manager._check_server_availability(fake_server) self.assertTrue(result) service_instance.socket.socket.connect.assert_called_once_with( (fake_server['ip'], 22)) service_instance.time.time.assert_has_calls([ mock.call(), mock.call()]) service_instance.time.time.assert_has_calls([]) @ddt.data(True, False) def test__check_server_availability_with_recall(self, is_ok): fake_server = dict(id='fake_server', ip='fake_ip_address') self.fake_time = 0 def fake_connect(addr): if not(is_ok and self.fake_time > 1): raise service_instance.socket.error def fake_time(): return self.fake_time def fake_sleep(time): self.fake_time += 5 self.mock_object(service_instance.time, 'sleep', mock.Mock(side_effect=fake_sleep)) self.mock_object(service_instance.socket.socket, 'connect', mock.Mock(side_effect=fake_connect)) self.mock_object(service_instance.time, 'time', mock.Mock(side_effect=fake_time)) self._manager.max_time_to_build_instance = 6 result = self._manager._check_server_availability(fake_server) if is_ok: self.assertTrue(result) else: self.assertFalse(result) service_instance.socket.socket.connect.assert_has_calls([ mock.call((fake_server['ip'], 22)), mock.call((fake_server['ip'], 22))]) service_instance.time.time.assert_has_calls([ mock.call(), mock.call(), mock.call()]) service_instance.time.time.assert_has_calls([mock.call()]) def test_get_server_ip_found_in_networks_section(self): ip = '10.0.0.1' net_name = self._manager.get_config_option('service_network_name') fake_server = dict(networks={net_name: [ip]}) result = self._manager._get_server_ip(fake_server, net_name) self.assertEqual(ip, result) def test_get_server_ip_found_in_addresses_section(self): ip = '10.0.0.1' net_name = self._manager.get_config_option('service_network_name') fake_server = dict(addresses={net_name: [dict(addr=ip, version=4)]}) result = self._manager._get_server_ip(fake_server, net_name) self.assertEqual(ip, result) @ddt.data( {}, {'networks': {fake_get_config_option('service_network_name'): []}}, {'addresses': {fake_get_config_option('service_network_name'): []}}) def test_get_server_ip_not_found(self, data): self.assertRaises( exception.ManilaException, self._manager._get_server_ip, data, fake_get_config_option('service_network_name')) def test_security_group_name_not_specified(self): self.mock_object(self._manager, 'get_config_option', mock.Mock(return_value=None)) result = self._manager._get_or_create_security_group( self._manager.admin_context) self.assertIsNone(result) self._manager.get_config_option.assert_called_once_with( 'service_instance_security_group') def test_security_group_name_from_config_and_sg_exist(self): fake_secgroup = fake_compute.FakeSecurityGroup(name="fake_sg_name") self.mock_object(self._manager, 'get_config_option', mock.Mock(return_value="fake_sg_name")) self.mock_object(self._manager.compute_api, 'security_group_list', mock.Mock(return_value=[fake_secgroup, ])) result = self._manager._get_or_create_security_group( self._manager.admin_context) self.assertEqual(fake_secgroup, result) self._manager.get_config_option.assert_has_calls([ mock.call('service_instance_security_group'), ]) self._manager.compute_api.security_group_list.assert_called_once_with( self._manager.admin_context) def test_security_group_creation_with_name_from_config(self): name = "fake_sg_name" desc = "fake_sg_description" fake_secgroup = fake_compute.FakeSecurityGroup(name=name, description=desc) self.mock_object(self._manager, 
'get_config_option', mock.Mock(return_value=name)) self.mock_object(self._manager.compute_api, 'security_group_list', mock.Mock(return_value=[])) self.mock_object(self._manager.compute_api, 'security_group_create', mock.Mock(return_value=fake_secgroup)) self.mock_object(self._manager.compute_api, 'security_group_rule_create') result = self._manager._get_or_create_security_group( context=self._manager.admin_context, name=None, description=desc, ) self.assertEqual(fake_secgroup, result) self._manager.compute_api.security_group_list.assert_called_once_with( self._manager.admin_context) self._manager.compute_api.security_group_create.\ assert_called_once_with(self._manager.admin_context, name, desc) self._manager.get_config_option.assert_has_calls([ mock.call('service_instance_security_group'), ]) def test_security_group_creation_with_provided_name(self): name = "fake_sg_name" fake_secgroup = fake_compute.FakeSecurityGroup(name=name) self.mock_object(self._manager.compute_api, 'security_group_list', mock.Mock(return_value=[])) self.mock_object(self._manager.compute_api, 'security_group_create', mock.Mock(return_value=fake_secgroup)) self.mock_object(self._manager.compute_api, 'security_group_rule_create') result = self._manager._get_or_create_security_group( context=self._manager.admin_context, name=name) self._manager.compute_api.security_group_list.assert_called_once_with( self._manager.admin_context) self._manager.compute_api.security_group_create.\ assert_called_once_with( self._manager.admin_context, name, mock.ANY) self.assertEqual(fake_secgroup, result) def test_security_group_two_sg_in_list(self): name = "fake_name" fake_secgroup1 = fake_compute.FakeSecurityGroup(name=name) fake_secgroup2 = fake_compute.FakeSecurityGroup(name=name) self.mock_object(self._manager.compute_api, 'security_group_list', mock.Mock(return_value=[fake_secgroup1, fake_secgroup2])) self.assertRaises(exception.ServiceInstanceException, self._manager._get_or_create_security_group, self._manager.admin_context, name) self._manager.compute_api.security_group_list.assert_called_once_with( self._manager.admin_context) @ddt.data( dict(), dict(service_port_id='fake_service_port_id'), dict(public_port_id='fake_public_port_id'), dict(service_port_id='fake_service_port_id', public_port_id='fake_public_port_id'), ) def test_set_up_service_instance(self, update_data): fake_network_info = {'foo': 'bar', 'server_id': 'fake_server_id'} fake_server = { 'id': 'fake', 'ip': '1.2.3.4', 'public_address': '1.2.3.4', 'pk_path': None, 'subnet_id': 'fake-subnet-id', 'router_id': 'fake-router-id', 'username': self._manager.get_config_option( 'service_instance_user'), 'admin_ip': 'admin_ip'} fake_server.update(update_data) expected_details = fake_server.copy() expected_details.pop('pk_path') expected_details['instance_id'] = expected_details.pop('id') self.mock_object(self._manager, '_create_service_instance', mock.Mock(return_value=fake_server)) self.mock_object(self._manager, '_check_server_availability') result = self._manager.set_up_service_instance( self._manager.admin_context, fake_network_info) self._manager._create_service_instance.assert_called_once_with( self._manager.admin_context, fake_network_info['server_id'], fake_network_info) self._manager._check_server_availability.assert_called_once_with( expected_details) self.assertEqual(expected_details, result) def test_set_up_service_instance_not_available(self): fake_network_info = {'foo': 'bar', 'server_id': 'fake_server_id'} fake_server = { 'id': 'fake', 'ip': '1.2.3.4', 
'public_address': '1.2.3.4', 'pk_path': None, 'subnet_id': 'fake-subnet-id', 'router_id': 'fake-router-id', 'username': self._manager.get_config_option( 'service_instance_user'), 'admin_ip': 'admin_ip'} expected_details = fake_server.copy() expected_details.pop('pk_path') expected_details['instance_id'] = expected_details.pop('id') self.mock_object(self._manager, '_create_service_instance', mock.Mock(return_value=fake_server)) self.mock_object(self._manager, '_check_server_availability', mock.Mock(return_value=False)) result = self.assertRaises( exception.ServiceInstanceException, self._manager.set_up_service_instance, self._manager.admin_context, fake_network_info) self.assertTrue(hasattr(result, 'detail_data')) self.assertEqual( {'server_details': expected_details}, result.detail_data) self._manager._create_service_instance.assert_called_once_with( self._manager.admin_context, fake_network_info['server_id'], fake_network_info) self._manager._check_server_availability.assert_called_once_with( expected_details) def test_ensure_server(self): server_details = {'instance_id': 'fake_inst_id', 'ip': '1.2.3.4'} fake_server = fake_compute.FakeServer() self.mock_object(self._manager, '_check_server_availability', mock.Mock(return_value=True)) self.mock_object(self._manager.compute_api, 'server_get', mock.Mock(return_value=fake_server)) result = self._manager.ensure_service_instance( self._manager.admin_context, server_details) self._manager.compute_api.server_get.assert_called_once_with( self._manager.admin_context, server_details['instance_id']) self._manager._check_server_availability.assert_called_once_with( server_details) self.assertTrue(result) def test_ensure_server_not_exists(self): server_details = {'instance_id': 'fake_inst_id', 'ip': '1.2.3.4'} self.mock_object(self._manager, '_check_server_availability', mock.Mock(return_value=True)) self.mock_object(self._manager.compute_api, 'server_get', mock.Mock(side_effect=exception.InstanceNotFound( instance_id=server_details['instance_id']))) result = self._manager.ensure_service_instance( self._manager.admin_context, server_details) self._manager.compute_api.server_get.assert_called_once_with( self._manager.admin_context, server_details['instance_id']) self.assertFalse(self._manager._check_server_availability.called) self.assertFalse(result) def test_ensure_server_exception(self): server_details = {'instance_id': 'fake_inst_id', 'ip': '1.2.3.4'} self.mock_object(self._manager, '_check_server_availability', mock.Mock(return_value=True)) self.mock_object(self._manager.compute_api, 'server_get', mock.Mock(side_effect=exception.ManilaException)) self.assertRaises(exception.ManilaException, self._manager.ensure_service_instance, self._manager.admin_context, server_details) self._manager.compute_api.server_get.assert_called_once_with( self._manager.admin_context, server_details['instance_id']) self.assertFalse(self._manager._check_server_availability.called) def test_ensure_server_non_active(self): server_details = {'instance_id': 'fake_inst_id', 'ip': '1.2.3.4'} fake_server = fake_compute.FakeServer(status='ERROR') self.mock_object(self._manager.compute_api, 'server_get', mock.Mock(return_value=fake_server)) self.mock_object(self._manager, '_check_server_availability', mock.Mock(return_value=True)) result = self._manager.ensure_service_instance( self._manager.admin_context, server_details) self.assertFalse(self._manager._check_server_availability.called) self.assertFalse(result) def test_ensure_server_no_instance_id(self): # Tests that we avoid a 
KeyError if the share details don't have an # instance_id key set (so we can't find the share instance). self.assertFalse(self._manager.ensure_service_instance( self._manager.admin_context, {'ip': '1.2.3.4'})) def test_get_key_create_new(self): keypair_name = self._manager.get_config_option( 'manila_service_keypair_name') fake_keypair = fake_compute.FakeKeypair(name=keypair_name) self.mock_object(self._manager.compute_api, 'keypair_list', mock.Mock(return_value=[])) self.mock_object(self._manager.compute_api, 'keypair_import', mock.Mock(return_value=fake_keypair)) result = self._manager._get_key(self._manager.admin_context) self.assertEqual( (fake_keypair.name, os.path.expanduser(self._manager.get_config_option( 'path_to_private_key'))), result) self._manager.compute_api.keypair_list.assert_called_once_with( self._manager.admin_context) self._manager.compute_api.keypair_import.assert_called_once_with( self._manager.admin_context, keypair_name, '') def test_get_key_exists(self): fake_keypair = fake_compute.FakeKeypair( name=self._manager.get_config_option( 'manila_service_keypair_name'), public_key='fake_public_key') self.mock_object(self._manager.compute_api, 'keypair_list', mock.Mock(return_value=[fake_keypair])) self.mock_object(self._manager.compute_api, 'keypair_import', mock.Mock(return_value=fake_keypair)) self.mock_object(self._manager, '_execute', mock.Mock(return_value=('fake_public_key', ''))) result = self._manager._get_key(self._manager.admin_context) self._manager.compute_api.keypair_list.assert_called_once_with( self._manager.admin_context) self.assertFalse(self._manager.compute_api.keypair_import.called) self.assertEqual( (fake_keypair.name, os.path.expanduser(self._manager.get_config_option( 'path_to_private_key'))), result) def test_get_key_exists_recreate(self): fake_keypair = fake_compute.FakeKeypair( name=self._manager.get_config_option( 'manila_service_keypair_name'), public_key='fake_public_key1') self.mock_object(self._manager.compute_api, 'keypair_list', mock.Mock(return_value=[fake_keypair])) self.mock_object(self._manager.compute_api, 'keypair_import', mock.Mock(return_value=fake_keypair)) self.mock_object(self._manager.compute_api, 'keypair_delete') self.mock_object(self._manager, '_execute', mock.Mock(return_value=('fake_public_key2', ''))) result = self._manager._get_key(self._manager.admin_context) self._manager.compute_api.keypair_list.assert_called_once_with( self._manager.admin_context) self._manager.compute_api.keypair_delete.assert_called_once_with( self._manager.admin_context, fake_keypair.id) self._manager.compute_api.keypair_import.assert_called_once_with( self._manager.admin_context, fake_keypair.name, 'fake_public_key2') self.assertEqual( (fake_keypair.name, os.path.expanduser(self._manager.get_config_option( 'path_to_private_key'))), result) def test_get_key_more_than_one_exist(self): fake_keypair = fake_compute.FakeKeypair( name=self._manager.get_config_option( 'manila_service_keypair_name'), public_key='fake_public_key1') self.mock_object(self._manager.compute_api, 'keypair_list', mock.Mock(return_value=[fake_keypair, fake_keypair])) self.assertRaises( exception.ServiceInstanceException, self._manager._get_key, self._manager.admin_context) self._manager.compute_api.keypair_list.assert_called_once_with( self._manager.admin_context) def test_get_key_keypath_to_public_not_set(self): self._manager.path_to_public_key = None result = self._manager._get_key(self._manager.admin_context) self.assertEqual((None, None), result) def 
test_get_key_keypath_to_private_not_set(self): self._manager.path_to_private_key = None result = self._manager._get_key(self._manager.admin_context) self.assertEqual((None, None), result) def test_get_key_incorrect_keypath_to_public(self): def exists_side_effect(path): return False if path == 'fake_path' else True self._manager.path_to_public_key = 'fake_path' os_path_exists_mock = mock.Mock(side_effect=exists_side_effect) with mock.patch.object(os.path, 'exists', os_path_exists_mock): with mock.patch.object(os.path, 'expanduser', mock.Mock(side_effect=lambda value: value)): result = self._manager._get_key(self._manager.admin_context) self.assertEqual((None, None), result) def test_get_key_incorrect_keypath_to_private(self): def exists_side_effect(path): return False if path == 'fake_path' else True self._manager.path_to_private_key = 'fake_path' os_path_exists_mock = mock.Mock(side_effect=exists_side_effect) with mock.patch.object(os.path, 'exists', os_path_exists_mock): with mock.patch.object(os.path, 'expanduser', mock.Mock(side_effect=lambda value: value)): result = self._manager._get_key(self._manager.admin_context) self.assertEqual((None, None), result) def test_get_service_image(self): fake_image1 = fake_compute.FakeImage( name=self._manager.get_config_option('service_image_name')) fake_image2 = fake_compute.FakeImage(name='another-image') self.mock_object(self._manager.compute_api, 'image_list', mock.Mock(return_value=[fake_image1, fake_image2])) result = self._manager._get_service_image(self._manager.admin_context) self.assertEqual(fake_image1.id, result) def test_get_service_image_not_found(self): self.mock_object(self._manager.compute_api, 'image_list', mock.Mock(return_value=[])) self.assertRaises( exception.ServiceInstanceException, self._manager._get_service_image, self._manager.admin_context) def test_get_service_image_ambiguous(self): fake_image = fake_compute.FakeImage( name=fake_get_config_option('service_image_name')) fake_images = [fake_image, fake_image] self.mock_object(self._manager.compute_api, 'image_list', mock.Mock(return_value=fake_images)) self.assertRaises( exception.ServiceInstanceException, self._manager._get_service_image, self._manager.admin_context) def test__delete_server_not_found(self): self.mock_object(self._manager.compute_api, 'server_delete') self.mock_object( self._manager.compute_api, 'server_get', mock.Mock(side_effect=exception.InstanceNotFound( instance_id=self.instance_id))) self._manager._delete_server( self._manager.admin_context, self.instance_id) self.assertFalse(self._manager.compute_api.server_delete.called) self._manager.compute_api.server_get.assert_called_once_with( self._manager.admin_context, self.instance_id) def test__delete_server(self): def fake_server_get(*args, **kwargs): ctx = args[0] if not hasattr(ctx, 'called'): ctx.called = True return else: raise exception.InstanceNotFound(instance_id=self.instance_id) self.mock_object(self._manager.compute_api, 'server_delete') self.mock_object(self._manager.compute_api, 'server_get', mock.Mock(side_effect=fake_server_get)) self._manager._delete_server( self._manager.admin_context, self.instance_id) self._manager.compute_api.server_delete.assert_called_once_with( self._manager.admin_context, self.instance_id) self._manager.compute_api.server_get.assert_has_calls([ mock.call(self._manager.admin_context, self.instance_id), mock.call(self._manager.admin_context, self.instance_id)]) def test__delete_server_found_always(self): self.fake_time = 0 def fake_time(): return self.fake_time def 
fake_sleep(time): self.fake_time += 1 self.mock_object(self._manager.compute_api, 'server_delete') self.mock_object(self._manager.compute_api, 'server_get') self.mock_object(service_instance, 'time') self.mock_object( service_instance.time, 'time', mock.Mock(side_effect=fake_time)) self.mock_object( service_instance.time, 'sleep', mock.Mock(side_effect=fake_sleep)) self.mock_object(self._manager, 'max_time_to_build_instance', 2) self.assertRaises( exception.ServiceInstanceException, self._manager._delete_server, self._manager.admin_context, self.instance_id) self._manager.compute_api.server_delete.assert_called_once_with( self._manager.admin_context, self.instance_id) service_instance.time.sleep.assert_has_calls( [mock.call(mock.ANY) for i in range(2)]) service_instance.time.time.assert_has_calls( [mock.call() for i in range(4)]) self._manager.compute_api.server_get.assert_has_calls( [mock.call(self._manager.admin_context, self.instance_id) for i in range(3)]) def test_delete_service_instance(self): fake_server_details = dict( router_id='foo', subnet_id='bar', instance_id='quuz') self.mock_object(self._manager, '_delete_server') self.mock_object(self._manager.network_helper, 'teardown_network') self._manager.delete_service_instance( self._manager.admin_context, fake_server_details) self._manager._delete_server.assert_called_once_with( self._manager.admin_context, fake_server_details['instance_id']) self._manager.network_helper.teardown_network.assert_called_once_with( fake_server_details) @ddt.data( *[{'s': s, 't': t, 'server': server} for s, t in ( ('fake_net_s', 'fake_net_t'), ('fake_net_s', '12.34.56.78'), ('98.76.54.123', 'fake_net_t'), ('98.76.54.123', '12.34.56.78')) for server in ( {'networks': { 'fake_net_s': ['foo', '98.76.54.123', 'bar'], 'fake_net_t': ['baar', '12.34.56.78', 'quuz']}}, {'addresses': { 'fake_net_s': [ {'addr': 'fake1'}, {'addr': '98.76.54.123'}, {'addr': 'fake2'}], 'fake_net_t': [ {'addr': 'fake3'}, {'addr': '12.34.56.78'}, {'addr': 'fake4'}], }})]) @ddt.unpack def test_get_common_server_valid_cases(self, s, t, server): self._get_common_server(s, t, server, True) @ddt.data( *[{'s': s, 't': t, 'server': server} for s, t in ( ('fake_net_s', 'fake'), ('fake', 'fake_net_t'), ('fake', 'fake'), ('98.76.54.123', '12.12.12.1212'), ('12.12.12.1212', '12.34.56.78'), ('12.12.12.1212', '12.12.12.1212')) for server in ( {'networks': { 'fake_net_s': ['foo', '98.76.54.123', 'bar'], 'fake_net_t': ['baar', '12.34.56.78', 'quuz']}}, {'addresses': { 'fake_net_s': [ {'addr': 'fake1'}, {'addr': '98.76.54.123'}, {'addr': 'fake2'}], 'fake_net_t': [ {'addr': 'fake3'}, {'addr': '12.34.56.78'}, {'addr': 'fake4'}], }})]) @ddt.unpack def test_get_common_server_invalid_cases(self, s, t, server): self._get_common_server(s, t, server, False) def _get_common_server(self, s, t, server, is_valid=True): fake_instance_id = 'fake_instance_id' fake_user = 'fake_user' fake_pass = 'fake_pass' fake_addr_s = '98.76.54.123' fake_addr_t = '12.34.56.78' fake_server = {'id': fake_instance_id} fake_server.update(server) expected = { 'backend_details': { 'username': fake_user, 'password': fake_pass, 'pk_path': self._manager.path_to_private_key, 'ip': fake_addr_s, 'public_address': fake_addr_t, 'instance_id': fake_instance_id, } } def fake_get_config_option(attr): if attr == 'service_net_name_or_ip': return s elif attr == 'tenant_net_name_or_ip': return t elif attr == 'service_instance_name_or_id': return fake_instance_id elif attr == 'service_instance_user': return fake_user elif attr == 
'service_instance_password': return fake_pass else: raise exception.ManilaException("Wrong test data provided.") self.mock_object( self._manager.compute_api, 'server_get_by_name_or_id', mock.Mock(return_value=fake_server)) self.mock_object( self._manager, 'get_config_option', mock.Mock(side_effect=fake_get_config_option)) if is_valid: actual = self._manager.get_common_server() self.assertEqual(expected, actual) else: self.assertRaises( exception.ManilaException, self._manager.get_common_server) self.assertTrue( self._manager.compute_api.server_get_by_name_or_id.called) @ddt.data(service_instance.NOVA_NAME, service_instance.NEUTRON_NAME) def test___create_service_instance_with_sg_success(self, helper_type): self.mock_object(service_instance, 'NeutronNetworkHelper', mock.Mock(side_effect=FakeNetworkHelper)) self.mock_object(service_instance, 'NovaNetworkHelper', mock.Mock(side_effect=FakeNetworkHelper)) config_data = dict(DEFAULT=dict( driver_handles_share_servers=True, service_instance_user='fake_user', service_instance_network_helper_type=helper_type)) with test_utils.create_temp_config_with_opts(config_data): self._manager = service_instance.ServiceInstanceManager() server_create = dict(id='fakeid', status='CREATING', networks=dict()) net_name = self._manager.get_config_option("service_network_name") sg = type('FakeSG', (object, ), dict(id='fakeid', name='fakename')) ip_address = 'fake_ip_address' service_image_id = 'fake_service_image_id' key_data = 'fake_key_name', 'fake_key_path' instance_name = 'fake_instance_name' network_info = dict() network_data = {'nics': ['fake_nic1', 'fake_nic2']} if helper_type == service_instance.NEUTRON_NAME: network_data['router'] = dict(id='fake_router_id') server_get = dict( id='fakeid', status='ACTIVE', networks={net_name: [ip_address]}) if helper_type == service_instance.NEUTRON_NAME: network_data.update(dict( router_id='fake_router_id', subnet_id='fake_subnet_id', public_port=dict(id='fake_public_port', fixed_ips=[dict(ip_address=ip_address)]), service_port=dict(id='fake_service_port', fixed_ips=[{'ip_address': ip_address}]), admin_port={'id': 'fake_admin_port', 'fixed_ips': [{'ip_address': ip_address}]})) self.mock_object(service_instance.time, 'time', mock.Mock(return_value=5)) self.mock_object(self._manager.network_helper, 'setup_network', mock.Mock(return_value=network_data)) self.mock_object(self._manager.network_helper, 'get_network_name', mock.Mock(return_value=net_name)) self.mock_object(self._manager, '_get_service_image', mock.Mock(return_value=service_image_id)) self.mock_object(self._manager, '_get_key', mock.Mock(return_value=key_data)) self.mock_object(self._manager, '_get_or_create_security_group', mock.Mock(return_value=sg)) self.mock_object(self._manager.compute_api, 'server_create', mock.Mock(return_value=server_create)) self.mock_object(self._manager.compute_api, 'server_get', mock.Mock(return_value=server_get)) self.mock_object(self._manager.compute_api, 'add_security_group_to_server') expected = { 'id': server_get['id'], 'status': server_get['status'], 'pk_path': key_data[1], 'public_address': ip_address, 'router_id': network_data.get('router_id'), 'subnet_id': network_data.get('subnet_id'), 'instance_id': server_get['id'], 'ip': ip_address, 'networks': server_get['networks']} if helper_type == service_instance.NEUTRON_NAME: expected['router_id'] = network_data['router']['id'] expected['public_port_id'] = 'fake_public_port' expected['service_port_id'] = 'fake_service_port' expected['admin_port_id'] = 'fake_admin_port' 
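# With the Neutron helper, admin_ip is expected to come from the fixed_ips of
# the admin port returned by the fake setup_network() data.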
expected['admin_ip'] = 'fake_ip_address' result = self._manager._create_service_instance( self._manager.admin_context, instance_name, network_info) self.assertEqual(expected, result) self.assertTrue(service_instance.time.time.called) self._manager.network_helper.setup_network.assert_called_once_with( network_info) self._manager._get_service_image.assert_called_once_with( self._manager.admin_context) self._manager._get_key.assert_called_once_with( self._manager.admin_context) self._manager._get_or_create_security_group.assert_called_once_with( self._manager.admin_context) self._manager.compute_api.server_create.assert_called_once_with( self._manager.admin_context, name=instance_name, image=service_image_id, flavor=100, key_name=key_data[0], nics=network_data['nics'], availability_zone=service_instance.CONF.storage_availability_zone) self._manager.compute_api.server_get.assert_called_once_with( self._manager.admin_context, server_create['id']) if helper_type == service_instance.NEUTRON_NAME: self._manager.compute_api.add_security_group_to_server.\ assert_called_once_with( self._manager.admin_context, server_get['id'], sg.id) self._manager.network_helper.get_network_name.assert_has_calls([]) else: self._manager.compute_api.add_security_group_to_server.\ assert_called_once_with( self._manager.admin_context, server_get['id'], sg.name) self._manager.network_helper.get_network_name.\ assert_called_once_with(network_info) def test___create_service_instance_neutron_no_admin_ip(self): self.mock_object(service_instance, 'NeutronNetworkHelper', mock.Mock(side_effect=FakeNetworkHelper)) config_data = {'DEFAULT': { 'driver_handles_share_servers': True, 'service_instance_user': 'fake_user', 'service_instance_network_helper_type': ( service_instance.NEUTRON_NAME)}} with test_utils.create_temp_config_with_opts(config_data): self._manager = service_instance.ServiceInstanceManager() server_create = {'id': 'fakeid', 'status': 'CREATING', 'networks': {}} net_name = self._manager.get_config_option("service_network_name") sg = type('FakeSG', (object, ), {'id': 'fakeid', 'name': 'fakename'}) ip_address = 'fake_ip_address' service_image_id = 'fake_service_image_id' key_data = 'fake_key_name', 'fake_key_path' instance_name = 'fake_instance_name' network_info = {} network_data = { 'nics': ['fake_nic1', 'fake_nic2'], 'router_id': 'fake_router_id', 'subnet_id': 'fake_subnet_id', 'public_port': {'id': 'fake_public_port', 'fixed_ips': [{'ip_address': ip_address}]}, 'service_port': {'id': 'fake_service_port', 'fixed_ips': [{'ip_address': ip_address}]}, 'admin_port': {'id': 'fake_admin_port', 'fixed_ips': []}, 'router': {'id': 'fake_router_id'}} server_get = { 'id': 'fakeid', 'status': 'ACTIVE', 'networks': {net_name: [ip_address]}} self.mock_object(service_instance.time, 'time', mock.Mock(return_value=5)) self.mock_object(self._manager.network_helper, 'setup_network', mock.Mock(return_value=network_data)) self.mock_object(self._manager.network_helper, 'get_network_name', mock.Mock(return_value=net_name)) self.mock_object(self._manager, '_get_service_image', mock.Mock(return_value=service_image_id)) self.mock_object(self._manager, '_get_key', mock.Mock(return_value=key_data)) self.mock_object(self._manager, '_get_or_create_security_group', mock.Mock(return_value=sg)) self.mock_object(self._manager.compute_api, 'server_create', mock.Mock(return_value=server_create)) self.mock_object(self._manager.compute_api, 'server_get', mock.Mock(return_value=server_get)) self.mock_object(self._manager.compute_api, 
'add_security_group_to_server') self.assertRaises( exception.AdminIPNotFound, self._manager._create_service_instance, self._manager.admin_context, instance_name, network_info) self.assertTrue(service_instance.time.time.called) self._manager.network_helper.setup_network.assert_called_once_with( network_info) self._manager._get_service_image.assert_called_once_with( self._manager.admin_context) self._manager._get_key.assert_called_once_with( self._manager.admin_context) self._manager._get_or_create_security_group.assert_called_once_with( self._manager.admin_context) self._manager.compute_api.server_create.assert_called_once_with( self._manager.admin_context, name=instance_name, image=service_image_id, flavor=100, key_name=key_data[0], nics=network_data['nics'], availability_zone=service_instance.CONF.storage_availability_zone) self._manager.compute_api.server_get.assert_called_once_with( self._manager.admin_context, server_create['id']) self._manager.compute_api.add_security_group_to_server.\ assert_called_once_with( self._manager.admin_context, server_get['id'], sg.id) self._manager.network_helper.get_network_name.assert_has_calls([]) @ddt.data( dict( instance_id_included=False, mockobj=mock.Mock(side_effect=exception.ServiceInstanceException)), dict( instance_id_included=True, mockobj=mock.Mock(return_value=dict(id='fakeid', status='ERROR')))) @ddt.unpack def test___create_service_instance_failed_to_create( self, instance_id_included, mockobj): service_image_id = 'fake_service_image_id' key_data = 'fake_key_name', 'fake_key_path' instance_name = 'fake_instance_name' network_info = dict() network_data = dict( nics=['fake_nic1', 'fake_nic2'], router_id='fake_router_id', subnet_id='fake_subnet_id') self.mock_object(self._manager.network_helper, 'setup_network', mock.Mock(return_value=network_data)) self.mock_object(self._manager, '_get_service_image', mock.Mock(return_value=service_image_id)) self.mock_object(self._manager, '_get_key', mock.Mock(return_value=key_data)) self.mock_object( self._manager.compute_api, 'server_create', mockobj) self.mock_object( self._manager, 'wait_for_instance_to_be_active', mock.Mock(side_effect=exception.ServiceInstanceException)) try: self._manager._create_service_instance( self._manager.admin_context, instance_name, network_info) except exception.ServiceInstanceException as e: expected = dict(server_details=dict( subnet_id=network_data['subnet_id'], router_id=network_data['router_id'])) if instance_id_included: expected['server_details']['instance_id'] = 'fakeid' self.assertEqual(expected, e.detail_data) else: raise exception.ManilaException('Expected error was not raised.') self._manager.network_helper.setup_network.assert_called_once_with( network_info) self._manager._get_service_image.assert_called_once_with( self._manager.admin_context) self._manager._get_key.assert_called_once_with( self._manager.admin_context) self._manager.compute_api.server_create.assert_called_once_with( self._manager.admin_context, name=instance_name, image=service_image_id, flavor=100, key_name=key_data[0], nics=network_data['nics'], availability_zone=service_instance.CONF.storage_availability_zone) def test___create_service_instance_failed_to_build(self): server_create = dict(id='fakeid', status='CREATING', networks=dict()) service_image_id = 'fake_service_image_id' key_data = 'fake_key_name', 'fake_key_path' instance_name = 'fake_instance_name' network_info = dict() network_data = dict( nics=['fake_nic1', 'fake_nic2'], router_id='fake_router_id', subnet_id='fake_subnet_id') 
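        # Creation succeeds but waiting for the instance to become ACTIVE
        # fails; the raised exception should carry the subnet, router and
        # instance ids in its detail_data.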
self.mock_object(self._manager.network_helper, 'setup_network', mock.Mock(return_value=network_data)) self.mock_object(self._manager, '_get_service_image', mock.Mock(return_value=service_image_id)) self.mock_object(self._manager, '_get_key', mock.Mock(return_value=key_data)) self.mock_object(self._manager.compute_api, 'server_create', mock.Mock(return_value=server_create)) self.mock_object( self._manager, 'wait_for_instance_to_be_active', mock.Mock(side_effect=exception.ServiceInstanceException)) try: self._manager._create_service_instance( self._manager.admin_context, instance_name, network_info) except exception.ServiceInstanceException as e: self.assertEqual( dict(server_details=dict(subnet_id=network_data['subnet_id'], router_id=network_data['router_id'], instance_id=server_create['id'])), e.detail_data) else: raise exception.ManilaException('Expected error was not raised.') self._manager.network_helper.setup_network.assert_called_once_with( network_info) self._manager._get_service_image.assert_called_once_with( self._manager.admin_context) self._manager._get_key.assert_called_once_with( self._manager.admin_context) self._manager.compute_api.server_create.assert_called_once_with( self._manager.admin_context, name=instance_name, image=service_image_id, flavor=100, key_name=key_data[0], nics=network_data['nics'], availability_zone=service_instance.CONF.storage_availability_zone) @ddt.data( dict(name=None, path=None), dict(name=None, path='/tmp')) @ddt.unpack def test__create_service_instance_no_key_and_no_path(self, name, path): key_data = name, path self.mock_object(self._manager, '_get_service_image') self.mock_object(self._manager, '_get_key', mock.Mock(return_value=key_data)) self.assertRaises( exception.ServiceInstanceException, self._manager._create_service_instance, self._manager.admin_context, 'fake_instance_name', dict()) self._manager._get_service_image.assert_called_once_with( self._manager.admin_context) self._manager._get_key.assert_called_once_with( self._manager.admin_context) @mock.patch('time.sleep') @mock.patch('time.time') def _test_wait_for_instance(self, mock_time, mock_sleep, server_get_side_eff=None, expected_try_count=1, expected_sleep_count=0, expected_ret_val=None, expected_exc=None): mock_server_get = mock.Mock(side_effect=server_get_side_eff) self.mock_object(self._manager.compute_api, 'server_get', mock_server_get) self.fake_time = 0 def fake_time(): return self.fake_time def fake_sleep(sleep_time): self.fake_time += sleep_time # Note(lpetrut): LOG methods can call time.time mock_time.side_effect = fake_time mock_sleep.side_effect = fake_sleep timeout = 3 if expected_exc: self.assertRaises( expected_exc, self._manager.wait_for_instance_to_be_active, instance_id=mock.sentinel.instance_id, timeout=timeout) else: instance = self._manager.wait_for_instance_to_be_active( instance_id=mock.sentinel.instance_id, timeout=timeout) self.assertEqual(expected_ret_val, instance) mock_server_get.assert_has_calls( [mock.call(self._manager.admin_context, mock.sentinel.instance_id)] * expected_try_count) mock_sleep.assert_has_calls([mock.call(1)] * expected_sleep_count) def test_wait_for_instance_timeout(self): server_get_side_eff = [ exception.InstanceNotFound( instance_id=mock.sentinel.instance_id), {'status': 'BUILDING'}, {'status': 'ACTIVE'}] # Note that in this case, although the status is active, the # 'networks' field is missing. 
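        # With a 3 second timeout and one-second sleeps, server_get is
        # polled three times without yielding a usable instance, so
        # ServiceInstanceException is expected.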
self._test_wait_for_instance( server_get_side_eff=server_get_side_eff, expected_exc=exception.ServiceInstanceException, expected_try_count=3, expected_sleep_count=3) def test_wait_for_instance_error_state(self): mock_instance = {'status': 'ERROR'} self._test_wait_for_instance( server_get_side_eff=[mock_instance], expected_exc=exception.ServiceInstanceException, expected_try_count=1) def test_wait_for_instance_available(self): mock_instance = {'status': 'ACTIVE', 'networks': mock.sentinel.networks} self._test_wait_for_instance( server_get_side_eff=[mock_instance], expected_try_count=1, expected_ret_val=mock_instance) def test_reboot_server(self): fake_server = {'instance_id': mock.sentinel.instance_id} soft_reboot = True mock_reboot = mock.Mock() self.mock_object(self._manager.compute_api, 'server_reboot', mock_reboot) self._manager.reboot_server(fake_server, soft_reboot) mock_reboot.assert_called_once_with(self._manager.admin_context, fake_server['instance_id'], soft_reboot) class BaseNetworkHelperTestCase(test.TestCase): """Tests Base network helper for service instance.""" def test_instantiate_valid(self): class FakeNetworkHelper(service_instance.BaseNetworkhelper): @property def NAME(self): return 'fake_NAME' def __init__(self, service_instance_manager): self.fake_init = 'fake_init_value' def get_network_name(self, network_info): return 'fake_network_name' def setup_connectivity_with_service_instances(self): return 'fake_setup_connectivity_with_service_instances' def setup_network(self, network_info): return 'fake_setup_network' def teardown_network(self, server_details): return 'fake_teardown_network' instance = FakeNetworkHelper('fake') attrs = [ 'fake_init', 'NAME', 'get_network_name', 'teardown_network', 'setup_connectivity_with_service_instances', 'setup_network', ] for attr in attrs: self.assertTrue(hasattr(instance, attr)) self.assertEqual('fake_init_value', instance.fake_init) self.assertEqual('fake_NAME', instance.NAME) self.assertEqual( 'fake_network_name', instance.get_network_name('fake')) self.assertEqual( 'fake_setup_connectivity_with_service_instances', instance.setup_connectivity_with_service_instances()) self.assertEqual('fake_setup_network', instance.setup_network('fake')) self.assertEqual( 'fake_teardown_network', instance.teardown_network('fake')) def test_instantiate_invalid(self): self.assertRaises( TypeError, service_instance.BaseNetworkhelper, 'fake') @ddt.ddt class NeutronNetworkHelperTestCase(test.TestCase): """Tests Neutron network helper for service instance.""" def setUp(self): super(NeutronNetworkHelperTestCase, self).setUp() self.mock_object(importutils, 'import_class') self.fake_manager = FakeServiceInstance() def _init_neutron_network_plugin(self): self.mock_object( service_instance.NeutronNetworkHelper, '_get_service_network_id', mock.Mock(return_value='fake_service_network_id')) return service_instance.NeutronNetworkHelper(self.fake_manager) def test_init_neutron_network_plugin(self): instance = self._init_neutron_network_plugin() self.assertEqual(service_instance.NEUTRON_NAME, instance.NAME) attrs = [ 'neutron_api', 'vif_driver', 'service_network_id', 'connect_share_server_to_tenant_network', 'get_config_option'] for attr in attrs: self.assertTrue(hasattr(instance, attr), "No attr '%s'" % attr) service_instance.NeutronNetworkHelper._get_service_network_id.\ assert_called_once_with() self.assertEqual('DEFAULT', instance.neutron_api.config_group_name) def test_init_neutron_network_plugin_with_driver_config_group(self): self.fake_manager.driver_config = 
mock.Mock() self.fake_manager.driver_config.config_group =\ 'fake_config_group' self.fake_manager.driver_config.network_config_group = None instance = self._init_neutron_network_plugin() self.assertEqual('fake_config_group', instance.neutron_api.config_group_name) def test_init_neutron_network_plugin_with_network_config_group(self): self.fake_manager.driver_config = mock.Mock() self.fake_manager.driver_config.config_group =\ "fake_config_group" self.fake_manager.driver_config.network_config_group =\ "fake_network_config_group" instance = self._init_neutron_network_plugin() self.assertEqual('fake_network_config_group', instance.neutron_api.config_group_name) def test_admin_project_id(self): instance = self._init_neutron_network_plugin() admin_project_id = 'fake_admin_project_id' self.mock_class('manila.network.neutron.api.API', mock.Mock()) instance.neutron_api.admin_project_id = admin_project_id self.assertEqual(admin_project_id, instance.admin_project_id) def test_get_network_name(self): network_info = dict(neutron_net_id='fake_neutron_net_id') network = dict(name='fake_network_name') instance = self._init_neutron_network_plugin() self.mock_object( instance.neutron_api, 'get_network', mock.Mock(return_value=network)) result = instance.get_network_name(network_info) self.assertEqual(network['name'], result) instance.neutron_api.get_network.assert_called_once_with( network_info['neutron_net_id']) def test_get_service_network_id_none_exist(self): service_network_name = fake_get_config_option('service_network_name') network = dict(id='fake_network_id') admin_project_id = 'fake_admin_project_id' self.mock_object( service_instance.neutron.API, 'get_all_admin_project_networks', mock.Mock(return_value=[])) self.mock_object( service_instance.neutron.API, 'admin_project_id', mock.Mock(return_value=admin_project_id)) self.mock_object( service_instance.neutron.API, 'network_create', mock.Mock(return_value=network)) instance = service_instance.NeutronNetworkHelper(self.fake_manager) result = instance._get_service_network_id() self.assertEqual(network['id'], result) self.assertTrue(service_instance.neutron.API. get_all_admin_project_networks.called) service_instance.neutron.API.network_create.assert_has_calls([ mock.call(instance.admin_project_id, service_network_name)]) def test_get_service_network_id_one_exist(self): service_network_name = fake_get_config_option('service_network_name') network = dict(id='fake_network_id', name=service_network_name) admin_project_id = 'fake_admin_project_id' self.mock_object( service_instance.neutron.API, 'get_all_admin_project_networks', mock.Mock(return_value=[network])) self.mock_object( service_instance.neutron.API, 'admin_project_id', mock.Mock(return_value=admin_project_id)) instance = service_instance.NeutronNetworkHelper(self.fake_manager) result = instance._get_service_network_id() self.assertEqual(network['id'], result) self.assertTrue(service_instance.neutron.API. 
get_all_admin_project_networks.called) def test_get_service_network_id_two_exist(self): service_network_name = fake_get_config_option('service_network_name') network = dict(id='fake_network_id', name=service_network_name) self.mock_object( service_instance.neutron.API, 'get_all_admin_project_networks', mock.Mock(return_value=[network, network])) helper = service_instance.NeutronNetworkHelper(self.fake_manager) self.assertRaises(exception.ManilaException, lambda: helper.service_network_id) service_instance.neutron.API.get_all_admin_project_networks.\ assert_has_calls([mock.call()]) @ddt.data(dict(), dict(subnet_id='foo'), dict(router_id='bar')) def test_teardown_network_no_service_data(self, server_details): instance = self._init_neutron_network_plugin() self.mock_object( service_instance.neutron.API, 'router_remove_interface') instance.teardown_network(server_details) self.assertFalse( service_instance.neutron.API.router_remove_interface.called) @ddt.data( *[dict(server_details=sd, fail=f) for f in (True, False) for sd in (dict(service_port_id='fake_service_port_id'), dict(public_port_id='fake_public_port_id'), dict(service_port_id='fake_service_port_id', public_port_id='fake_public_port_id'))] ) @ddt.unpack def test_teardown_network_with_ports(self, server_details, fail): instance = self._init_neutron_network_plugin() self.mock_object( service_instance.neutron.API, 'router_remove_interface') if fail: delete_port_mock = mock.Mock( side_effect=exception.NetworkException(code=404)) else: delete_port_mock = mock.Mock() self.mock_object(instance.neutron_api, 'delete_port', delete_port_mock) self.mock_object(service_instance.LOG, 'debug') instance.teardown_network(server_details) self.assertFalse(instance.neutron_api.router_remove_interface.called) self.assertEqual( len(server_details), len(instance.neutron_api.delete_port.mock_calls)) for k, v in server_details.items(): self.assertIn( mock.call(v), instance.neutron_api.delete_port.mock_calls) if fail: service_instance.LOG.debug.assert_has_calls([ mock.call(mock.ANY, mock.ANY) for sd in server_details ]) else: service_instance.LOG.debug.assert_has_calls([]) @ddt.data( dict(service_port_id='fake_service_port_id'), dict(public_port_id='fake_public_port_id'), dict(service_port_id='fake_service_port_id', public_port_id='fake_public_port_id'), ) def test_teardown_network_with_ports_unhandled_exception(self, server_details): instance = self._init_neutron_network_plugin() self.mock_object( service_instance.neutron.API, 'router_remove_interface') delete_port_mock = mock.Mock( side_effect=exception.NetworkException(code=500)) self.mock_object( service_instance.neutron.API, 'delete_port', delete_port_mock) self.mock_object(service_instance.LOG, 'debug') self.assertRaises( exception.NetworkException, instance.teardown_network, server_details, ) self.assertFalse( service_instance.neutron.API.router_remove_interface.called) service_instance.neutron.API.delete_port.assert_called_once_with( mock.ANY) service_instance.LOG.debug.assert_has_calls([]) def test_teardown_network_with_wrong_ports(self): instance = self._init_neutron_network_plugin() self.mock_object( service_instance.neutron.API, 'router_remove_interface') self.mock_object( service_instance.neutron.API, 'delete_port') self.mock_object(service_instance.LOG, 'debug') instance.teardown_network(dict(foo_id='fake_service_port_id')) service_instance.neutron.API.router_remove_interface.assert_has_calls( []) service_instance.neutron.API.delete_port.assert_has_calls([]) 
service_instance.LOG.debug.assert_has_calls([]) def test_teardown_network_subnet_is_used(self): server_details = dict(subnet_id='foo', router_id='bar') fake_ports = [ {'fixed_ips': [{'subnet_id': server_details['subnet_id']}], 'device_id': 'fake_device_id', 'device_owner': 'compute:foo'}, ] instance = self._init_neutron_network_plugin() self.mock_object( service_instance.neutron.API, 'router_remove_interface') self.mock_object( service_instance.neutron.API, 'update_subnet') self.mock_object( service_instance.neutron.API, 'list_ports', mock.Mock(return_value=fake_ports)) instance.teardown_network(server_details) self.assertFalse( service_instance.neutron.API.router_remove_interface.called) self.assertFalse(service_instance.neutron.API.update_subnet.called) service_instance.neutron.API.list_ports.assert_called_once_with( fields=['fixed_ips', 'device_id', 'device_owner']) def test_teardown_network_subnet_not_used(self): server_details = dict(subnet_id='foo', router_id='bar') fake_ports = [ {'fixed_ips': [{'subnet_id': server_details['subnet_id']}], 'device_id': 'fake_device_id', 'device_owner': 'network:router_interface'}, {'fixed_ips': [{'subnet_id': 'bar' + server_details['subnet_id']}], 'device_id': 'fake_device_id', 'device_owner': 'compute'}, {'fixed_ips': [{'subnet_id': server_details['subnet_id']}], 'device_id': '', 'device_owner': 'compute'}, ] instance = self._init_neutron_network_plugin() self.mock_object( service_instance.neutron.API, 'router_remove_interface') self.mock_object( service_instance.neutron.API, 'update_subnet') self.mock_object( service_instance.neutron.API, 'list_ports', mock.Mock(return_value=fake_ports)) instance.teardown_network(server_details) service_instance.neutron.API.router_remove_interface.\ assert_called_once_with('bar', 'foo') service_instance.neutron.API.update_subnet.\ assert_called_once_with('foo', '') service_instance.neutron.API.list_ports.assert_called_once_with( fields=['fixed_ips', 'device_id', 'device_owner']) def test_teardown_network_subnet_not_used_and_get_error_404(self): server_details = dict(subnet_id='foo', router_id='bar') fake_ports = [ {'fixed_ips': [{'subnet_id': server_details['subnet_id']}], 'device_id': 'fake_device_id', 'device_owner': 'fake'}, ] instance = self._init_neutron_network_plugin() self.mock_object( service_instance.neutron.API, 'router_remove_interface', mock.Mock(side_effect=exception.NetworkException(code=404))) self.mock_object( service_instance.neutron.API, 'update_subnet') self.mock_object( service_instance.neutron.API, 'list_ports', mock.Mock(return_value=fake_ports)) instance.teardown_network(server_details) service_instance.neutron.API.router_remove_interface.\ assert_called_once_with('bar', 'foo') service_instance.neutron.API.update_subnet.\ assert_called_once_with('foo', '') service_instance.neutron.API.list_ports.assert_called_once_with( fields=['fixed_ips', 'device_id', 'device_owner']) def test_teardown_network_subnet_not_used_get_unhandled_error(self): server_details = dict(subnet_id='foo', router_id='bar') fake_ports = [ {'fixed_ips': [{'subnet_id': server_details['subnet_id']}], 'device_id': 'fake_device_id', 'device_owner': 'fake'}, ] instance = self._init_neutron_network_plugin() self.mock_object( service_instance.neutron.API, 'router_remove_interface', mock.Mock(side_effect=exception.NetworkException(code=500))) self.mock_object( service_instance.neutron.API, 'update_subnet') self.mock_object( service_instance.neutron.API, 'list_ports', mock.Mock(return_value=fake_ports)) self.assertRaises( 
exception.NetworkException, instance.teardown_network, server_details) service_instance.neutron.API.router_remove_interface.\ assert_called_once_with('bar', 'foo') self.assertFalse(service_instance.neutron.API.update_subnet.called) service_instance.neutron.API.list_ports.assert_called_once_with( fields=['fixed_ips', 'device_id', 'device_owner']) def test_setup_network_and_connect_share_server_to_tenant_net(self): def fake_create_port(*aargs, **kwargs): if aargs[1] == 'fake_service_network_id': return self.service_port elif aargs[1] == 'fake_tenant_network_id': return self.public_port else: raise exception.ManilaException('Got unexpected data') admin_project_id = 'fake_admin_project_id' network_info = dict( neutron_net_id='fake_tenant_network_id', neutron_subnet_id='fake_tenant_subnet_id') cidr = '13.0.0.0/24' self.service_port = dict( id='fake_service_port_id', fixed_ips=[dict(ip_address='fake_service_port_ip_address')]) self.public_port = dict( id='fake_tenant_port_id', fixed_ips=[dict(ip_address='fake_public_port_ip_address')]) service_subnet = dict(id='fake_service_subnet') instance = self._init_neutron_network_plugin() instance.connect_share_server_to_tenant_network = True self.mock_object(instance, '_get_service_network_id', mock.Mock(return_value='fake_service_network_id')) self.mock_object( service_instance.neutron.API, 'admin_project_id', mock.Mock(return_value=admin_project_id)) self.mock_object( service_instance.neutron.API, 'create_port', mock.Mock(side_effect=fake_create_port)) self.mock_object( service_instance.neutron.API, 'subnet_create', mock.Mock(return_value=service_subnet)) self.mock_object( instance, 'setup_connectivity_with_service_instances', mock.Mock(return_value=service_subnet)) self.mock_object( instance, '_get_cidr_for_subnet', mock.Mock(return_value=cidr)) self.mock_object( instance, '_get_service_subnet', mock.Mock(return_value=None)) expected = { 'ip_address': self.public_port['fixed_ips'][0]['ip_address'], 'public_port': self.public_port, 'service_port': self.service_port, 'service_subnet': service_subnet, 'ports': [self.public_port, self.service_port], 'nics': [{'port-id': self.public_port['id']}, {'port-id': self.service_port['id']}]} result = instance.setup_network(network_info) self.assertEqual(expected, result) instance.setup_connectivity_with_service_instances.\ assert_called_once_with() instance._get_service_subnet.assert_called_once_with(mock.ANY) instance._get_cidr_for_subnet.assert_called_once_with() self.assertTrue(service_instance.neutron.API.subnet_create.called) self.assertTrue(service_instance.neutron.API.create_port.called) def test_setup_network_and_connect_share_server_to_tenant_net_admin(self): def fake_create_port(*aargs, **kwargs): if aargs[1] == 'fake_admin_network_id': return self.admin_port elif aargs[1] == 'fake_tenant_network_id': return self.public_port else: raise exception.ManilaException('Got unexpected data') admin_project_id = 'fake_admin_project_id' network_info = { 'neutron_net_id': 'fake_tenant_network_id', 'neutron_subnet_id': 'fake_tenant_subnet_id'} self.admin_port = { 'id': 'fake_admin_port_id', 'fixed_ips': [{'ip_address': 'fake_admin_port_ip_address'}]} self.public_port = { 'id': 'fake_tenant_port_id', 'fixed_ips': [{'ip_address': 'fake_public_port_ip_address'}]} instance = self._init_neutron_network_plugin() instance.use_admin_port = True instance.use_service_network = False instance.admin_network_id = 'fake_admin_network_id' instance.admin_subnet_id = 'fake_admin_subnet_id' 
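        # The service network is disabled here, so only the tenant (public)
        # and admin ports are expected in the resulting network data.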
instance.connect_share_server_to_tenant_network = True self.mock_object( service_instance.neutron.API, 'admin_project_id', mock.Mock(return_value=admin_project_id)) self.mock_object( service_instance.neutron.API, 'create_port', mock.Mock(side_effect=fake_create_port)) self.mock_object( instance, 'setup_connectivity_with_service_instances') expected = { 'ip_address': self.public_port['fixed_ips'][0]['ip_address'], 'public_port': self.public_port, 'admin_port': self.admin_port, 'ports': [self.public_port, self.admin_port], 'nics': [{'port-id': self.public_port['id']}, {'port-id': self.admin_port['id']}]} result = instance.setup_network(network_info) self.assertEqual(expected, result) instance.setup_connectivity_with_service_instances.\ assert_called_once_with() self.assertTrue(service_instance.neutron.API.create_port.called) @ddt.data(None, exception.NetworkException(code=400)) def test_setup_network_using_router_success(self, return_obj): admin_project_id = 'fake_admin_project_id' network_info = dict( neutron_net_id='fake_tenant_network_id', neutron_subnet_id='fake_tenant_subnet_id') cidr = '13.0.0.0/24' self.admin_port = { 'id': 'fake_admin_port_id', 'fixed_ips': [{'ip_address': 'fake_admin_port_ip_address'}]} self.service_port = dict( id='fake_service_port_id', fixed_ips=[dict(ip_address='fake_service_port_ip_address')]) service_subnet = dict(id='fake_service_subnet') instance = self._init_neutron_network_plugin() instance.use_admin_port = True instance.admin_network_id = 'fake_admin_network_id' instance.admin_subnet_id = 'fake_admin_subnet_id' instance.connect_share_server_to_tenant_network = False self.mock_object(instance, '_get_service_network_id', mock.Mock(return_value='fake_service_network_id')) router = dict(id='fake_router_id') self.mock_object( service_instance.neutron.API, 'admin_project_id', mock.Mock(return_value=admin_project_id)) self.mock_object( service_instance.neutron.API, 'create_port', mock.Mock(side_effect=[self.service_port, self.admin_port])) self.mock_object( service_instance.neutron.API, 'subnet_create', mock.Mock(return_value=service_subnet)) self.mock_object( instance, '_get_private_router', mock.Mock(return_value=router)) self.mock_object( service_instance.neutron.API, 'router_add_interface', mock.Mock(side_effect=return_obj)) self.mock_object(instance, 'setup_connectivity_with_service_instances') self.mock_object( instance, '_get_cidr_for_subnet', mock.Mock(return_value=cidr)) self.mock_object( instance, '_get_service_subnet', mock.Mock(return_value=None)) expected = { 'ip_address': self.service_port['fixed_ips'][0]['ip_address'], 'service_port': self.service_port, 'service_subnet': service_subnet, 'admin_port': self.admin_port, 'router': router, 'ports': [self.service_port, self.admin_port], 'nics': [{'port-id': self.service_port['id']}, {'port-id': self.admin_port['id']}]} result = instance.setup_network(network_info) self.assertEqual(expected, result) instance.setup_connectivity_with_service_instances.\ assert_called_once_with() instance._get_service_subnet.assert_called_once_with(mock.ANY) instance._get_cidr_for_subnet.assert_called_once_with() self.assertTrue(service_instance.neutron.API.subnet_create.called) self.assertTrue(service_instance.neutron.API.create_port.called) instance._get_private_router.assert_called_once_with( network_info['neutron_net_id'], network_info['neutron_subnet_id']) service_instance.neutron.API.router_add_interface.\ assert_called_once_with(router['id'], service_subnet['id']) def 
test_setup_network_using_router_addon_of_interface_failed(self): network_info = dict( neutron_net_id='fake_tenant_network_id', neutron_subnet_id='fake_tenant_subnet_id') service_subnet = dict(id='fake_service_subnet') instance = self._init_neutron_network_plugin() instance.connect_share_server_to_tenant_network = False self.mock_object(instance, '_get_service_network_id', mock.Mock(return_value='fake_service_network_id')) router = dict(id='fake_router_id') self.mock_object( instance, '_get_private_router', mock.Mock(return_value=router)) self.mock_object( service_instance.neutron.API, 'router_add_interface', mock.Mock(side_effect=exception.NetworkException(code=500))) self.mock_object( instance, '_get_service_subnet', mock.Mock(return_value=service_subnet)) self.assertRaises( exception.NetworkException, instance.setup_network, network_info) instance._get_service_subnet.assert_called_once_with(mock.ANY) instance._get_private_router.assert_called_once_with( network_info['neutron_net_id'], network_info['neutron_subnet_id']) service_instance.neutron.API.router_add_interface.\ assert_called_once_with(router['id'], service_subnet['id']) def test_setup_network_using_router_connectivity_verification_fail(self): admin_project_id = 'fake_admin_project_id' network_info = dict( neutron_net_id='fake_tenant_network_id', neutron_subnet_id='fake_tenant_subnet_id') cidr = '13.0.0.0/24' self.service_port = dict( id='fake_service_port_id', fixed_ips=[dict(ip_address='fake_service_port_ip_address')]) service_subnet = dict(id='fake_service_subnet') instance = self._init_neutron_network_plugin() instance.connect_share_server_to_tenant_network = False self.mock_object(instance, '_get_service_network_id', mock.Mock(return_value='fake_service_network_id')) router = dict(id='fake_router_id') self.mock_object( service_instance.neutron.API, 'admin_project_id', mock.Mock(return_value=admin_project_id)) self.mock_object( service_instance.neutron.API, 'create_port', mock.Mock(return_value=self.service_port)) self.mock_object( service_instance.neutron.API, 'subnet_create', mock.Mock(return_value=service_subnet)) self.mock_object(service_instance.neutron.API, 'delete_port') self.mock_object( instance, '_get_private_router', mock.Mock(return_value=router)) self.mock_object( service_instance.neutron.API, 'router_add_interface') self.mock_object( instance, 'setup_connectivity_with_service_instances', mock.Mock(side_effect=exception.ManilaException('Fake'))) self.mock_object( instance, '_get_cidr_for_subnet', mock.Mock(return_value=cidr)) self.mock_object( instance, '_get_service_subnet', mock.Mock(return_value=None)) self.assertRaises( exception.ManilaException, instance.setup_network, network_info) instance.setup_connectivity_with_service_instances.\ assert_called_once_with() instance._get_service_subnet.assert_called_once_with(mock.ANY) instance._get_cidr_for_subnet.assert_called_once_with() self.assertTrue(service_instance.neutron.API.subnet_create.called) self.assertTrue(service_instance.neutron.API.create_port.called) instance._get_private_router.assert_called_once_with( network_info['neutron_net_id'], network_info['neutron_subnet_id']) service_instance.neutron.API.router_add_interface.\ assert_called_once_with(router['id'], service_subnet['id']) service_instance.neutron.API.delete_port.assert_has_calls([ mock.call(self.service_port['id'])]) def test__get_cidr_for_subnet_success(self): expected = ( fake_get_config_option('service_network_cidr').split('/')[0] + '/' + six.text_type( 
fake_get_config_option('service_network_division_mask'))) instance = self._init_neutron_network_plugin() self.mock_object( instance, '_get_all_service_subnets', mock.Mock(return_value=[])) result = instance._get_cidr_for_subnet() self.assertEqual(expected, result) instance._get_all_service_subnets.assert_called_once_with() def test__get_cidr_for_subnet_failure(self): subnets = [] serv_cidr = netaddr.IPNetwork( fake_get_config_option('service_network_cidr')) division_mask = fake_get_config_option('service_network_division_mask') for subnet in serv_cidr.subnet(division_mask): subnets.append(dict(cidr=six.text_type(subnet.cidr))) instance = self._init_neutron_network_plugin() self.mock_object( instance, '_get_all_service_subnets', mock.Mock(return_value=subnets)) self.assertRaises( exception.ServiceInstanceException, instance._get_cidr_for_subnet) instance._get_all_service_subnets.assert_called_once_with() def test_setup_connectivity_with_service_instances(self): instance = self._init_neutron_network_plugin() instance.use_admin_port = True instance.admin_network_id = 'fake_admin_network_id' instance.admin_subnet_id = 'fake_admin_subnet_id' interface_name_service = 'fake_interface_name_service' interface_name_admin = 'fake_interface_name_admin' fake_division_mask = fake_get_config_option( 'service_network_division_mask') fake_subnet_service = fake_network.FakeSubnet( cidr='10.254.0.0/%s' % fake_division_mask) fake_subnet_admin = fake_network.FakeSubnet(id='fake_admin_subnet_id', cidr='10.0.0.0/24') fake_service_port = fake_network.FakePort(fixed_ips=[ {'subnet_id': fake_subnet_service['id'], 'ip_address': '10.254.0.2'}], mac_address='fake_mac_address') fake_admin_port = fake_network.FakePort(fixed_ips=[ {'subnet_id': fake_subnet_admin['id'], 'ip_address': '10.0.0.4'}], mac_address='fake_mac_address') self.mock_object(instance, '_get_service_port', mock.Mock(side_effect=[fake_service_port, fake_admin_port])) self.mock_object(instance, '_add_fixed_ips_to_service_port', mock.Mock(return_value=fake_service_port)) self.mock_object(instance.vif_driver, 'get_device_name', mock.Mock(side_effect=[interface_name_service, interface_name_admin])) self.mock_object(instance.neutron_api, 'get_subnet', mock.Mock(side_effect=[fake_subnet_service, fake_subnet_admin, fake_subnet_admin])) self.mock_object(instance, '_remove_outdated_interfaces') self.mock_object(instance.vif_driver, 'plug') device_mock = mock.Mock() self.mock_object(service_instance.ip_lib, 'IPDevice', mock.Mock(return_value=device_mock)) instance.setup_connectivity_with_service_instances() instance._get_service_port.assert_has_calls([ mock.call(instance.service_network_id, None, 'manila-share'), mock.call('fake_admin_network_id', 'fake_admin_subnet_id', 'manila-admin-share')]) instance.vif_driver.get_device_name.assert_has_calls([ mock.call(fake_service_port), mock.call(fake_admin_port)]) instance.vif_driver.plug.assert_has_calls([ mock.call(interface_name_service, fake_service_port['id'], fake_service_port['mac_address']), mock.call(interface_name_admin, fake_admin_port['id'], fake_admin_port['mac_address'])]) instance.neutron_api.get_subnet.assert_has_calls([ mock.call(fake_subnet_service['id']), mock.call(fake_subnet_admin['id']), mock.call(fake_subnet_admin['id'])]) instance.vif_driver.init_l3.assert_has_calls([ mock.call(interface_name_service, ['10.254.0.2/%s' % fake_division_mask]), mock.call(interface_name_admin, ['10.0.0.4/24'])]) service_instance.ip_lib.IPDevice.assert_has_calls([ mock.call(interface_name_service), 
mock.call(interface_name_admin)]) device_mock.route.pullup_route.assert_has_calls([ mock.call(interface_name_service), mock.call(interface_name_admin)]) instance._remove_outdated_interfaces.assert_called_with(device_mock) def test__get_set_of_device_cidrs(self): device = fake_network.FakeDevice('foo') expected = set(('1.0.0.0/27', '2.0.0.0/27')) instance = self._init_neutron_network_plugin() result = instance._get_set_of_device_cidrs(device) self.assertEqual(expected, result) def test__get_set_of_device_cidrs_exception(self): device = fake_network.FakeDevice('foo') self.mock_object(device.addr, 'list', mock.Mock( side_effect=Exception('foo does not exist'))) instance = self._init_neutron_network_plugin() result = instance._get_set_of_device_cidrs(device) self.assertEqual(set(), result) def test__remove_outdated_interfaces(self): device = fake_network.FakeDevice( 'foobarquuz', [dict(ip_version=4, cidr='1.0.0.0/27')]) devices = [fake_network.FakeDevice('foobar')] instance = self._init_neutron_network_plugin() self.mock_object(instance.vif_driver, 'unplug') self.mock_object( service_instance.ip_lib.IPWrapper, 'get_devices', mock.Mock(return_value=devices)) instance._remove_outdated_interfaces(device) instance.vif_driver.unplug.assert_called_once_with('foobar') def test__get_service_port_none_exist(self): instance = self._init_neutron_network_plugin() admin_project_id = 'fake_admin_project_id' fake_port_values = {'device_id': 'manila-share', 'binding:host_id': 'fake_host'} self.mock_object( service_instance.neutron.API, 'admin_project_id', mock.Mock(return_value=admin_project_id)) fake_service_port = fake_network.FakePort(device_id='manila-share') self.mock_object(instance.neutron_api, 'list_ports', mock.Mock(return_value=[])) self.mock_object(service_instance.socket, 'gethostname', mock.Mock(return_value='fake_host')) self.mock_object(instance.neutron_api, 'create_port', mock.Mock(return_value=fake_service_port)) self.mock_object(instance.neutron_api, 'update_port_fixed_ips', mock.Mock(return_value=fake_service_port)) result = instance._get_service_port(instance.service_network_id, None, 'manila-share') instance.neutron_api.list_ports.assert_called_once_with( **fake_port_values) instance.neutron_api.create_port.assert_called_once_with( instance.admin_project_id, instance.service_network_id, device_id='manila-share', device_owner='manila:share', host_id='fake_host', subnet_id=None) service_instance.socket.gethostname.assert_called_once_with() self.assertFalse(instance.neutron_api.update_port_fixed_ips.called) self.assertEqual(fake_service_port, result) def test__get_service_port_one_exist_on_same_host(self): instance = self._init_neutron_network_plugin() fake_port_values = {'device_id': 'manila-share', 'binding:host_id': 'fake_host'} fake_service_port = fake_network.FakePort(**fake_port_values) self.mock_object(service_instance.socket, 'gethostname', mock.Mock(return_value='fake_host')) self.mock_object(instance.neutron_api, 'list_ports', mock.Mock(return_value=[fake_service_port])) self.mock_object(instance.neutron_api, 'create_port', mock.Mock(return_value=fake_service_port)) self.mock_object(instance.neutron_api, 'update_port_fixed_ips', mock.Mock(return_value=fake_service_port)) result = instance._get_service_port(instance.service_network_id, None, 'manila-share') instance.neutron_api.list_ports.assert_called_once_with( **fake_port_values) self.assertFalse(instance.neutron_api.create_port.called) self.assertFalse(instance.neutron_api.update_port_fixed_ips.called) 
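        # A port already bound to this host is reused as-is, so neither
        # create_port nor update_port_fixed_ips should have been called.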
self.assertEqual(fake_service_port, result) def test__get_service_port_one_exist_on_different_host(self): instance = self._init_neutron_network_plugin() admin_project_id = 'fake_admin_project_id' fake_port = {'device_id': 'manila-share', 'binding:host_id': 'fake_host'} self.mock_object( service_instance.neutron.API, 'admin_project_id', mock.Mock(return_value=admin_project_id)) fake_service_port = fake_network.FakePort(**fake_port) self.mock_object(instance.neutron_api, 'list_ports', mock.Mock(return_value=[])) self.mock_object(service_instance.socket, 'gethostname', mock.Mock(return_value='fake_host')) self.mock_object(instance.neutron_api, 'create_port', mock.Mock(return_value=fake_service_port)) self.mock_object(instance.neutron_api, 'update_port_fixed_ips', mock.Mock(return_value=fake_service_port)) result = instance._get_service_port(instance.service_network_id, None, 'manila-share') instance.neutron_api.list_ports.assert_called_once_with( **fake_port) instance.neutron_api.create_port.assert_called_once_with( instance.admin_project_id, instance.service_network_id, device_id='manila-share', device_owner='manila:share', host_id='fake_host', subnet_id=None) service_instance.socket.gethostname.assert_called_once_with() self.assertFalse(instance.neutron_api.update_port_fixed_ips.called) self.assertEqual(fake_service_port, result) def test__get_service_port_two_exist_on_same_host(self): instance = self._init_neutron_network_plugin() fake_service_port = fake_network.FakePort(**{ 'device_id': 'manila-share', 'binding:host_id': 'fake_host'}) self.mock_object( instance.neutron_api, 'list_ports', mock.Mock(return_value=[fake_service_port, fake_service_port])) self.mock_object(service_instance.socket, 'gethostname', mock.Mock(return_value='fake_host')) self.mock_object(instance.neutron_api, 'create_port', mock.Mock(return_value=fake_service_port)) self.assertRaises( exception.ServiceInstanceException, instance._get_service_port, instance.service_network_id, None, 'manila-share') self.assertFalse(instance.neutron_api.create_port.called) def test__add_fixed_ips_to_service_port(self): ip_address1 = '13.0.0.13' subnet_id1 = 'fake_subnet_id1' subnet_id2 = 'fake_subnet_id2' port = dict(id='fooport', fixed_ips=[dict( subnet_id=subnet_id1, ip_address=ip_address1)]) expected = mock.Mock() network = dict(subnets=[subnet_id1, subnet_id2]) instance = self._init_neutron_network_plugin() self.mock_object(instance.neutron_api, 'get_network', mock.Mock(return_value=network)) self.mock_object(instance.neutron_api, 'update_port_fixed_ips', mock.Mock(return_value=expected)) result = instance._add_fixed_ips_to_service_port(port) self.assertEqual(expected, result) instance.neutron_api.get_network.assert_called_once_with( instance.service_network_id) instance.neutron_api.update_port_fixed_ips.assert_called_once_with( port['id'], dict(fixed_ips=[ dict(subnet_id=subnet_id1, ip_address=ip_address1), dict(subnet_id=subnet_id2)])) def test__get_private_router_success(self): instance = self._init_neutron_network_plugin() network = fake_network.FakeNetwork() subnet = fake_network.FakeSubnet(gateway_ip='fake_ip') router = fake_network.FakeRouter(id='fake_router_id') port = fake_network.FakePort(fixed_ips=[ dict(subnet_id=subnet['id'], ip_address=subnet['gateway_ip'])], device_id=router['id']) self.mock_object(instance.neutron_api, 'get_subnet', mock.Mock(return_value=subnet)) self.mock_object(instance.neutron_api, 'list_ports', mock.Mock(return_value=[port])) self.mock_object(instance.neutron_api, 'show_router', 
mock.Mock(return_value=router)) result = instance._get_private_router(network['id'], subnet['id']) self.assertEqual(router, result) instance.neutron_api.get_subnet.assert_called_once_with(subnet['id']) instance.neutron_api.list_ports.assert_called_once_with( network_id=network['id']) instance.neutron_api.show_router.assert_called_once_with(router['id']) def test__get_private_router_no_gateway(self): instance = self._init_neutron_network_plugin() subnet = fake_network.FakeSubnet(gateway_ip='') self.mock_object(instance.neutron_api, 'get_subnet', mock.Mock(return_value=subnet)) self.assertRaises( exception.ServiceInstanceException, instance._get_private_router, 'fake_network_id', subnet['id']) instance.neutron_api.get_subnet.assert_called_once_with( subnet['id']) def test__get_private_router_subnet_is_not_attached_to_the_router(self): instance = self._init_neutron_network_plugin() network_id = 'fake_network_id' subnet = fake_network.FakeSubnet(gateway_ip='fake_ip') self.mock_object(instance.neutron_api, 'get_subnet', mock.Mock(return_value=subnet)) self.mock_object(instance.neutron_api, 'list_ports', mock.Mock(return_value=[])) self.assertRaises( exception.ServiceInstanceException, instance._get_private_router, network_id, subnet['id']) instance.neutron_api.get_subnet.assert_called_once_with( subnet['id']) instance.neutron_api.list_ports.assert_called_once_with( network_id=network_id) def test__get_service_subnet_none_found(self): subnet_name = 'fake_subnet_name' instance = self._init_neutron_network_plugin() self.mock_object(instance, '_get_all_service_subnets', mock.Mock(return_value=[])) result = instance._get_service_subnet(subnet_name) self.assertIsNone(result) instance._get_all_service_subnets.assert_called_once_with() def test__get_service_subnet_unused_found(self): subnet_name = 'fake_subnet_name' subnets = [fake_network.FakeSubnet(id='foo', name=''), fake_network.FakeSubnet(id='bar', name='quuz')] instance = self._init_neutron_network_plugin() self.mock_object(instance.neutron_api, 'update_subnet') self.mock_object(instance, '_get_all_service_subnets', mock.Mock(return_value=subnets)) result = instance._get_service_subnet(subnet_name) self.assertEqual(subnets[0], result) instance._get_all_service_subnets.assert_called_once_with() instance.neutron_api.update_subnet.assert_called_once_with( subnets[0]['id'], subnet_name) def test__get_service_subnet_one_found(self): subnet_name = 'fake_subnet_name' subnets = [fake_network.FakeSubnet(id='foo', name='quuz'), fake_network.FakeSubnet(id='bar', name=subnet_name)] instance = self._init_neutron_network_plugin() self.mock_object(instance, '_get_all_service_subnets', mock.Mock(return_value=subnets)) result = instance._get_service_subnet(subnet_name) self.assertEqual(subnets[1], result) instance._get_all_service_subnets.assert_called_once_with() def test__get_service_subnet_two_found(self): subnet_name = 'fake_subnet_name' subnets = [fake_network.FakeSubnet(id='foo', name=subnet_name), fake_network.FakeSubnet(id='bar', name=subnet_name)] instance = self._init_neutron_network_plugin() self.mock_object(instance, '_get_all_service_subnets', mock.Mock(return_value=subnets)) self.assertRaises( exception.ServiceInstanceException, instance._get_service_subnet, subnet_name) instance._get_all_service_subnets.assert_called_once_with() def test__get_all_service_subnets(self): subnet_id1 = 'fake_subnet_id1' subnet_id2 = 'fake_subnet_id2' instance = self._init_neutron_network_plugin() network = dict(subnets=[subnet_id1, subnet_id2]) 
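        # get_subnet is stubbed to echo the requested id back as a minimal
        # subnet dict so the fan-out over network['subnets'] can be verified
        # without real Neutron calls.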
self.mock_object(instance.neutron_api, 'get_subnet', mock.Mock(side_effect=lambda s_id: dict(id=s_id))) self.mock_object(instance.neutron_api, 'get_network', mock.Mock(return_value=network)) result = instance._get_all_service_subnets() self.assertEqual([dict(id=subnet_id1), dict(id=subnet_id2)], result) instance.neutron_api.get_network.assert_called_once_with( instance.service_network_id) instance.neutron_api.get_subnet.assert_has_calls([ mock.call(subnet_id1), mock.call(subnet_id2)]) @ddt.ddt class NovaNetworkHelperTestCase(test.TestCase): """Tests Nova network helper for service instance.""" def setUp(self): super(NovaNetworkHelperTestCase, self).setUp() self.fake_manager = FakeServiceInstance() def test_init(self): instance = service_instance.NovaNetworkHelper(self.fake_manager) self.assertEqual(service_instance.NOVA_NAME, instance.NAME) self.assertIsNone(instance.teardown_network('fake')) self.assertIsNone( instance.setup_connectivity_with_service_instances()) def test_get_network_name(self): network_info = dict(nova_net_id='fake_nova_net_id') network = dict(label='fake_network') instance = service_instance.NovaNetworkHelper(self.fake_manager) self.mock_object(instance.compute_api, 'network_get', mock.Mock(return_value=network)) result = instance.get_network_name(network_info) self.assertEqual(network['label'], result) instance.compute_api.network_get.assert_called_once_with( instance.admin_context, network_info['nova_net_id']) @ddt.data(None, [], {}, '') def test_get_network_name_invalid(self, net_name): network_info = dict(nova_net_id=net_name) instance = service_instance.NovaNetworkHelper(self.fake_manager) self.assertRaises( exception.ManilaException, instance.get_network_name, network_info) def test_setup_network(self): network_info = dict(nova_net_id='fake_nova_net_id') network = dict(label='fake_network', id='fake_network_id', gateway='fake_gateway_ip') instance = service_instance.NovaNetworkHelper(self.fake_manager) self.mock_object(instance.compute_api, 'network_get', mock.Mock(return_value=network)) expected = { 'nova_net_id': network_info['nova_net_id'], 'nics': [{'net-id': network['id']}]} result = instance.setup_network(network_info) self.assertEqual(expected, result) instance.compute_api.network_get.assert_called_once_with( instance.admin_context, network_info['nova_net_id']) @ddt.data(None, [], {}, '') def test_setup_network_invalid(self, net_name): network_info = dict(nova_net_id=net_name) instance = service_instance.NovaNetworkHelper(self.fake_manager) self.assertRaises( exception.ManilaException, instance.get_network_name, network_info) manila-2.0.0/manila/tests/share/drivers/glusterfs/0000775000567000056710000000000012701407265023320 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/glusterfs/test_common.py0000664000567000056710000010245112701407107026217 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Test cases for GlusterFS common routines.""" import ddt import mock from oslo_config import cfg from manila import exception from manila.share.drivers.glusterfs import common from manila import test from manila.tests import fake_utils CONF = cfg.CONF fake_gluster_manager_attrs = { 'export': '127.0.0.1:/testvol', 'host': '127.0.0.1', 'qualified': 'testuser@127.0.0.1:/testvol', 'user': 'testuser', 'volume': 'testvol', 'path_to_private_key': '/fakepath/to/privatekey', 'remote_server_password': 'fakepassword', } fake_args = ('foo', 'bar') fake_kwargs = {'key1': 'value1', 'key2': 'value2'} fake_path_to_private_key = '/fakepath/to/privatekey' fake_remote_server_password = 'fakepassword' NFS_EXPORT_DIR = 'nfs.export-dir' fakehost = 'example.com' fakevol = 'testvol' fakeexport = ':/'.join((fakehost, fakevol)) fakemnt = '/mnt/glusterfs' @ddt.ddt class GlusterManagerTestCase(test.TestCase): """Tests GlusterManager.""" def setUp(self): super(GlusterManagerTestCase, self).setUp() self.fake_execf = mock.Mock() self.fake_executor = mock.Mock(return_value=('', '')) with mock.patch.object(common.GlusterManager, 'make_gluster_call', return_value=self.fake_executor): self._gluster_manager = common.GlusterManager( 'testuser@127.0.0.1:/testvol', self.fake_execf, fake_path_to_private_key, fake_remote_server_password) fake_gluster_manager_dict = { 'host': '127.0.0.1', 'user': 'testuser', 'volume': 'testvol' } self._gluster_manager_dict = common.GlusterManager( fake_gluster_manager_dict, self.fake_execf, fake_path_to_private_key, fake_remote_server_password) self._gluster_manager_array = [self._gluster_manager, self._gluster_manager_dict] def test_check_volume_presence(self): common._check_volume_presence(mock.Mock())(self._gluster_manager) def test_check_volume_presence_error(self): gmgr = common.GlusterManager('testuser@127.0.0.1') self.assertRaises( exception.GlusterfsException, common._check_volume_presence(mock.Mock()), gmgr) def test_volxml_get(self): xmlout = mock.Mock() value = mock.Mock() value.text = 'foobar' xmlout.find = mock.Mock(return_value=value) ret = common.volxml_get(xmlout, 'some/path') self.assertEqual('foobar', ret) @ddt.data(None, 'some-value') def test_volxml_get_notfound_fallback(self, default): xmlout = mock.Mock() xmlout.find = mock.Mock(return_value=None) ret = common.volxml_get(xmlout, 'some/path', default) self.assertEqual(default, ret) def test_volxml_get_notfound(self): xmlout = mock.Mock() xmlout.find = mock.Mock(return_value=None) self.assertRaises(exception.InvalidShare, common.volxml_get, xmlout, 'some/path') def test_gluster_manager_common_init(self): for gmgr in self._gluster_manager_array: self.assertEqual( fake_gluster_manager_attrs['user'], gmgr.user) self.assertEqual( fake_gluster_manager_attrs['host'], gmgr.host) self.assertEqual( fake_gluster_manager_attrs['volume'], gmgr.volume) self.assertEqual( fake_gluster_manager_attrs['qualified'], gmgr.qualified) self.assertEqual( fake_gluster_manager_attrs['export'], gmgr.export) self.assertEqual( fake_gluster_manager_attrs['path_to_private_key'], gmgr.path_to_private_key) self.assertEqual( fake_gluster_manager_attrs['remote_server_password'], gmgr.remote_server_password) self.assertEqual( self.fake_executor, gmgr.gluster_call) @ddt.data({'user': 'testuser', 'host': '127.0.0.1', 'volume': 'testvol', 'path': None}, {'user': None, 'host': '127.0.0.1', 'volume': 'testvol', 'path': '/testpath'}, {'user': None, 'host': '127.0.0.1', 'volume': 'testvol', 'path': None}, {'user': None, 'host': '127.0.0.1', 'volume': None, 'path': 
None}, {'user': 'testuser', 'host': '127.0.0.1', 'volume': None, 'path': None}, {'user': 'testuser', 'host': '127.0.0.1', 'volume': 'testvol', 'path': '/testpath'}) def test_gluster_manager_init_check(self, test_addr_dict): test_gluster_manager = common.GlusterManager( test_addr_dict, self.fake_execf) self.assertEqual(test_addr_dict, test_gluster_manager.components) @ddt.data(None, True) def test_gluster_manager_init_has_vol(self, has_volume): test_gluster_manager = common.GlusterManager( 'testuser@127.0.0.1:/testvol', self.fake_execf, requires={'volume': has_volume}) self.assertEqual('testvol', test_gluster_manager.volume) @ddt.data(None, True) def test_gluster_manager_dict_init_has_vol(self, has_volume): test_addr_dict = {'user': 'testuser', 'host': '127.0.0.1', 'volume': 'testvol', 'path': '/testdir'} test_gluster_manager = common.GlusterManager( test_addr_dict, self.fake_execf, requires={'volume': has_volume}) self.assertEqual('testvol', test_gluster_manager.volume) @ddt.data(None, False) def test_gluster_manager_init_no_vol(self, has_volume): test_gluster_manager = common.GlusterManager( 'testuser@127.0.0.1', self.fake_execf, requires={'volume': has_volume}) self.assertIsNone(test_gluster_manager.volume) @ddt.data(None, False) def test_gluster_manager_dict_init_no_vol(self, has_volume): test_addr_dict = {'user': 'testuser', 'host': '127.0.0.1'} test_gluster_manager = common.GlusterManager( test_addr_dict, self.fake_execf, requires={'volume': has_volume}) self.assertIsNone(test_gluster_manager.volume) def test_gluster_manager_init_has_shouldnt_have_vol(self): self.assertRaises(exception.GlusterfsException, common.GlusterManager, 'testuser@127.0.0.1:/testvol', self.fake_execf, requires={'volume': False}) def test_gluster_manager_dict_init_has_shouldnt_have_vol(self): test_addr_dict = {'user': 'testuser', 'host': '127.0.0.1', 'volume': 'testvol'} self.assertRaises(exception.GlusterfsException, common.GlusterManager, test_addr_dict, self.fake_execf, requires={'volume': False}) def test_gluster_manager_hasnt_should_have_vol(self): self.assertRaises(exception.GlusterfsException, common.GlusterManager, 'testuser@127.0.0.1', self.fake_execf, requires={'volume': True}) def test_gluster_manager_dict_hasnt_should_have_vol(self): test_addr_dict = {'user': 'testuser', 'host': '127.0.0.1'} self.assertRaises(exception.GlusterfsException, common.GlusterManager, test_addr_dict, self.fake_execf, requires={'volume': True}) def test_gluster_manager_invalid(self): self.assertRaises(exception.GlusterfsException, common.GlusterManager, '127.0.0.1:vol', 'self.fake_execf') def test_gluster_manager_dict_invalid_req_host(self): test_addr_dict = {'user': 'testuser', 'volume': 'testvol'} self.assertRaises(exception.GlusterfsException, common.GlusterManager, test_addr_dict, 'self.fake_execf') @ddt.data({'user': 'testuser'}, {'host': 'johndoe@example.com'}, {'host': 'example.com/so', 'volume': 'me/path'}, {'user': 'user@error', 'host': "example.com", 'volume': 'vol'}, {'host': 'example.com', 'volume': 'vol', 'pith': '/path'}, {'host': 'example.com', 'path': '/path'}, {'user': 'user@error', 'host': "example.com", 'path': '/path'}) def test_gluster_manager_dict_invalid_input(self, test_addr_dict): self.assertRaises(exception.GlusterfsException, common.GlusterManager, test_addr_dict, 'self.fake_execf') def test_gluster_manager_getattr(self): self.assertEqual('testvol', self._gluster_manager.volume) def test_gluster_manager_getattr_called(self): class FakeGlusterManager(common.GlusterManager): pass _gluster_manager = 
FakeGlusterManager('127.0.0.1:/testvol', self.fake_execf) FakeGlusterManager.__getattr__ = mock.Mock() _gluster_manager.volume _gluster_manager.__getattr__.assert_called_once_with('volume') def test_gluster_manager_getattr_noattr(self): self.assertRaises(AttributeError, getattr, self._gluster_manager, 'fakeprop') @ddt.data({'mockargs': {}, 'kwargs': {}}, {'mockargs': {'side_effect': exception.ProcessExecutionError}, 'kwargs': {'error_policy': 'suppress'}}, {'mockargs': { 'side_effect': exception.ProcessExecutionError(exit_code=2)}, 'kwargs': {'error_policy': (2,)}}) @ddt.unpack def test_gluster_manager_make_gluster_call_local(self, mockargs, kwargs): fake_obj = mock.Mock(**mockargs) fake_execute = mock.Mock() kwargs.update(fake_kwargs) with mock.patch.object(common.ganesha_utils, 'RootExecutor', mock.Mock(return_value=fake_obj)): gluster_manager = common.GlusterManager( '127.0.0.1:/testvol', self.fake_execf) gluster_manager.make_gluster_call(fake_execute)(*fake_args, **kwargs) common.ganesha_utils.RootExecutor.assert_called_with( fake_execute) fake_obj.assert_called_once_with( *(('gluster',) + fake_args), **fake_kwargs) def test_gluster_manager_make_gluster_call_remote(self): fake_obj = mock.Mock() fake_execute = mock.Mock() with mock.patch.object(common.ganesha_utils, 'SSHExecutor', mock.Mock(return_value=fake_obj)): gluster_manager = common.GlusterManager( 'testuser@127.0.0.1:/testvol', self.fake_execf, fake_path_to_private_key, fake_remote_server_password) gluster_manager.make_gluster_call(fake_execute)(*fake_args, **fake_kwargs) common.ganesha_utils.SSHExecutor.assert_called_with( gluster_manager.host, 22, None, gluster_manager.user, password=gluster_manager.remote_server_password, privatekey=gluster_manager.path_to_private_key) fake_obj.assert_called_once_with( *(('gluster',) + fake_args), **fake_kwargs) @ddt.data({'trouble': exception.ProcessExecutionError, '_exception': exception.GlusterfsException, 'xkw': {}}, {'trouble': exception.ProcessExecutionError(exit_code=2), '_exception': exception.GlusterfsException, 'xkw': {'error_policy': (1,)}}, {'trouble': exception.ProcessExecutionError, '_exception': exception.GlusterfsException, 'xkw': {'error_policy': 'coerce'}}, {'trouble': exception.ProcessExecutionError, '_exception': exception.ProcessExecutionError, 'xkw': {'error_policy': 'raw'}}, {'trouble': RuntimeError, '_exception': RuntimeError, 'xkw': {}}) @ddt.unpack def test_gluster_manager_make_gluster_call_error(self, trouble, _exception, xkw): fake_obj = mock.Mock(side_effect=trouble) fake_execute = mock.Mock() kwargs = fake_kwargs.copy() kwargs.update(xkw) with mock.patch.object(common.ganesha_utils, 'RootExecutor', mock.Mock(return_value=fake_obj)): gluster_manager = common.GlusterManager( '127.0.0.1:/testvol', self.fake_execf) self.assertRaises(_exception, gluster_manager.make_gluster_call(fake_execute), *fake_args, **kwargs) common.ganesha_utils.RootExecutor.assert_called_with( fake_execute) fake_obj.assert_called_once_with( *(('gluster',) + fake_args), **fake_kwargs) def test_gluster_manager_make_gluster_call_bad_policy(self): fake_obj = mock.Mock() fake_execute = mock.Mock() with mock.patch.object(common.ganesha_utils, 'RootExecutor', mock.Mock(return_value=fake_obj)): gluster_manager = common.GlusterManager( '127.0.0.1:/testvol', self.fake_execf) self.assertRaises(TypeError, gluster_manager.make_gluster_call(fake_execute), *fake_args, error_policy='foobar') @ddt.data({}, {'opErrstr': None}, {'opErrstr': 'error'}) def test_xml_response_check(self, xdict): fdict = {'opRet': 
'0', 'opErrno': '0', 'some/count': '1'} fdict.update(xdict) def vxget(x, e, *a): if a: return fdict.get(e, a[0]) else: return fdict[e] xtree = mock.Mock() command = ['volume', 'command', 'fake'] with mock.patch.object(common, 'volxml_get', side_effect=vxget): self._gluster_manager.xml_response_check(xtree, command, 'some/count') self.assertTrue(common.volxml_get.called) @ddt.data('1', '2') def test_xml_response_check_failure(self, count): fdict = {'opRet': '-1', 'opErrno': '0', 'some/count': count} def vxget(x, e, *a): if a: return fdict.get(e, a[0]) else: return fdict[e] xtree = mock.Mock() command = ['volume', 'command', 'fake'] with mock.patch.object(common, 'volxml_get', side_effect=vxget): self.assertRaises(exception.GlusterfsException, self._gluster_manager.xml_response_check, xtree, command, 'some/count') self.assertTrue(common.volxml_get.called) @ddt.data({'opRet': '-2', 'opErrno': '0', 'some/count': '1'}, {'opRet': '0', 'opErrno': '1', 'some/count': '1'}, {'opRet': '0', 'opErrno': '0', 'some/count': '0'}, {'opRet': '0', 'opErrno': '0', 'some/count': '2'}) def test_xml_response_check_invalid(self, fdict): def vxget(x, e, *a): if a: return fdict.get(e, a[0]) else: return fdict[e] xtree = mock.Mock() command = ['volume', 'command', 'fake'] with mock.patch.object(common, 'volxml_get', side_effect=vxget): self.assertRaises(exception.InvalidShare, self._gluster_manager.xml_response_check, xtree, command, 'some/count') self.assertTrue(common.volxml_get.called) @ddt.data({'opRet': '0', 'opErrno': '0'}, {'opRet': '0', 'opErrno': '0', 'some/count': '2'}) def test_xml_response_check_count_ignored(self, fdict): def vxget(x, e, *a): if a: return fdict.get(e, a[0]) else: return fdict[e] xtree = mock.Mock() command = ['volume', 'command', 'fake'] with mock.patch.object(common, 'volxml_get', side_effect=vxget): self._gluster_manager.xml_response_check(xtree, command) self.assertTrue(common.volxml_get.called) def test_get_vol_option_via_info_empty_volinfo(self): args = ('--xml', 'volume', 'info', self._gluster_manager.volume) self.mock_object(self._gluster_manager, 'gluster_call', mock.Mock(return_value=('', {}))) self.assertRaises(exception.GlusterfsException, self._gluster_manager._get_vol_option_via_info, 'foobar') self._gluster_manager.gluster_call.assert_called_once_with( *args, log=mock.ANY) def test_get_vol_option_via_info_ambiguous_volinfo(self): def xml_output(*ignore_args, **ignore_kwargs): return """ 0 0 0 """, '' args = ('--xml', 'volume', 'info', self._gluster_manager.volume) self.mock_object(self._gluster_manager, 'gluster_call', mock.Mock(side_effect=xml_output)) self.assertRaises(exception.InvalidShare, self._gluster_manager._get_vol_option_via_info, 'foobar') self._gluster_manager.gluster_call.assert_called_once_with( *args, log=mock.ANY) def test_get_vol_option_via_info_trivial_volinfo(self): def xml_output(*ignore_args, **ignore_kwargs): return """ 0 0 1 """, '' args = ('--xml', 'volume', 'info', self._gluster_manager.volume) self.mock_object(self._gluster_manager, 'gluster_call', mock.Mock(side_effect=xml_output)) ret = self._gluster_manager._get_vol_option_via_info('foobar') self.assertIsNone(ret) self._gluster_manager.gluster_call.assert_called_once_with( *args, log=mock.ANY) def test_get_vol_option_via_info(self): def xml_output(*ignore_args, **ignore_kwargs): return """ 0 0 1 """, '' args = ('--xml', 'volume', 'info', self._gluster_manager.volume) self.mock_object(self._gluster_manager, 'gluster_call', mock.Mock(side_effect=xml_output)) ret = 
self._gluster_manager._get_vol_option_via_info('foobar') self.assertEqual('FIRE MONKEY!', ret) self._gluster_manager.gluster_call.assert_called_once_with( *args, log=mock.ANY) def test_get_vol_user_option(self): self.mock_object(self._gluster_manager, '_get_vol_option_via_info', mock.Mock(return_value='VALUE')) ret = self._gluster_manager._get_vol_user_option('OPT') self.assertEqual(ret, 'VALUE') (self._gluster_manager._get_vol_option_via_info. assert_called_once_with('user.OPT')) def test_get_vol_regular_option_empty_reponse(self): args = ('--xml', 'volume', 'get', self._gluster_manager.volume, NFS_EXPORT_DIR) self.mock_object(self._gluster_manager, 'gluster_call', mock.Mock(return_value=('', {}))) ret = self._gluster_manager._get_vol_regular_option(NFS_EXPORT_DIR) self.assertIsNone(ret) self._gluster_manager.gluster_call.assert_called_once_with( *args, check_exit_code=False) @ddt.data(0, 2) def test_get_vol_regular_option_ambiguous_volinfo(self, count): def xml_output(*ignore_args, **ignore_kwargs): return """ 0 0 %d """ % count, '' args = ('--xml', 'volume', 'get', self._gluster_manager.volume, NFS_EXPORT_DIR) self.mock_object(self._gluster_manager, 'gluster_call', mock.Mock(side_effect=xml_output)) self.assertRaises(exception.InvalidShare, self._gluster_manager._get_vol_regular_option, NFS_EXPORT_DIR) self._gluster_manager.gluster_call.assert_called_once_with( *args, check_exit_code=False) def test_get_vol_regular_option(self): def xml_output(*ignore_args, **ignore_kwargs): return """ 0 0 1 /foo(10.0.0.1|10.0.0.2),/bar(10.0.0.1) """, '' args = ('--xml', 'volume', 'get', self._gluster_manager.volume, NFS_EXPORT_DIR) self.mock_object(self._gluster_manager, 'gluster_call', mock.Mock(side_effect=xml_output)) ret = self._gluster_manager._get_vol_regular_option(NFS_EXPORT_DIR) self.assertEqual('/foo(10.0.0.1|10.0.0.2),/bar(10.0.0.1)', ret) self._gluster_manager.gluster_call.assert_called_once_with( *args, check_exit_code=False) def test_get_vol_regular_option_not_suppored(self): args = ('--xml', 'volume', 'get', self._gluster_manager.volume, NFS_EXPORT_DIR) self.mock_object(self._gluster_manager, 'gluster_call', mock.Mock(return_value=( """Ceci n'est pas un XML.""", ''))) self.mock_object(self._gluster_manager, '_get_vol_option_via_info', mock.Mock(return_value="VALUE")) ret = self._gluster_manager._get_vol_regular_option(NFS_EXPORT_DIR) self.assertEqual("VALUE", ret) self._gluster_manager.gluster_call.assert_called_once_with( *args, check_exit_code=False) (self._gluster_manager._get_vol_option_via_info. 
assert_called_once_with(NFS_EXPORT_DIR)) @ddt.data({'opt': 'some.option', 'opttype': 'regular', 'lowopt': 'some.option'}, {'opt': 'user.param', 'opttype': 'user', 'lowopt': 'param'}) @ddt.unpack def test_get_vol_option(self, opt, opttype, lowopt): for t in ('user', 'regular'): self.mock_object(self._gluster_manager, '_get_vol_%s_option' % t, mock.Mock(return_value='value-%s' % t)) ret = self._gluster_manager.get_vol_option(opt) self.assertEqual('value-%s' % opttype, ret) for t in ('user', 'regular'): func = getattr(self._gluster_manager, '_get_vol_%s_option' % t) if opttype == t: func.assert_called_once_with(lowopt) else: self.assertFalse(func.called) def test_get_vol_option_unset(self): self.mock_object(self._gluster_manager, '_get_vol_regular_option', mock.Mock(return_value=None)) ret = self._gluster_manager.get_vol_option('some.option') self.assertIsNone(ret) @ddt.data({'value': '0', 'boolval': False}, {'value': 'Off', 'boolval': False}, {'value': 'no', 'boolval': False}, {'value': '1', 'boolval': True}, {'value': 'true', 'boolval': True}, {'value': 'enAble', 'boolval': True}, {'value': None, 'boolval': None}) @ddt.unpack def test_get_vol_option_boolean(self, value, boolval): self.mock_object(self._gluster_manager, '_get_vol_regular_option', mock.Mock(return_value=value)) ret = self._gluster_manager.get_vol_option('some.option', boolean=True) self.assertEqual(boolval, ret) def test_get_vol_option_boolean_bad(self): self.mock_object(self._gluster_manager, '_get_vol_regular_option', mock.Mock(return_value='jabberwocky')) self.assertRaises(exception.GlusterfsException, self._gluster_manager.get_vol_option, 'some.option', boolean=True) @ddt.data({'setting': 'some_value', 'args': ('set', 'some_value')}, {'setting': None, 'args': ('reset',)}, {'setting': True, 'args': ('set', 'ON')}, {'setting': False, 'args': ('set', 'OFF')}) @ddt.unpack def test_set_vol_option(self, setting, args): self.mock_object(self._gluster_manager, 'gluster_call', mock.Mock()) self._gluster_manager.set_vol_option('an_option', setting) self._gluster_manager.gluster_call.assert_called_once_with( 'volume', args[0], 'testvol', 'an_option', *args[1:], error_policy=mock.ANY) @ddt.data({}, {'ignore_failure': False}) def test_set_vol_option_error(self, kwargs): fake_obj = mock.Mock( side_effect=exception.ProcessExecutionError(exit_code=1)) with mock.patch.object(common.ganesha_utils, 'RootExecutor', mock.Mock(return_value=fake_obj)): gluster_manager = common.GlusterManager( '127.0.0.1:/testvol', self.fake_execf) self.assertRaises(exception.GlusterfsException, gluster_manager.set_vol_option, 'an_option', "some_value", **kwargs) self.assertTrue(fake_obj.called) def test_set_vol_option_error_relaxed(self): fake_obj = mock.Mock( side_effect=exception.ProcessExecutionError(exit_code=1)) with mock.patch.object(common.ganesha_utils, 'RootExecutor', mock.Mock(return_value=fake_obj)): gluster_manager = common.GlusterManager( '127.0.0.1:/testvol', self.fake_execf) gluster_manager.set_vol_option('an_option', "some_value", ignore_failure=True) self.assertTrue(fake_obj.called) def test_get_gluster_version(self): self.mock_object(self._gluster_manager, 'gluster_call', mock.Mock(return_value=('glusterfs 3.6.2beta3', ''))) ret = self._gluster_manager.get_gluster_version() self.assertEqual(['3', '6', '2beta3'], ret) self._gluster_manager.gluster_call.assert_called_once_with( '--version', log=mock.ANY) @ddt.data("foo 1.1.1", "glusterfs 3-6", "glusterfs 3.6beta3") def test_get_gluster_version_exception(self, versinfo): 
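        # Descriptive note (added by editor): version strings that do not parse as
        # 'glusterfs X.Y[.Z]' are expected to make get_gluster_version() raise
        # GlusterfsException, which is what each ddt-generated case below verifies.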
self.mock_object(self._gluster_manager, 'gluster_call', mock.Mock(return_value=(versinfo, ''))) self.assertRaises(exception.GlusterfsException, self._gluster_manager.get_gluster_version) self._gluster_manager.gluster_call.assert_called_once_with( '--version', log=mock.ANY) def test_check_gluster_version(self): self.mock_object(self._gluster_manager, 'get_gluster_version', mock.Mock(return_value=('3', '6'))) ret = self._gluster_manager.check_gluster_version((3, 5, 2)) self.assertIsNone(ret) self._gluster_manager.get_gluster_version.assert_called_once_with() def test_check_gluster_version_unmet(self): self.mock_object(self._gluster_manager, 'get_gluster_version', mock.Mock(return_value=('3', '5', '2'))) self.assertRaises(exception.GlusterfsException, self._gluster_manager.check_gluster_version, (3, 6)) self._gluster_manager.get_gluster_version.assert_called_once_with() @ddt.data(('3', '6'), ('3', '6', '2beta'), ('3', '6', '2beta', '4')) def test_numreduct(self, vers): ret = common.numreduct(vers) self.assertEqual((3, 6), ret) @ddt.ddt class GlusterFSCommonTestCase(test.TestCase): """Tests common GlusterFS utility functions.""" def setUp(self): super(GlusterFSCommonTestCase, self).setUp() fake_utils.stub_out_utils_execute(self) self._execute = fake_utils.fake_execute self.addCleanup(fake_utils.fake_execute_set_repliers, []) self.addCleanup(fake_utils.fake_execute_clear_log) self.mock_object(common.GlusterManager, 'make_gluster_call') @staticmethod def _mount_exec(vol, mnt): return ['mkdir -p %s' % mnt, 'mount -t glusterfs %(exp)s %(mnt)s' % {'exp': vol, 'mnt': mnt}] def test_mount_gluster_vol(self): expected_exec = self._mount_exec(fakeexport, fakemnt) ret = common._mount_gluster_vol(self._execute, fakeexport, fakemnt, False) self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) self.assertIsNone(ret) def test_mount_gluster_vol_mounted_noensure(self): def exec_runner(*ignore_args, **ignore_kwargs): raise exception.ProcessExecutionError(stderr='already mounted') expected_exec = self._mount_exec(fakeexport, fakemnt) fake_utils.fake_execute_set_repliers([('mount', exec_runner)]) self.assertRaises(exception.GlusterfsException, common._mount_gluster_vol, self._execute, fakeexport, fakemnt, False) self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) def test_mount_gluster_vol_mounted_ensure(self): def exec_runner(*ignore_args, **ignore_kwargs): raise exception.ProcessExecutionError(stderr='already mounted') expected_exec = self._mount_exec(fakeexport, fakemnt) common.LOG.warning = mock.Mock() fake_utils.fake_execute_set_repliers([('mount', exec_runner)]) ret = common._mount_gluster_vol(self._execute, fakeexport, fakemnt, True) self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) self.assertIsNone(ret) common.LOG.warning.assert_called_with( "%s is already mounted.", fakeexport) @ddt.data(True, False) def test_mount_gluster_vol_fail(self, ensure): def exec_runner(*ignore_args, **ignore_kwargs): raise RuntimeError('fake error') expected_exec = self._mount_exec(fakeexport, fakemnt) fake_utils.fake_execute_set_repliers([('mount', exec_runner)]) self.assertRaises(RuntimeError, common._mount_gluster_vol, self._execute, fakeexport, fakemnt, ensure) self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) def test_umount_gluster_vol(self): expected_exec = ['umount %s' % fakemnt] ret = common._umount_gluster_vol(self._execute, fakemnt) self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) self.assertIsNone(ret) @ddt.data({'in_exc': 
exception.ProcessExecutionError, 'out_exc': exception.GlusterfsException}, {'in_exc': RuntimeError, 'out_exc': RuntimeError}) @ddt.unpack def test_umount_gluster_vol_fail(self, in_exc, out_exc): def exec_runner(*ignore_args, **ignore_kwargs): raise in_exc('fake error') expected_exec = ['umount %s' % fakemnt] fake_utils.fake_execute_set_repliers([('umount', exec_runner)]) self.assertRaises(out_exc, common._umount_gluster_vol, self._execute, fakemnt) self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) def test_restart_gluster_vol(self): gmgr = common.GlusterManager(fakeexport, self._execute, None, None) test_args = [(('volume', 'stop', fakevol, '--mode=script'), {'log': mock.ANY}), (('volume', 'start', fakevol), {'log': mock.ANY})] common._restart_gluster_vol(gmgr) self.assertEqual( [mock.call(*arg[0], **arg[1]) for arg in test_args], gmgr.gluster_call.call_args_list) manila-2.0.0/manila/tests/share/drivers/glusterfs/test_layout_directory.py0000664000567000056710000004171512701407107030335 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import ddt import mock from oslo_config import cfg from manila import context from manila import exception from manila.share import configuration as config from manila.share.drivers.glusterfs import common from manila.share.drivers.glusterfs import layout_directory from manila import test from manila.tests import fake_share from manila.tests import fake_utils CONF = cfg.CONF fake_gluster_manager_attrs = { 'export': '127.0.0.1:/testvol', 'host': '127.0.0.1', 'qualified': 'testuser@127.0.0.1:/testvol', 'user': 'testuser', 'volume': 'testvol', 'path_to_private_key': '/fakepath/to/privatekey', 'remote_server_password': 'fakepassword', 'components': {'user': 'testuser', 'host': '127.0.0.1', 'volume': 'testvol', 'path': None} } fake_local_share_path = '/mnt/nfs/testvol/fakename' fake_path_to_private_key = '/fakepath/to/privatekey' fake_remote_server_password = 'fakepassword' @ddt.ddt class GlusterfsDirectoryMappedLayoutTestCase(test.TestCase): """Tests GlusterfsDirectoryMappedLayout.""" def setUp(self): super(GlusterfsDirectoryMappedLayoutTestCase, self).setUp() fake_utils.stub_out_utils_execute(self) self._execute = fake_utils.fake_execute self._context = context.get_admin_context() self.addCleanup(fake_utils.fake_execute_set_repliers, []) self.addCleanup(fake_utils.fake_execute_clear_log) CONF.set_default('glusterfs_target', '127.0.0.1:/testvol') CONF.set_default('glusterfs_mount_point_base', '/mnt/nfs') CONF.set_default('glusterfs_server_password', fake_remote_server_password) CONF.set_default('glusterfs_path_to_private_key', fake_path_to_private_key) self.fake_driver = mock.Mock() self.mock_object(self.fake_driver, '_execute', self._execute) self.fake_driver.GLUSTERFS_VERSION_MIN = (3, 6) self.fake_conf = config.Configuration(None) self.mock_object(common.GlusterManager, 'make_gluster_call') self._layout = layout_directory.GlusterfsDirectoryMappedLayout( 
self.fake_driver, configuration=self.fake_conf) self._layout.gluster_manager = mock.Mock(**fake_gluster_manager_attrs) self.share = fake_share.fake_share(share_proto='NFS') def test_do_setup(self): fake_gluster_manager = mock.Mock(**fake_gluster_manager_attrs) self.mock_object(fake_gluster_manager, 'get_gluster_version', mock.Mock(return_value=('3', '5'))) methods = ('_check_mount_glusterfs', '_ensure_gluster_vol_mounted') for method in methods: self.mock_object(self._layout, method) self.mock_object(common, 'GlusterManager', mock.Mock(return_value=fake_gluster_manager)) self._layout.do_setup(self._context) self.assertEqual(fake_gluster_manager, self._layout.gluster_manager) common.GlusterManager.assert_called_once_with( self._layout.configuration.glusterfs_target, self._execute, self._layout.configuration.glusterfs_path_to_private_key, self._layout.configuration.glusterfs_server_password, requires={'volume': True}) self._layout.gluster_manager.gluster_call.assert_called_once_with( 'volume', 'quota', 'testvol', 'enable') self._layout._check_mount_glusterfs.assert_called_once_with() self._layout._ensure_gluster_vol_mounted.assert_called_once_with() def test_do_setup_glusterfs_target_not_set(self): self._layout.configuration.glusterfs_target = None self.assertRaises(exception.GlusterfsException, self._layout.do_setup, self._context) def test_do_setup_error_enabling_creation_share_specific_size(self): attrs = {'volume': 'testvol', 'gluster_call.side_effect': exception.GlusterfsException, 'get_vol_option.return_value': 'off'} fake_gluster_manager = mock.Mock(**attrs) self.mock_object(layout_directory.LOG, 'error') methods = ('_check_mount_glusterfs', '_ensure_gluster_vol_mounted') for method in methods: self.mock_object(self._layout, method) self.mock_object(common, 'GlusterManager', mock.Mock(return_value=fake_gluster_manager)) self.assertRaises(exception.GlusterfsException, self._layout.do_setup, self._context) self.assertEqual(fake_gluster_manager, self._layout.gluster_manager) common.GlusterManager.assert_called_once_with( self._layout.configuration.glusterfs_target, self._execute, self._layout.configuration.glusterfs_path_to_private_key, self._layout.configuration.glusterfs_server_password, requires={'volume': True}) self._layout.gluster_manager.gluster_call.assert_called_once_with( 'volume', 'quota', 'testvol', 'enable') (self._layout.gluster_manager.get_vol_option. 
assert_called_once_with('features.quota')) layout_directory.LOG.error.assert_called_once_with(mock.ANY) self._layout._check_mount_glusterfs.assert_called_once_with() self.assertFalse(self._layout._ensure_gluster_vol_mounted.called) def test_do_setup_error_already_enabled_creation_share_specific_size(self): attrs = {'volume': 'testvol', 'gluster_call.side_effect': exception.GlusterfsException, 'get_vol_option.return_value': 'on'} fake_gluster_manager = mock.Mock(**attrs) self.mock_object(layout_directory.LOG, 'error') methods = ('_check_mount_glusterfs', '_ensure_gluster_vol_mounted') for method in methods: self.mock_object(self._layout, method) self.mock_object(common, 'GlusterManager', mock.Mock(return_value=fake_gluster_manager)) self._layout.do_setup(self._context) self.assertEqual(fake_gluster_manager, self._layout.gluster_manager) common.GlusterManager.assert_called_once_with( self._layout.configuration.glusterfs_target, self._execute, self._layout.configuration.glusterfs_path_to_private_key, self._layout.configuration.glusterfs_server_password, requires={'volume': True}) self._layout.gluster_manager.gluster_call.assert_called_once_with( 'volume', 'quota', 'testvol', 'enable') (self._layout.gluster_manager.get_vol_option. assert_called_once_with('features.quota')) self.assertFalse(layout_directory.LOG.error.called) self._layout._check_mount_glusterfs.assert_called_once_with() self._layout._ensure_gluster_vol_mounted.assert_called_once_with() def test_share_manager(self): self._layout._glustermanager = mock.Mock() self._layout._share_manager(self.share) self._layout._glustermanager.assert_called_once_with( {'user': 'testuser', 'host': '127.0.0.1', 'volume': 'testvol', 'path': '/fakename'}) def test_ensure_gluster_vol_mounted(self): common._mount_gluster_vol = mock.Mock() self._layout._ensure_gluster_vol_mounted() self.assertTrue(common._mount_gluster_vol.called) def test_ensure_gluster_vol_mounted_error(self): common._mount_gluster_vol =\ mock.Mock(side_effect=exception.GlusterfsException) self.assertRaises(exception.GlusterfsException, self._layout._ensure_gluster_vol_mounted) def test_get_local_share_path(self): with mock.patch.object(os, 'access', return_value=True): ret = self._layout._get_local_share_path(self.share) self.assertEqual('/mnt/nfs/testvol/fakename', ret) def test_local_share_path_not_exists(self): with mock.patch.object(os, 'access', return_value=False): self.assertRaises(exception.GlusterfsException, self._layout._get_local_share_path, self.share) def test_update_share_stats(self): test_statvfs = mock.Mock(f_frsize=4096, f_blocks=524288, f_bavail=524288) self._layout._get_mount_point_for_gluster_vol = \ mock.Mock(return_value='/mnt/nfs/testvol') some_no = 42 not_some_no = some_no + 1 os_stat = lambda path: mock.Mock(st_dev=some_no) if path == '/mnt/nfs' \ else mock.Mock(st_dev=not_some_no) with mock.patch.object(os, 'statvfs', return_value=test_statvfs): with mock.patch.object(os, 'stat', os_stat): ret = self._layout._update_share_stats() test_data = { 'total_capacity_gb': 2, 'free_capacity_gb': 2, } self.assertEqual(test_data, ret) def test_update_share_stats_gluster_mnt_unavailable(self): self._layout._get_mount_point_for_gluster_vol = \ mock.Mock(return_value='/mnt/nfs/testvol') some_no = 42 with mock.patch.object(os, 'stat', return_value=mock.Mock(st_dev=some_no)): self.assertRaises(exception.GlusterfsException, self._layout._update_share_stats) @ddt.data((), (None,)) def test_create_share(self, extra_args): exec_cmd1 = 'mkdir %s' % fake_local_share_path 
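        # Descriptive note (added by editor): creating a share in the directory-mapped
        # layout is expected to run 'mkdir' for the per-share directory on the mounted
        # volume and to cap it with a gluster quota 'limit-usage' call sized to the
        # share; both are asserted further down in this test.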
expected_exec = [exec_cmd1, ] expected_ret = 'testuser@127.0.0.1:/testvol/fakename' self.mock_object( self._layout, '_get_local_share_path', mock.Mock(return_value=fake_local_share_path)) gmgr = mock.Mock() self.mock_object( self._layout, '_glustermanager', mock.Mock(return_value=gmgr)) self.mock_object( self._layout.driver, '_setup_via_manager', mock.Mock(return_value=expected_ret)) ret = self._layout.create_share(self._context, self.share, *extra_args) self._layout._get_local_share_path.called_once_with(self.share) self._layout.gluster_manager.gluster_call.assert_called_once_with( 'volume', 'quota', 'testvol', 'limit-usage', '/fakename', '1GB') self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) self._layout._glustermanager.assert_called_once_with( {'user': 'testuser', 'host': '127.0.0.1', 'volume': 'testvol', 'path': '/fakename'}) self._layout.driver._setup_via_manager.assert_called_once_with( {'share': self.share, 'manager': gmgr}) self.assertEqual(expected_ret, ret) @ddt.data(exception.ProcessExecutionError, exception.GlusterfsException) def test_create_share_unable_to_create_share(self, trouble): def exec_runner(*ignore_args, **ignore_kw): raise trouble self.mock_object( self._layout, '_get_local_share_path', mock.Mock(return_value=fake_local_share_path)) self.mock_object(self._layout, '_cleanup_create_share') self.mock_object(layout_directory.LOG, 'error') expected_exec = ['mkdir %s' % fake_local_share_path] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) self.assertRaises( exception.GlusterfsException, self._layout.create_share, self._context, self.share) self._layout._get_local_share_path.called_once_with(self.share) self._layout._cleanup_create_share.assert_called_once_with( fake_local_share_path, self.share['name']) layout_directory.LOG.error.assert_called_once_with( mock.ANY, mock.ANY) def test_create_share_unable_to_create_share_weird(self): def exec_runner(*ignore_args, **ignore_kw): raise RuntimeError self.mock_object( self._layout, '_get_local_share_path', mock.Mock(return_value=fake_local_share_path)) self.mock_object(self._layout, '_cleanup_create_share') self.mock_object(layout_directory.LOG, 'error') expected_exec = ['mkdir %s' % fake_local_share_path] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) self.assertRaises( RuntimeError, self._layout.create_share, self._context, self.share) self._layout._get_local_share_path.called_once_with(self.share) self.assertFalse(self._layout._cleanup_create_share.called) def test_cleanup_create_share_local_share_path_exists(self): expected_exec = ['rm -rf %s' % fake_local_share_path] self.mock_object(os.path, 'exists', mock.Mock(return_value=True)) ret = self._layout._cleanup_create_share(fake_local_share_path, self.share['name']) os.path.exists.assert_called_once_with(fake_local_share_path) self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) self.assertIsNone(ret) def test_cleanup_create_share_cannot_cleanup_unusable_share(self): def exec_runner(*ignore_args, **ignore_kw): raise exception.ProcessExecutionError expected_exec = ['rm -rf %s' % fake_local_share_path] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) self.mock_object(layout_directory.LOG, 'error') self.mock_object(os.path, 'exists', mock.Mock(return_value=True)) self.assertRaises(exception.GlusterfsException, self._layout._cleanup_create_share, fake_local_share_path, self.share['name']) os.path.exists.assert_called_once_with(fake_local_share_path) 
layout_directory.LOG.error.assert_called_once_with(mock.ANY, mock.ANY) def test_cleanup_create_share_local_share_path_does_not_exist(self): self.mock_object(os.path, 'exists', mock.Mock(return_value=False)) ret = self._layout._cleanup_create_share(fake_local_share_path, self.share['name']) os.path.exists.assert_called_once_with(fake_local_share_path) self.assertIsNone(ret) def test_delete_share(self): self._layout._get_local_share_path =\ mock.Mock(return_value='/mnt/nfs/testvol/fakename') self._layout.delete_share(self._context, self.share) self.assertEqual(['rm -rf /mnt/nfs/testvol/fakename'], fake_utils.fake_execute_get_log()) def test_cannot_delete_share(self): self._layout._get_local_share_path =\ mock.Mock(return_value='/mnt/nfs/testvol/fakename') def exec_runner(*ignore_args, **ignore_kw): raise exception.ProcessExecutionError expected_exec = ['rm -rf %s' % (self._layout._get_local_share_path())] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) self.assertRaises(exception.ProcessExecutionError, self._layout.delete_share, self._context, self.share) self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) def test_delete_share_can_be_called_with_extra_arg_share_server(self): self._layout._get_local_share_path = mock.Mock() share_server = None ret = self._layout.delete_share(self._context, self.share, share_server) self.assertIsNone(ret) self._layout._get_local_share_path.assert_called_once_with(self.share) def test_ensure_share(self): self.assertIsNone(self._layout.ensure_share(self._context, self.share)) @ddt.data( ('create_share_from_snapshot', ('context', 'share', 'snapshot'), {'share_server': None}), ('create_snapshot', ('context', 'snapshot'), {'share_server': None}), ('delete_snapshot', ('context', 'snapshot'), {'share_server': None}), ('manage_existing', ('share', 'driver_options'), {}), ('unmanage', ('share',), {}), ('extend_share', ('share', 'new_size'), {'share_server': None}), ('shrink_share', ('share', 'new_size'), {'share_server': None})) def test_nonimplemented_methods(self, method_invocation): method, args, kwargs = method_invocation self.assertRaises(NotImplementedError, getattr(self._layout, method), *args, **kwargs) manila-2.0.0/manila/tests/share/drivers/glusterfs/__init__.py0000664000567000056710000000000012701407107025412 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/glusterfs/test_layout.py0000664000567000056710000002562712701407107026255 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
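# NOTE (added by editor): the following is an illustrative, hypothetical sketch and is
# not part of the original manila source tree. It shows the ddt + mock pattern that the
# GlusterFS test modules in this package rely on: @ddt.data generates one test method
# per data set, and mock.Mock stands in for the command executor so no real 'gluster'
# binary is required. Names such as ExampleMockingPatternTestCase are made up for
# illustration only.
import unittest

import ddt
import mock


@ddt.ddt
class ExampleMockingPatternTestCase(unittest.TestCase):

    @ddt.data(('volume', 'list'), ('volume', 'info', 'fakevol'))
    def test_fake_gluster_call(self, cli_args):
        # Each @ddt.data entry becomes its own generated test method.
        fake_executor = mock.Mock(return_value=('fake output', ''))
        out, err = fake_executor(*cli_args)
        self.assertEqual('fake output', out)
        fake_executor.assert_called_once_with(*cli_args)
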
import errno import os import ddt import mock from oslo_config import cfg from oslo_utils import importutils from manila import exception from manila.share import configuration as config from manila.share import driver from manila.share.drivers.glusterfs import layout from manila import test from manila.tests import fake_utils CONF = cfg.CONF fake_local_share_path = '/mnt/nfs/testvol/fakename' fake_path_to_private_key = '/fakepath/to/privatekey' fake_remote_server_password = 'fakepassword' class GlusterfsFakeShareDriver(layout.GlusterfsShareDriverBase): supported_layouts = ('layout_fake.FakeLayout', 'layout_something.SomeLayout') supported_protocols = ('NFS,') @ddt.ddt class GlusterfsShareDriverBaseTestCase(test.TestCase): """Tests GlusterfsShareDriverBase.""" def setUp(self): super(GlusterfsShareDriverBaseTestCase, self).setUp() CONF.set_default('driver_handles_share_servers', False) fake_conf, __ = self._setup() self._driver = GlusterfsFakeShareDriver(False, configuration=fake_conf) self.fake_share = mock.Mock() self.fake_context = mock.Mock() self.fake_access = mock.Mock() def _setup(self): fake_conf = config.Configuration(None) fake_layout = mock.Mock() self.mock_object(importutils, "import_object", mock.Mock(return_value=fake_layout)) return fake_conf, fake_layout def test_init(self): self.assertRaises(IndexError, layout.GlusterfsShareDriverBase, False, configuration=config.Configuration(None)) @ddt.data({'has_snap': None, 'layout_name': None}, {'has_snap': False, 'layout_name': 'layout_fake.FakeLayout'}, {'has_snap': True, 'layout_name': 'layout_something.SomeLayout'}) @ddt.unpack def test_init_subclass(self, has_snap, layout_name): conf, _layout = self._setup() if layout_name is not None: conf.glusterfs_share_layout = layout_name if has_snap is None: del(_layout._snapshots_are_supported) else: _layout._snapshots_are_supported = has_snap _driver = GlusterfsFakeShareDriver(False, configuration=conf) snap_result = {None: False}.get(has_snap, has_snap) layout_result = {None: 'layout_fake.FakeLayout'}.get(layout_name, layout_name) importutils.import_object.assert_called_once_with( 'manila.share.drivers.glusterfs.%s' % layout_result, _driver, configuration=conf) self.assertEqual(_layout, _driver.layout) self.assertEqual(snap_result, _driver.snapshots_are_supported) def test_init_nosupp_layout(self): conf = config.Configuration(None) conf.glusterfs_share_layout = 'nonsense_layout' self.assertRaises(exception.GlusterfsException, GlusterfsFakeShareDriver, False, configuration=conf) def test_setup_via_manager(self): self.assertIsNone(self._driver._setup_via_manager(mock.Mock())) @ddt.data('allow', 'deny') def test_allow_deny_access(self, op): conf, _layout = self._setup() gmgr = mock.Mock() self.mock_object(_layout, '_share_manager', mock.Mock(return_value=gmgr)) _driver = GlusterfsFakeShareDriver(False, configuration=conf) self.mock_object(_driver, "_%s_access_via_manager" % op, mock.Mock()) getattr(_driver, "%s_access" % op)(self.fake_context, self.fake_share, self.fake_access) _layout._share_manager.assert_called_once_with(self.fake_share) getattr(_driver, "_%s_access_via_manager" % op).assert_called_once_with( gmgr, self.fake_context, self.fake_share, self.fake_access, None) @ddt.data('allow', 'deny') def test_allow_deny_access_via_manager(self, op): self.assertRaises(NotImplementedError, getattr(self._driver, "_%s_access_via_manager" % op), mock.Mock(), self.fake_context, self.fake_share, self.fake_access, None) @ddt.data('NFS', 'PROTATO') def test_check_proto_baseclass(self, proto): 
self.assertRaises(exception.ShareBackendException, layout.GlusterfsShareDriverBase._check_proto, {'share_proto': proto}) def test_check_proto(self): GlusterfsFakeShareDriver._check_proto({'share_proto': 'NFS'}) def test_check_proto_notsupported(self): self.assertRaises(exception.ShareBackendException, GlusterfsFakeShareDriver._check_proto, {'share_proto': 'PROTATO'}) @ddt.data('', '_from_snapshot') def test_create_share(self, variant): conf, _layout = self._setup() _driver = GlusterfsFakeShareDriver(False, configuration=conf) self.mock_object(_driver, '_check_proto', mock.Mock()) getattr(_driver, 'create_share%s' % variant)(self.fake_context, self.fake_share) _driver._check_proto.assert_called_once_with(self.fake_share) getattr(_layout, 'create_share%s' % variant).assert_called_once_with( self.fake_context, self.fake_share) @ddt.data(True, False) def test_update_share_stats(self, internal_exception): data = mock.Mock() conf, _layout = self._setup() def raise_exception(*args, **kwargs): raise NotImplementedError layoutstats = mock.Mock() mock_kw = ({'side_effect': raise_exception} if internal_exception else {'return_value': layoutstats}) self.mock_object(_layout, '_update_share_stats', mock.Mock(**mock_kw)) self.mock_object(driver.ShareDriver, '_update_share_stats', mock.Mock()) _driver = GlusterfsFakeShareDriver(False, configuration=conf) _driver._update_share_stats(data) if internal_exception: self.assertFalse(data.update.called) else: data.update.assert_called_once_with(layoutstats) driver.ShareDriver._update_share_stats.assert_called_once_with( data) @ddt.data('do_setup', 'create_snapshot', 'delete_share', 'delete_snapshot', 'ensure_share', 'manage_existing', 'unmanage', 'extend_share', 'shrink_share') def test_delegated_methods(self, method): conf, _layout = self._setup() _driver = GlusterfsFakeShareDriver(False, configuration=conf) fake_args = (mock.Mock(), mock.Mock(), mock.Mock()) getattr(_driver, method)(*fake_args) getattr(_layout, method).assert_called_once_with(*fake_args) @ddt.ddt class GlusterfsShareLayoutBaseTestCase(test.TestCase): """Tests GlusterfsShareLayoutBaseTestCase.""" def setUp(self): super(GlusterfsShareLayoutBaseTestCase, self).setUp() fake_utils.stub_out_utils_execute(self) self._execute = fake_utils.fake_execute self.addCleanup(fake_utils.fake_execute_set_repliers, []) self.addCleanup(fake_utils.fake_execute_clear_log) self.fake_driver = mock.Mock() self.mock_object(self.fake_driver, '_execute', self._execute) class FakeLayout(layout.GlusterfsShareLayoutBase): def _share_manager(self, share): """Return GlusterManager object representing share's backend.""" def do_setup(self, context): """Any initialization the share driver does while starting.""" def create_share(self, context, share, share_server=None): """Is called to create share.""" def create_share_from_snapshot(self, context, share, snapshot, share_server=None): """Is called to create share from snapshot.""" def create_snapshot(self, context, snapshot, share_server=None): """Is called to create snapshot.""" def delete_share(self, context, share, share_server=None): """Is called to remove share.""" def delete_snapshot(self, context, snapshot, share_server=None): """Is called to remove snapshot.""" def ensure_share(self, context, share, share_server=None): """Invoked to ensure that share is exported.""" def manage_existing(self, share, driver_options): """Brings an existing share under Manila management.""" def unmanage(self, share): """Removes the specified share from Manila management.""" def 
extend_share(self, share, new_size, share_server=None): """Extends size of existing share.""" def shrink_share(self, share, new_size, share_server=None): """Shrinks size of existing share.""" def test_init_invalid(self): self.assertRaises(TypeError, layout.GlusterfsShareLayoutBase, mock.Mock()) def test_subclass(self): fake_conf = mock.Mock() _layout = self.FakeLayout(self.fake_driver, configuration=fake_conf) self.assertEqual(fake_conf, _layout.configuration) self.assertRaises(NotImplementedError, _layout._update_share_stats) def test_check_mount_glusterfs(self): fake_conf = mock.Mock() _driver = mock.Mock() _driver._execute = mock.Mock() _layout = self.FakeLayout(_driver, configuration=fake_conf) _layout._check_mount_glusterfs() _driver._execute.assert_called_once_with( 'mount.glusterfs', check_exit_code=False) @ddt.data({'_errno': errno.ENOENT, '_exception': exception.GlusterfsException}, {'_errno': errno.EACCES, '_exception': OSError}) @ddt.unpack def test_check_mount_glusterfs_not_installed(self, _errno, _exception): fake_conf = mock.Mock() _layout = self.FakeLayout(self.fake_driver, configuration=fake_conf) def exec_runner(*ignore_args, **ignore_kwargs): raise OSError(_errno, os.strerror(_errno)) expected_exec = ['mount.glusterfs'] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) self.assertRaises(_exception, _layout._check_mount_glusterfs) manila-2.0.0/manila/tests/share/drivers/glusterfs/test_glusterfs_native.py0000664000567000056710000003312512701407107030314 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ GlusterFS native protocol (glusterfs) driver for shares. Test cases for GlusterFS native protocol driver. 
""" import ddt import mock from oslo_config import cfg from manila.common import constants from manila import context from manila import exception from manila.share import configuration as config from manila.share.drivers.glusterfs import common from manila.share.drivers.glusterfs import glusterfs_native from manila import test from manila.tests import fake_utils CONF = cfg.CONF def new_share(**kwargs): share = { 'id': 'fakeid', 'name': 'fakename', 'size': 1, 'share_proto': 'glusterfs', } share.update(kwargs) return share @ddt.ddt class GlusterfsNativeShareDriverTestCase(test.TestCase): """Tests GlusterfsNativeShareDriver.""" def setUp(self): super(GlusterfsNativeShareDriverTestCase, self).setUp() fake_utils.stub_out_utils_execute(self) self._execute = fake_utils.fake_execute self._context = context.get_admin_context() self.glusterfs_target1 = 'root@host1:/gv1' self.glusterfs_target2 = 'root@host2:/gv2' self.glusterfs_server1 = 'root@host1' self.glusterfs_server2 = 'root@host2' self.glusterfs_server1_volumes = 'manila-share-1-1G\nshare1' self.glusterfs_server2_volumes = 'manila-share-2-2G\nshare2' self.share1 = new_share( export_location=self.glusterfs_target1, status=constants.STATUS_AVAILABLE) self.share2 = new_share( export_location=self.glusterfs_target2, status=constants.STATUS_AVAILABLE) self.gmgr1 = common.GlusterManager(self.glusterfs_server1, self._execute, None, None, requires={'volume': False}) self.gmgr2 = common.GlusterManager(self.glusterfs_server2, self._execute, None, None, requires={'volume': False}) self.glusterfs_volumes_dict = ( {'root@host1:/manila-share-1-1G': {'size': 1}, 'root@host2:/manila-share-2-2G': {'size': 2}}) self.glusterfs_used_vols = set([ 'root@host1:/manila-share-1-1G', 'root@host2:/manila-share-2-2G']) CONF.set_default('glusterfs_volume_pattern', 'manila-share-\d+-#{size}G$') CONF.set_default('driver_handles_share_servers', False) self.fake_conf = config.Configuration(None) self.mock_object(common.GlusterManager, 'make_gluster_call') self._driver = glusterfs_native.GlusterfsNativeShareDriver( execute=self._execute, configuration=self.fake_conf) self.addCleanup(fake_utils.fake_execute_set_repliers, []) self.addCleanup(fake_utils.fake_execute_clear_log) def test_supported_protocols(self): self.assertEqual(('GLUSTERFS', ), self._driver.supported_protocols) def test_setup_via_manager(self): gmgr = mock.Mock() gmgr.gluster_call = mock.Mock() gmgr.set_vol_option = mock.Mock() gmgr.volume = 'fakevol' gmgr.export = 'fakehost:/fakevol' gmgr.get_vol_option = mock.Mock( return_value='glusterfs-server-name,some-other-name') share = mock.Mock() settings = ( ('nfs.export-volumes', False, {}), ('client.ssl', True, {}), ('server.ssl', True, {}), ('server.dynamic-auth', True, {'ignore_failure': True}), ) call_args = ( ('volume', 'stop', 'fakevol', '--mode=script', {'log': mock.ANY}), ('volume', 'start', 'fakevol', {'log': mock.ANY}), ) ret = self._driver._setup_via_manager({'manager': gmgr, 'share': share}) gmgr.get_vol_option.assert_called_once_with('auth.ssl-allow') gmgr.set_vol_option.assert_has_calls( [mock.call(*a[:-1], **a[-1]) for a in settings]) gmgr.gluster_call.assert_has_calls( [mock.call(*a[:-1], **a[-1]) for a in call_args]) self.assertEqual(ret, gmgr.export) def test_setup_via_manager_with_parent(self): gmgr = mock.Mock() gmgr.set_vol_option = mock.Mock() gmgr.volume = 'fakevol' gmgr.export = 'fakehost:/fakevol' gmgr_parent = mock.Mock() gmgr_parent.get_vol_option = mock.Mock( return_value=( 'glusterfs-server-name,some-other-name,manila-host.com')) share = 
mock.Mock() share_parent = mock.Mock() settings = ( ('auth.ssl-allow', 'glusterfs-server-name,manila-host.com', {}), ('nfs.export-volumes', False, {}), ('client.ssl', True, {}), ('server.ssl', True, {}), ('server.dynamic-auth', True, {'ignore_failure': True}), ) ret = self._driver._setup_via_manager( {'manager': gmgr, 'share': share}, {'manager': gmgr_parent, 'share': share_parent}) gmgr_parent.get_vol_option.assert_called_once_with( 'auth.ssl-allow') gmgr.set_vol_option.assert_has_calls( [mock.call(*a[:-1], **a[-1]) for a in settings]) self.assertEqual(ret, gmgr.export) @ddt.data(True, False) def test_setup_via_manager_no_option_data(self, has_parent): share = mock.Mock() gmgr = mock.Mock() if has_parent: share_parent = mock.Mock() gmgr_parent = mock.Mock() share_mgr_parent = {'share': share_parent, 'manager': gmgr_parent} gmgr_queried = gmgr_parent else: share_mgr_parent = None gmgr_queried = gmgr gmgr_queried.get_vol_option = mock.Mock(return_value='') self.assertRaises(exception.GlusterfsException, self._driver._setup_via_manager, {'share': share, 'manager': gmgr}, share_mgr_parent=share_mgr_parent) gmgr_queried.get_vol_option.assert_called_once_with( 'auth.ssl-allow') def test_snapshots_are_supported(self): self.assertTrue(self._driver.snapshots_are_supported) def test_allow_access_via_manager(self): access = {'access_type': 'cert', 'access_to': 'client.example.com'} gmgr1 = common.GlusterManager(self.glusterfs_target1, self._execute, None, None) self.mock_object(gmgr1, 'get_vol_option', mock.Mock(return_value='some.common.name')) self.mock_object(gmgr1, 'set_vol_option') test_args = ('auth.ssl-allow', 'some.common.name,' + access['access_to']) self._driver.layout.gluster_used_vols = set([self.glusterfs_target1]) self._driver._allow_access_via_manager(gmgr1, self._context, self.share1, access) gmgr1.get_vol_option.assert_called_once_with('auth.ssl-allow') gmgr1.set_vol_option.assert_called_once_with(*test_args) def test_allow_access_via_manager_with_share_having_access(self): access = {'access_type': 'cert', 'access_to': 'client.example.com'} gmgr1 = common.GlusterManager(self.glusterfs_target1, self._execute, None, None) self.mock_object( gmgr1, 'get_vol_option', mock.Mock(return_value='some.common.name,' + access['access_to'])) self.mock_object(gmgr1, 'set_vol_option') self._driver.layout.gluster_used_vols = set([self.glusterfs_target1]) self._driver._allow_access_via_manager(gmgr1, self._context, self.share1, access) gmgr1.get_vol_option.assert_called_once_with('auth.ssl-allow') self.assertFalse(gmgr1.set_vol_option.called) def test_allow_access_via_manager_invalid_access_type(self): access = {'access_type': 'invalid', 'access_to': 'client.example.com'} expected_exec = [] self.assertRaises(exception.InvalidShareAccess, self._driver._allow_access_via_manager, self.gmgr1, self._context, self.share1, access) self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) def test_deny_access_via_manager(self): self.mock_object(common, '_restart_gluster_vol', mock.Mock()) access = {'access_type': 'cert', 'access_to': 'client.example.com'} gmgr1 = common.GlusterManager(self.glusterfs_target1, self._execute, None, None) def _get_vol_option(opt, **kw): if opt == 'auth.ssl-allow': return('some.common.name,' + access['access_to']) elif opt == 'server.dynamic-auth': return True self.mock_object( gmgr1, 'get_vol_option', mock.Mock(side_effect=_get_vol_option)) self.mock_object(gmgr1, 'set_vol_option') self._driver.layout.gluster_used_vols = set([self.glusterfs_target1]) 
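        # Descriptive note (added by editor): with 'server.dynamic-auth' reported as
        # enabled, revoking a cert should only rewrite 'auth.ssl-allow'; no volume
        # restart is expected, which the assertions below check via
        # common._restart_gluster_vol.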
self._driver._deny_access_via_manager(gmgr1, self._context, self.share1, access) gmgr1.get_vol_option.assert_has_calls( [mock.call(a, **kw) for a, kw in ( ('auth.ssl-allow', {}), ('server.dynamic-auth', {'boolean': True}))]) test_args = ('auth.ssl-allow', 'some.common.name') gmgr1.set_vol_option.assert_called_once_with(*test_args) self.assertFalse(common._restart_gluster_vol.called) def test_deny_access_via_manager_no_dyn_auth(self): self.mock_object(common, '_restart_gluster_vol', mock.Mock()) access = {'access_type': 'cert', 'access_to': 'client.example.com'} gmgr1 = common.GlusterManager(self.glusterfs_target1, self._execute, None, None) def _get_vol_option(opt, **kw): if opt == 'auth.ssl-allow': return('some.common.name,' + access['access_to']) elif opt == 'server.dynamic-auth': return False self.mock_object( gmgr1, 'get_vol_option', mock.Mock(side_effect=_get_vol_option)) self.mock_object(gmgr1, 'set_vol_option') self._driver.layout.gluster_used_vols = set([self.glusterfs_target1]) self._driver._deny_access_via_manager(gmgr1, self._context, self.share1, access) gmgr1.get_vol_option.assert_has_calls( [mock.call(a, **kw) for a, kw in ( ('auth.ssl-allow', {}), ('server.dynamic-auth', {'boolean': True}))]) test_args = ('auth.ssl-allow', 'some.common.name') gmgr1.set_vol_option.assert_called_once_with(*test_args) common._restart_gluster_vol.assert_called_once_with(gmgr1) def test_deny_access_via_manager_with_share_having_no_access(self): self.mock_object(common, '_restart_gluster_vol', mock.Mock()) access = {'access_type': 'cert', 'access_to': 'client.example.com'} gmgr1 = common.GlusterManager(self.glusterfs_target1, self._execute, None, None) self.mock_object(gmgr1, 'get_vol_option', mock.Mock(return_value='some.common.name')) self.mock_object(gmgr1, 'set_vol_option') self._driver.layout.gluster_used_vols = set([self.glusterfs_target1]) self._driver._deny_access_via_manager(gmgr1, self._context, self.share1, access) gmgr1.get_vol_option.assert_called_once_with('auth.ssl-allow') self.assertFalse(gmgr1.set_vol_option.called) self.assertFalse(common._restart_gluster_vol.called) def test_deny_access_via_manager_invalid_access_type(self): self.mock_object(common, '_restart_gluster_vol', mock.Mock()) access = {'access_type': 'invalid', 'access_to': 'NotApplicable'} self.assertRaises(exception.InvalidShareAccess, self._driver._deny_access_via_manager, self.gmgr1, self._context, self.share1, access) self.assertFalse(common._restart_gluster_vol.called) def test_update_share_stats(self): self._driver._update_share_stats() test_data = { 'share_backend_name': 'GlusterFS-Native', 'driver_handles_share_servers': False, 'vendor_name': 'Red Hat', 'driver_version': '1.1', 'storage_protocol': 'glusterfs', 'reserved_percentage': 0, 'qos': False, 'total_capacity_gb': 'unknown', 'free_capacity_gb': 'unknown', 'pools': None, 'snapshot_support': True, 'replication_domain': None, } self.assertEqual(test_data, self._driver._stats) def test_get_network_allocations_number(self): self.assertEqual(0, self._driver.get_network_allocations_number()) manila-2.0.0/manila/tests/share/drivers/glusterfs/test_layout_volume.py0000664000567000056710000012571312701407107027641 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ GlusterFS volume mapped share layout testcases. """ import re import shutil import tempfile import ddt import mock from oslo_config import cfg from manila.common import constants from manila import context from manila import exception from manila.share import configuration as config from manila.share.drivers.glusterfs import common from manila.share.drivers.glusterfs import layout_volume from manila import test from manila.tests import fake_utils CONF = cfg.CONF def new_share(**kwargs): share = { 'id': 'fakeid', 'name': 'fakename', 'size': 1, 'share_proto': 'glusterfs', } share.update(kwargs) return share def glusterXMLOut(**kwargs): template = """ %(ret)d %(errno)d fake error """ return template % kwargs, '' FAKE_UUID1 = '11111111-1111-1111-1111-111111111111' FAKE_UUID2 = '22222222-2222-2222-2222-222222222222' @ddt.ddt class GlusterfsVolumeMappedLayoutTestCase(test.TestCase): """Tests GlusterfsVolumeMappedLayout.""" def setUp(self): super(GlusterfsVolumeMappedLayoutTestCase, self).setUp() fake_utils.stub_out_utils_execute(self) self._execute = fake_utils.fake_execute self._context = context.get_admin_context() self.glusterfs_target1 = 'root@host1:/gv1' self.glusterfs_target2 = 'root@host2:/gv2' self.glusterfs_server1 = 'root@host1' self.glusterfs_server2 = 'root@host2' self.glusterfs_server1_volumes = 'manila-share-1-1G\nshare1' self.glusterfs_server2_volumes = 'manila-share-2-2G\nshare2' self.share1 = new_share( export_location=self.glusterfs_target1, status=constants.STATUS_AVAILABLE) self.share2 = new_share( export_location=self.glusterfs_target2, status=constants.STATUS_AVAILABLE) gmgr = common.GlusterManager self.gmgr1 = gmgr(self.glusterfs_server1, self._execute, None, None, requires={'volume': False}) self.gmgr2 = gmgr(self.glusterfs_server2, self._execute, None, None, requires={'volume': False}) self.glusterfs_volumes_dict = ( {'root@host1:/manila-share-1-1G': {'size': 1}, 'root@host2:/manila-share-2-2G': {'size': 2}}) self.glusterfs_used_vols = set([ 'root@host1:/manila-share-1-1G', 'root@host2:/manila-share-2-2G']) CONF.set_default('glusterfs_servers', [self.glusterfs_server1, self.glusterfs_server2]) CONF.set_default('glusterfs_server_password', 'fake_password') CONF.set_default('glusterfs_path_to_private_key', '/fakepath/to/privatekey') CONF.set_default('glusterfs_volume_pattern', 'manila-share-\d+-#{size}G$') CONF.set_default('driver_handles_share_servers', False) self.fake_driver = mock.Mock() self.mock_object(self.fake_driver, '_execute', self._execute) self.fake_driver.GLUSTERFS_VERSION_MIN = (3, 6) self.fake_conf = config.Configuration(None) self.mock_object(tempfile, 'mkdtemp', mock.Mock(return_value='/tmp/tmpKGHKJ')) self.mock_object(common.GlusterManager, 'make_gluster_call') self.fake_private_storage = mock.Mock() with mock.patch.object(layout_volume.GlusterfsVolumeMappedLayout, '_glustermanager', side_effect=[self.gmgr1, self.gmgr2]): self._layout = layout_volume.GlusterfsVolumeMappedLayout( self.fake_driver, configuration=self.fake_conf, private_storage=self.fake_private_storage) self._layout.glusterfs_versions = {self.glusterfs_server1: ('3', '6'), 
self.glusterfs_server2: ('3', '7')} self.addCleanup(fake_utils.fake_execute_set_repliers, []) self.addCleanup(fake_utils.fake_execute_clear_log) @ddt.data({"test_kwargs": {}, "requires": {"volume": True}}, {"test_kwargs": {'req_volume': False}, "requires": {"volume": False}}) @ddt.unpack def test_glustermanager(self, test_kwargs, requires): fake_obj = mock.Mock() self.mock_object(common, 'GlusterManager', mock.Mock(return_value=fake_obj)) ret = self._layout._glustermanager(self.glusterfs_target1, **test_kwargs) common.GlusterManager.assert_called_once_with( self.glusterfs_target1, self._execute, self._layout.configuration.glusterfs_path_to_private_key, self._layout.configuration.glusterfs_server_password, requires=requires) self.assertEqual(fake_obj, ret) def test_compile_volume_pattern(self): volume_pattern = 'manila-share-\d+-(?P\d+)G$' ret = self._layout._compile_volume_pattern() self.assertEqual(re.compile(volume_pattern), ret) @ddt.data({'root@host1:/manila-share-1-1G': 'NONE', 'root@host2:/manila-share-2-2G': None}, {'root@host1:/manila-share-1-1G': FAKE_UUID1, 'root@host2:/manila-share-2-2G': None}, {'root@host1:/manila-share-1-1G': 'foobarbaz', 'root@host2:/manila-share-2-2G': FAKE_UUID2}, {'root@host1:/manila-share-1-1G': FAKE_UUID1, 'root@host2:/manila-share-2-2G': FAKE_UUID2}) def test_fetch_gluster_volumes(self, sharemark): vol1_qualified = 'root@host1:/manila-share-1-1G' gmgr_vol1 = common.GlusterManager(vol1_qualified) gmgr_vol1.get_vol_option = mock.Mock( return_value=sharemark[vol1_qualified]) vol2_qualified = 'root@host2:/manila-share-2-2G' gmgr_vol2 = common.GlusterManager(vol2_qualified) gmgr_vol2.get_vol_option = mock.Mock( return_value=sharemark[vol2_qualified]) self.mock_object( self.gmgr1, 'gluster_call', mock.Mock(return_value=(self.glusterfs_server1_volumes, ''))) self.mock_object( self.gmgr2, 'gluster_call', mock.Mock(return_value=(self.glusterfs_server2_volumes, ''))) _glustermanager_calls = (self.gmgr1, gmgr_vol1, self.gmgr2, gmgr_vol2) self.mock_object(self._layout, '_glustermanager', mock.Mock(side_effect=_glustermanager_calls)) expected_output = {} for q, d in self.glusterfs_volumes_dict.items(): if sharemark[q] not in (FAKE_UUID1, FAKE_UUID2): expected_output[q] = d ret = self._layout._fetch_gluster_volumes() test_args = ('volume', 'list') self.gmgr1.gluster_call.assert_called_once_with(*test_args, log=mock.ANY) self.gmgr2.gluster_call.assert_called_once_with(*test_args, log=mock.ANY) gmgr_vol1.get_vol_option.assert_called_once_with( 'user.manila-share') gmgr_vol2.get_vol_option.assert_called_once_with( 'user.manila-share') self.assertEqual(expected_output, ret) def test_fetch_gluster_volumes_no_filter_used(self): vol1_qualified = 'root@host1:/manila-share-1-1G' gmgr_vol1 = common.GlusterManager(vol1_qualified) gmgr_vol1.get_vol_option = mock.Mock() vol2_qualified = 'root@host2:/manila-share-2-2G' gmgr_vol2 = common.GlusterManager(vol2_qualified) gmgr_vol2.get_vol_option = mock.Mock() self.mock_object( self.gmgr1, 'gluster_call', mock.Mock(return_value=(self.glusterfs_server1_volumes, ''))) self.mock_object( self.gmgr2, 'gluster_call', mock.Mock(return_value=(self.glusterfs_server2_volumes, ''))) _glustermanager_calls = (self.gmgr1, gmgr_vol1, self.gmgr2, gmgr_vol2) self.mock_object(self._layout, '_glustermanager', mock.Mock(side_effect=_glustermanager_calls)) expected_output = self.glusterfs_volumes_dict ret = self._layout._fetch_gluster_volumes(filter_used=False) test_args = ('volume', 'list') self.gmgr1.gluster_call.assert_called_once_with(*test_args, 
log=mock.ANY) self.gmgr2.gluster_call.assert_called_once_with(*test_args, log=mock.ANY) self.assertFalse(gmgr_vol1.get_vol_option.called) self.assertFalse(gmgr_vol2.get_vol_option.called) self.assertEqual(expected_output, ret) def test_fetch_gluster_volumes_no_keymatch(self): vol1_qualified = 'root@host1:/manila-share-1' gmgr_vol1 = common.GlusterManager(vol1_qualified) gmgr_vol1.get_vol_option = mock.Mock(return_value=None) self._layout.configuration.glusterfs_servers = [self.glusterfs_server1] self.mock_object( self.gmgr1, 'gluster_call', mock.Mock(return_value=('manila-share-1', ''))) _glustermanager_calls = (self.gmgr1, gmgr_vol1) self.mock_object(self._layout, '_glustermanager', mock.Mock(side_effect=_glustermanager_calls)) self.mock_object(self._layout, 'volume_pattern', re.compile('manila-share-\d+(-(?P\d+)G)?$')) expected_output = {'root@host1:/manila-share-1': {'size': None}} ret = self._layout._fetch_gluster_volumes() test_args = ('volume', 'list') self.gmgr1.gluster_call.assert_called_once_with(*test_args, log=mock.ANY) self.assertEqual(expected_output, ret) def test_fetch_gluster_volumes_error(self): test_args = ('volume', 'list') def raise_exception(*args, **kwargs): if(args == test_args): raise exception.GlusterfsException() self._layout.configuration.glusterfs_servers = [self.glusterfs_server1] self.mock_object(self.gmgr1, 'gluster_call', mock.Mock(side_effect=raise_exception)) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=self.gmgr1)) self.mock_object(layout_volume.LOG, 'error') self.assertRaises(exception.GlusterfsException, self._layout._fetch_gluster_volumes) self.gmgr1.gluster_call.assert_called_once_with(*test_args, log=mock.ANY) def test_do_setup(self): self._layout.configuration.glusterfs_servers = [self.glusterfs_server1] self.mock_object(self.gmgr1, 'get_gluster_version', mock.Mock(return_value=('3', '6'))) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=self.gmgr1)) self.mock_object(self._layout, '_fetch_gluster_volumes', mock.Mock(return_value=self.glusterfs_volumes_dict)) self.mock_object(self._layout, '_check_mount_glusterfs') self._layout.gluster_used_vols = self.glusterfs_used_vols self.mock_object(layout_volume.LOG, 'warning') self._layout.do_setup(self._context) self._layout._fetch_gluster_volumes.assert_called_once_with( filter_used=False) self._layout._check_mount_glusterfs.assert_called_once_with() self.gmgr1.get_gluster_version.assert_called_once_with() def test_do_setup_unsupported_glusterfs_version(self): self._layout.configuration.glusterfs_servers = [self.glusterfs_server1] self.mock_object(self.gmgr1, 'get_gluster_version', mock.Mock(return_value=('3', '5'))) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=self.gmgr1)) self.assertRaises(exception.GlusterfsException, self._layout.do_setup, self._context) self.gmgr1.get_gluster_version.assert_called_once_with() @ddt.data(exception.GlusterfsException, RuntimeError) def test_do_setup_get_gluster_version_fails(self, exc): def raise_exception(*args, **kwargs): raise exc self._layout.configuration.glusterfs_servers = [self.glusterfs_server1] self.mock_object(self.gmgr1, 'get_gluster_version', mock.Mock(side_effect=raise_exception)) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=self.gmgr1)) self.assertRaises(exc, self._layout.do_setup, self._context) self.gmgr1.get_gluster_version.assert_called_once_with() def test_do_setup_glusterfs_no_volumes_provided_by_backend(self): 
self._layout.configuration.glusterfs_servers = [self.glusterfs_server1] self.mock_object(self.gmgr1, 'get_gluster_version', mock.Mock(return_value=('3', '6'))) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=self.gmgr1)) self.mock_object(self._layout, '_fetch_gluster_volumes', mock.Mock(return_value={})) self.assertRaises(exception.GlusterfsException, self._layout.do_setup, self._context) self._layout._fetch_gluster_volumes.assert_called_once_with( filter_used=False) def test_share_manager(self): self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=self.gmgr1)) self.mock_object(self._layout.private_storage, 'get', mock.Mock(return_value='host1:/gv1')) ret = self._layout._share_manager(self.share1) self._layout.private_storage.get.assert_called_once_with( self.share1['id'], 'volume') self._layout._glustermanager.assert_called_once_with('host1:/gv1') self.assertEqual(self.gmgr1, ret) def test_share_manager_no_privdata(self): self.mock_object(self._layout.private_storage, 'get', mock.Mock(return_value=None)) ret = self._layout._share_manager(self.share1) self._layout.private_storage.get.assert_called_once_with( self.share1['id'], 'volume') self.assertEqual(None, ret) def test_ensure_share(self): share = self.share1 gmgr1 = common.GlusterManager(self.glusterfs_target1, self._execute, None, None) gmgr1.set_vol_option = mock.Mock() self.mock_object(self._layout, '_share_manager', mock.Mock(return_value=gmgr1)) self._layout.ensure_share(self._context, share) self._layout._share_manager.assert_called_once_with(share) self.assertIn(self.glusterfs_target1, self._layout.gluster_used_vols) gmgr1.set_vol_option.assert_called_once_with( 'user.manila-share', share['id']) @ddt.data({"voldict": {"host:/share2G": {"size": 2}}, "used_vols": set(), "size": 1, "expected": "host:/share2G"}, {"voldict": {"host:/share2G": {"size": 2}}, "used_vols": set(), "size": 2, "expected": "host:/share2G"}, {"voldict": {"host:/share2G": {"size": 2}}, "used_vols": set(), "size": None, "expected": "host:/share2G"}, {"voldict": {"host:/share2G": {"size": 2}, "host:/share": {"size": None}}, "used_vols": set(["host:/share2G"]), "size": 1, "expected": "host:/share"}, {"voldict": {"host:/share2G": {"size": 2}, "host:/share": {"size": None}}, "used_vols": set(["host:/share2G"]), "size": 2, "expected": "host:/share"}, {"voldict": {"host:/share2G": {"size": 2}, "host:/share": {"size": None}}, "used_vols": set(["host:/share2G"]), "size": 3, "expected": "host:/share"}, {"voldict": {"host:/share2G": {"size": 2}, "host:/share": {"size": None}}, "used_vols": set(["host:/share2G"]), "size": None, "expected": "host:/share"}, {"voldict": {"host:/share": {}}, "used_vols": set(), "size": 1, "expected": "host:/share"}, {"voldict": {"host:/share": {}}, "used_vols": set(), "size": None, "expected": "host:/share"}) @ddt.unpack def test_pop_gluster_vol(self, voldict, used_vols, size, expected): gmgr = common.GlusterManager gmgr1 = gmgr(expected, self._execute, None, None) self._layout._fetch_gluster_volumes = mock.Mock(return_value=voldict) self._layout.gluster_used_vols = used_vols self._layout._glustermanager = mock.Mock(return_value=gmgr1) self._layout.volume_pattern_keys = list(voldict.values())[0].keys() result = self._layout._pop_gluster_vol(size=size) self.assertEqual(expected, result) self.assertIn(result, used_vols) self._layout._fetch_gluster_volumes.assert_called_once_with() self._layout._glustermanager.assert_called_once_with(result) @ddt.data({"voldict": {"share2G": {"size": 2}}, "used_vols": 
set(), "size": 3}, {"voldict": {"share2G": {"size": 2}}, "used_vols": set(["share2G"]), "size": None}) @ddt.unpack def test_pop_gluster_vol_excp(self, voldict, used_vols, size): self._layout._fetch_gluster_volumes = mock.Mock(return_value=voldict) self._layout.gluster_used_vols = used_vols self._layout.volume_pattern_keys = list(voldict.values())[0].keys() self.assertRaises(exception.GlusterfsException, self._layout._pop_gluster_vol, size=size) self._layout._fetch_gluster_volumes.assert_called_once_with() self.assertFalse( self.fake_driver._setup_via_manager.called) def test_push_gluster_vol(self): self._layout.gluster_used_vols = set([ self.glusterfs_target1, self.glusterfs_target2]) self._layout._push_gluster_vol(self.glusterfs_target2) self.assertEqual(1, len(self._layout.gluster_used_vols)) self.assertFalse( self.glusterfs_target2 in self._layout.gluster_used_vols) def test_push_gluster_vol_excp(self): self._layout.gluster_used_vols = set([self.glusterfs_target1]) self._layout.gluster_unused_vols_dict = {} self.assertRaises(exception.GlusterfsException, self._layout._push_gluster_vol, self.glusterfs_target2) @ddt.data({'vers_minor': '6', 'cmd': ['find', '/tmp/tmpKGHKJ', '-mindepth', '1', '-delete']}, {'vers_minor': '7', 'cmd': ['find', '/tmp/tmpKGHKJ', '-mindepth', '1', '!', '-path', '/tmp/tmpKGHKJ/.trashcan', '!', '-path', '/tmp/tmpKGHKJ/.trashcan/internal_op', '-delete']}) @ddt.unpack def test_wipe_gluster_vol(self, vers_minor, cmd): tmpdir = '/tmp/tmpKGHKJ' gmgr = common.GlusterManager gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None) self._layout.glusterfs_versions = { self.glusterfs_server1: ('3', vers_minor)} self.mock_object(tempfile, 'mkdtemp', mock.Mock(return_value=tmpdir)) self.mock_object(self.fake_driver, '_execute', mock.Mock()) self.mock_object(common, '_mount_gluster_vol', mock.Mock()) self.mock_object(common, '_umount_gluster_vol', mock.Mock()) self.mock_object(shutil, 'rmtree', mock.Mock()) self._layout._wipe_gluster_vol(gmgr1) tempfile.mkdtemp.assert_called_once_with() common._mount_gluster_vol.assert_called_once_with( self.fake_driver._execute, gmgr1.export, tmpdir) kwargs = {'run_as_root': True} self.fake_driver._execute.assert_called_once_with( *cmd, **kwargs) common._umount_gluster_vol.assert_called_once_with( self.fake_driver._execute, tmpdir) kwargs = {'ignore_errors': True} shutil.rmtree.assert_called_once_with(tmpdir, **kwargs) def test_wipe_gluster_vol_mount_fail(self): tmpdir = '/tmp/tmpKGHKJ' gmgr = common.GlusterManager gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None) self._layout.glusterfs_versions = { self.glusterfs_server1: ('3', '6')} self.mock_object(tempfile, 'mkdtemp', mock.Mock(return_value=tmpdir)) self.mock_object(self.fake_driver, '_execute', mock.Mock()) self.mock_object(common, '_mount_gluster_vol', mock.Mock(side_effect=exception.GlusterfsException)) self.mock_object(common, '_umount_gluster_vol', mock.Mock()) self.mock_object(shutil, 'rmtree', mock.Mock()) self.assertRaises(exception.GlusterfsException, self._layout._wipe_gluster_vol, gmgr1) tempfile.mkdtemp.assert_called_once_with() common._mount_gluster_vol.assert_called_once_with( self.fake_driver._execute, gmgr1.export, tmpdir) self.assertFalse(self.fake_driver._execute.called) self.assertFalse(common._umount_gluster_vol.called) kwargs = {'ignore_errors': True} shutil.rmtree.assert_called_once_with(tmpdir, **kwargs) def test_wipe_gluster_vol_error_wiping_gluster_vol(self): tmpdir = '/tmp/tmpKGHKJ' gmgr = common.GlusterManager gmgr1 = 
gmgr(self.glusterfs_target1, self._execute, None, None) self._layout.glusterfs_versions = { self.glusterfs_server1: ('3', '6')} cmd = ['find', '/tmp/tmpKGHKJ', '-mindepth', '1', '-delete'] self.mock_object(tempfile, 'mkdtemp', mock.Mock(return_value=tmpdir)) self.mock_object( self.fake_driver, '_execute', mock.Mock(side_effect=exception.ProcessExecutionError)) self.mock_object(common, '_mount_gluster_vol', mock.Mock()) self.mock_object(common, '_umount_gluster_vol', mock.Mock()) self.mock_object(shutil, 'rmtree', mock.Mock()) self.assertRaises(exception.GlusterfsException, self._layout._wipe_gluster_vol, gmgr1) tempfile.mkdtemp.assert_called_once_with() common._mount_gluster_vol.assert_called_once_with( self.fake_driver._execute, gmgr1.export, tmpdir) kwargs = {'run_as_root': True} self.fake_driver._execute.assert_called_once_with( *cmd, **kwargs) common._umount_gluster_vol.assert_called_once_with( self.fake_driver._execute, tmpdir) kwargs = {'ignore_errors': True} shutil.rmtree.assert_called_once_with(tmpdir, **kwargs) def test_create_share(self): self._layout._pop_gluster_vol = mock.Mock( return_value=self.glusterfs_target1) gmgr1 = common.GlusterManager(self.glusterfs_target1) gmgr1.set_vol_option = mock.Mock() self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=gmgr1)) self.mock_object(self.fake_driver, '_setup_via_manager', mock.Mock(return_value='host1:/gv1')) share = new_share() exp_locn = self._layout.create_share(self._context, share) self._layout._pop_gluster_vol.assert_called_once_with(share['size']) self.fake_driver._setup_via_manager.assert_called_once_with( {'manager': gmgr1, 'share': share}) self._layout.private_storage.update.assert_called_once_with( share['id'], {'volume': self.glusterfs_target1}) gmgr1.set_vol_option.assert_called_once_with( 'user.manila-share', share['id']) self.assertEqual('host1:/gv1', exp_locn) def test_create_share_error(self): self._layout._pop_gluster_vol = mock.Mock( side_effect=exception.GlusterfsException) share = new_share() self.assertRaises(exception.GlusterfsException, self._layout.create_share, self._context, share) self._layout._pop_gluster_vol.assert_called_once_with( share['size']) @ddt.data(None, '', 'Eeyore') def test_delete_share(self, clone_of): self._layout._push_gluster_vol = mock.Mock() self._layout._wipe_gluster_vol = mock.Mock() gmgr = common.GlusterManager gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None) gmgr1.set_vol_option = mock.Mock() gmgr1.get_vol_option = mock.Mock(return_value=clone_of) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=gmgr1)) self._layout.gluster_used_vols = set([self.glusterfs_target1]) self._layout.delete_share(self._context, self.share1) gmgr1.get_vol_option.assert_called_once_with( 'user.manila-cloned-from') self._layout._wipe_gluster_vol.assert_called_once_with(gmgr1) self._layout._push_gluster_vol.assert_called_once_with( self.glusterfs_target1) self._layout.private_storage.delete.assert_called_once_with( self.share1['id']) gmgr1.set_vol_option.assert_called_once_with( 'user.manila-share', 'NONE') def test_delete_share_clone(self): self._layout._push_gluster_vol = mock.Mock() self._layout._wipe_gluster_vol = mock.Mock() gmgr = common.GlusterManager gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None) gmgr1.gluster_call = mock.Mock() gmgr1.get_vol_option = mock.Mock(return_value=FAKE_UUID1) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=gmgr1)) self._layout.gluster_used_vols = 
set([self.glusterfs_target1]) self._layout.delete_share(self._context, self.share1) gmgr1.get_vol_option.assert_called_once_with( 'user.manila-cloned-from') self.assertFalse(self._layout._wipe_gluster_vol.called) self._layout._push_gluster_vol.assert_called_once_with( self.glusterfs_target1) self._layout.private_storage.delete.assert_called_once_with( self.share1['id']) gmgr1.gluster_call.assert_called_once_with( 'volume', 'delete', 'gv1') def test_delete_share_error(self): self._layout._wipe_gluster_vol = mock.Mock() self._layout._wipe_gluster_vol.side_effect = ( exception.GlusterfsException) self._layout._push_gluster_vol = mock.Mock() gmgr = common.GlusterManager gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None) gmgr1.get_vol_option = mock.Mock(return_value=None) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=gmgr1)) self._layout.gluster_used_vols = set([self.glusterfs_target1]) self.assertRaises(exception.GlusterfsException, self._layout.delete_share, self._context, self.share1) self._layout._wipe_gluster_vol.assert_called_once_with(gmgr1) self.assertFalse(self._layout._push_gluster_vol.called) def test_delete_share_missing_record(self): self.mock_object(self._layout, '_share_manager', mock.Mock(return_value=None)) self._layout.delete_share(self._context, self.share1) self._layout._share_manager.assert_called_once_with(self.share1) def test_create_snapshot(self): self._layout.gluster_nosnap_vols_dict = {} self._layout.glusterfs_versions = {self.glusterfs_server1: ('3', '6')} gmgr = common.GlusterManager gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None) self._layout.gluster_used_vols = set([self.glusterfs_target1]) self.mock_object(gmgr1, 'gluster_call', mock.Mock( side_effect=(glusterXMLOut(ret=0, errno=0),))) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=gmgr1)) snapshot = { 'id': 'fake_snap_id', 'share_id': self.share1['id'], 'share': self.share1 } ret = self._layout.create_snapshot(self._context, snapshot) self.assertIsNone(ret) args = ('--xml', 'snapshot', 'create', 'manila-fake_snap_id', gmgr1.volume) gmgr1.gluster_call.assert_called_once_with(*args, log=mock.ANY) @ddt.data({'side_effect': (glusterXMLOut(ret=-1, errno=2),), '_exception': exception.GlusterfsException}, {'side_effect': (('', ''),), '_exception': exception.GlusterfsException}) @ddt.unpack def test_create_snapshot_error(self, side_effect, _exception): self._layout.gluster_nosnap_vols_dict = {} self._layout.glusterfs_versions = {self.glusterfs_server1: ('3', '6')} gmgr = common.GlusterManager gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None) self._layout.gluster_used_vols = set([self.glusterfs_target1]) self.mock_object(gmgr1, 'gluster_call', mock.Mock(side_effect=side_effect)) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=gmgr1)) snapshot = { 'id': 'fake_snap_id', 'share_id': self.share1['id'], 'share': self.share1 } self.assertRaises(_exception, self._layout.create_snapshot, self._context, snapshot) args = ('--xml', 'snapshot', 'create', 'manila-fake_snap_id', gmgr1.volume) gmgr1.gluster_call.assert_called_once_with(*args, log=mock.ANY) @ddt.data({"vers_minor": '6', "exctype": exception.GlusterfsException}, {"vers_minor": '7', "exctype": exception.ShareSnapshotNotSupported}) @ddt.unpack def test_create_snapshot_no_snap(self, vers_minor, exctype): self._layout.gluster_nosnap_vols_dict = {} self._layout.glusterfs_versions = { self.glusterfs_server1: ('3', vers_minor)} gmgr = common.GlusterManager 
gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None) self._layout.gluster_used_vols = set([self.glusterfs_target1]) self.mock_object(gmgr1, 'gluster_call', mock.Mock( side_effect=(glusterXMLOut(ret=-1, errno=0),))) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=gmgr1)) snapshot = { 'id': 'fake_snap_id', 'share_id': self.share1['id'], 'share': self.share1 } self.assertRaises(exctype, self._layout.create_snapshot, self._context, snapshot) args = ('--xml', 'snapshot', 'create', 'manila-fake_snap_id', gmgr1.volume) gmgr1.gluster_call.assert_called_once_with(*args, log=mock.ANY) @ddt.data({"vers_minor": '6', "exctype": exception.GlusterfsException}, {"vers_minor": '7', "exctype": exception.ShareSnapshotNotSupported}) @ddt.unpack def test_create_snapshot_no_snap_cached(self, vers_minor, exctype): self._layout.gluster_nosnap_vols_dict = { self.glusterfs_target1: 'fake error'} self._layout.glusterfs_versions = { self.glusterfs_server1: ('3', vers_minor)} self._layout.gluster_used_vols = set([self.glusterfs_target1]) gmgr = common.GlusterManager gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None) self.mock_object(self._layout, '_share_manager', mock.Mock(return_value=gmgr1)) snapshot = { 'id': 'fake_snap_id', 'share_id': self.share1['id'], 'share': self.share1 } self.assertRaises(exctype, self._layout.create_snapshot, self._context, snapshot) def test_find_actual_backend_snapshot_name(self): gmgr = common.GlusterManager gmgr1 = gmgr(self.share1['export_location'], self._execute, None, None) self.mock_object(gmgr1, 'gluster_call', mock.Mock(return_value=('fake_snap_id_xyz', ''))) snapshot = { 'id': 'fake_snap_id', 'share_id': self.share1['id'], 'share': self.share1 } ret = self._layout._find_actual_backend_snapshot_name(gmgr1, snapshot) args = ('snapshot', 'list', gmgr1.volume, '--mode=script') gmgr1.gluster_call.assert_called_once_with(*args, log=mock.ANY) self.assertEqual('fake_snap_id_xyz', ret) @ddt.data('this is too bad', 'fake_snap_id_xyx\nfake_snap_id_pqr') def test_find_actual_backend_snapshot_name_bad_snap_list(self, snaplist): gmgr = common.GlusterManager gmgr1 = gmgr(self.share1['export_location'], self._execute, None, None) self.mock_object(gmgr1, 'gluster_call', mock.Mock(return_value=(snaplist, ''))) snapshot = { 'id': 'fake_snap_id', 'share_id': self.share1['id'], 'share': self.share1 } self.assertRaises(exception.GlusterfsException, self._layout._find_actual_backend_snapshot_name, gmgr1, snapshot) args = ('snapshot', 'list', gmgr1.volume, '--mode=script') gmgr1.gluster_call.assert_called_once_with(*args, log=mock.ANY) @ddt.data({'glusterfs_target': 'root@host1:/gv1', 'glusterfs_server': 'root@host1'}, {'glusterfs_target': 'host1:/gv1', 'glusterfs_server': 'host1'}) @ddt.unpack def test_create_share_from_snapshot(self, glusterfs_target, glusterfs_server): share = new_share() snapshot = { 'id': 'fake_snap_id', 'share_instance': new_share(export_location=glusterfs_target), 'share_id': 'fake_share_id', } volume = ''.join(['manila-', share['id']]) new_vol_addr = ':/'.join([glusterfs_server, volume]) gmgr = common.GlusterManager old_gmgr = gmgr(glusterfs_target, self._execute, None, None) new_gmgr = gmgr(new_vol_addr, self._execute, None, None) self._layout.gluster_used_vols = set([glusterfs_target]) self._layout.glusterfs_versions = {glusterfs_server: ('3', '7')} self.mock_object(old_gmgr, 'gluster_call', mock.Mock(side_effect=[('', ''), ('', '')])) self.mock_object(new_gmgr, 'gluster_call', mock.Mock(side_effect=[('', ''), ('', ''), ('', 
'')])) self.mock_object(new_gmgr, 'get_vol_option', mock.Mock()) new_gmgr.get_vol_option.return_value = ( 'glusterfs-server-1,client') self.mock_object(self._layout, '_find_actual_backend_snapshot_name', mock.Mock(return_value='fake_snap_id_xyz')) self.mock_object(self._layout, '_share_manager', mock.Mock(return_value=old_gmgr)) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=new_gmgr)) self.mock_object(self.fake_driver, '_setup_via_manager', mock.Mock(return_value='host1:/gv1')) ret = self._layout.create_share_from_snapshot( self._context, share, snapshot, None) (self._layout._find_actual_backend_snapshot_name. assert_called_once_with(old_gmgr, snapshot)) args = (('snapshot', 'activate', 'fake_snap_id_xyz', 'force', '--mode=script'), ('snapshot', 'clone', volume, 'fake_snap_id_xyz')) old_gmgr.gluster_call.assert_has_calls( [mock.call(*a, log=mock.ANY) for a in args]) args = (('volume', 'start', volume), ('volume', 'set', volume, 'user.manila-share', share['id']), ('volume', 'set', volume, 'user.manila-cloned-from', snapshot['share_id'])) new_gmgr.gluster_call.assert_has_calls( [mock.call(*a, log=mock.ANY) for a in args], any_order=True) self._layout._share_manager.assert_called_once_with( snapshot['share_instance']) self._layout._glustermanager.assert_called_once_with( gmgr.parse(new_vol_addr)) self._layout.driver._setup_via_manager.assert_called_once_with( {'manager': new_gmgr, 'share': share}, {'manager': old_gmgr, 'share': snapshot['share_instance']}) self._layout.private_storage.update.assert_called_once_with( share['id'], {'volume': new_vol_addr}) self.assertIn( new_vol_addr, self._layout.gluster_used_vols) self.assertEqual('host1:/gv1', ret) def test_create_share_from_snapshot_error_unsupported_gluster_version( self): glusterfs_target = 'root@host1:/gv1' glusterfs_server = 'root@host1' share = new_share() volume = ''.join(['manila-', share['id']]) new_vol_addr = ':/'.join([glusterfs_server, volume]) gmgr = common.GlusterManager old_gmgr = gmgr(glusterfs_target, self._execute, None, None) new_gmgr = gmgr(new_vol_addr, self._execute, None, None) self._layout.gluster_used_vols_dict = {glusterfs_target: old_gmgr} self._layout.glusterfs_versions = {glusterfs_server: ('3', '6')} self.mock_object( old_gmgr, 'gluster_call', mock.Mock(side_effect=[('', ''), ('', '')])) self.mock_object(new_gmgr, 'get_vol_option', mock.Mock()) new_gmgr.get_vol_option.return_value = ( 'glusterfs-server-1,client') self.mock_object(self._layout, '_find_actual_backend_snapshot_name', mock.Mock(return_value='fake_snap_id_xyz')) self.mock_object(self._layout, '_share_manager', mock.Mock(return_value=old_gmgr)) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=new_gmgr)) snapshot = { 'id': 'fake_snap_id', 'share_instance': new_share(export_location=glusterfs_target) } self.assertRaises(exception.GlusterfsException, self._layout.create_share_from_snapshot, self._context, share, snapshot) self.assertFalse( self._layout._find_actual_backend_snapshot_name.called) self.assertFalse(old_gmgr.gluster_call.called) self._layout._share_manager.assert_called_once_with( snapshot['share_instance']) self.assertFalse(self._layout._glustermanager.called) self.assertFalse(new_gmgr.get_vol_option.called) self.assertFalse(new_gmgr.gluster_call.called) self.assertNotIn(new_vol_addr, self._layout.glusterfs_versions.keys()) def test_delete_snapshot(self): self._layout.gluster_nosnap_vols_dict = {} gmgr = common.GlusterManager gmgr1 = gmgr(self.share1['export_location'], self._execute, None, 
None) self._layout.gluster_used_vols = set([self.glusterfs_target1]) self.mock_object(self._layout, '_find_actual_backend_snapshot_name', mock.Mock(return_value='fake_snap_id_xyz')) self.mock_object( gmgr1, 'gluster_call', mock.Mock(return_value=glusterXMLOut(ret=0, errno=0))) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=gmgr1)) snapshot = { 'id': 'fake_snap_id', 'share_id': self.share1['id'], 'share': self.share1 } ret = self._layout.delete_snapshot(self._context, snapshot) self.assertIsNone(ret) args = ('--xml', 'snapshot', 'delete', 'fake_snap_id_xyz', '--mode=script') gmgr1.gluster_call.assert_called_once_with(*args, log=mock.ANY) (self._layout._find_actual_backend_snapshot_name. assert_called_once_with(gmgr1, snapshot)) @ddt.data({'side_effect': (glusterXMLOut(ret=-1, errno=0),), '_exception': exception.GlusterfsException}, {'side_effect': (('', ''),), '_exception': exception.GlusterfsException}) @ddt.unpack def test_delete_snapshot_error(self, side_effect, _exception): self._layout.gluster_nosnap_vols_dict = {} gmgr = common.GlusterManager gmgr1 = gmgr(self.share1['export_location'], self._execute, None, None) self._layout.gluster_used_vols = set([self.glusterfs_target1]) self.mock_object(self._layout, '_find_actual_backend_snapshot_name', mock.Mock(return_value='fake_snap_id_xyz')) args = ('--xml', 'snapshot', 'delete', 'fake_snap_id_xyz', '--mode=script') self.mock_object( gmgr1, 'gluster_call', mock.Mock(side_effect=side_effect)) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=gmgr1)) snapshot = { 'id': 'fake_snap_id', 'share_id': self.share1['id'], 'share': self.share1 } self.assertRaises(_exception, self._layout.delete_snapshot, self._context, snapshot) gmgr1.gluster_call.assert_called_once_with(*args, log=mock.ANY) (self._layout._find_actual_backend_snapshot_name. assert_called_once_with(gmgr1, snapshot)) @ddt.data( ('manage_existing', ('share', 'driver_options'), {}), ('unmanage', ('share',), {}), ('extend_share', ('share', 'new_size'), {'share_server': None}), ('shrink_share', ('share', 'new_size'), {'share_server': None})) def test_nonimplemented_methods(self, method_invocation): method, args, kwargs = method_invocation self.assertRaises(NotImplementedError, getattr(self._layout, method), *args, **kwargs) manila-2.0.0/manila/tests/share/drivers/windows/0000775000567000056710000000000012701407265022774 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/windows/test_windows_utils.py0000664000567000056710000003773712701407107027333 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Cloudbase Solutions SRL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
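# Illustrative sketch, not part of the original manila module: the test cases
# below verify that WindowsUtils methods compose PowerShell commands (either
# as argument lists or as pipeline strings) and pass them unchanged to an
# injected remote-execution callable. FakeDiskUtils and
# _demo_remote_exec_pattern are hypothetical names introduced here only to
# show that dispatch pattern in isolation; the real helper lives in
# manila.share.drivers.windows.windows_utils.


class FakeDiskUtils(object):
    """Minimal stand-in mirroring the WindowsUtils call pattern."""

    def __init__(self, remote_execute):
        self._remote_exec = remote_execute

    def set_disk_online(self, server, disk_number):
        # Commands are composed as plain argument lists and handed verbatim
        # to the remote executor; the tests below assert on exactly this
        # (server, cmd) pair.
        cmd = ["Set-Disk", "-Number", disk_number, "-IsOffline", 0]
        self._remote_exec(server, cmd)


def _demo_remote_exec_pattern():
    # Usage sketch: a mock stands in for the WinRM-backed executor, so the
    # command contents can be asserted without reaching a real server.
    import mock

    remote_exec = mock.Mock()
    FakeDiskUtils(remote_exec).set_disk_online("server-1", 2)
    remote_exec.assert_called_once_with(
        "server-1", ["Set-Disk", "-Number", 2, "-IsOffline", 0])
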
import ddt import mock from manila.share.drivers.windows import windows_utils from manila import test @ddt.ddt class WindowsUtilsTestCase(test.TestCase): def setUp(self): self._remote_exec = mock.Mock() self._windows_utils = windows_utils.WindowsUtils(self._remote_exec) super(WindowsUtilsTestCase, self).setUp() def test_initialize_disk(self): self._windows_utils.initialize_disk(mock.sentinel.server, mock.sentinel.disk_number) cmd = ["Initialize-Disk", "-Number", mock.sentinel.disk_number] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) def test_create_partition(self): self._windows_utils.create_partition(mock.sentinel.server, mock.sentinel.disk_number) cmd = ["New-Partition", "-DiskNumber", mock.sentinel.disk_number, "-UseMaximumSize"] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) def test_format_partition(self): self._windows_utils.format_partition(mock.sentinel.server, mock.sentinel.disk_number, mock.sentinel.partition_number) cmd = ("Get-Partition -DiskNumber %(disk_number)s " "-PartitionNumber %(partition_number)s | " "Format-Volume -FileSystem NTFS -Force -Confirm:$false" % { 'disk_number': mock.sentinel.disk_number, 'partition_number': mock.sentinel.partition_number, }) self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) def test_add_access_path(self): self._windows_utils.add_access_path(mock.sentinel.server, mock.sentinel.mount_path, mock.sentinel.disk_number, mock.sentinel.partition_number) cmd = ["Add-PartitionAccessPath", "-DiskNumber", mock.sentinel.disk_number, "-PartitionNumber", mock.sentinel.partition_number, "-AccessPath", self._windows_utils.quote_string( mock.sentinel.mount_path) ] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) def test_resize_partition(self): self._windows_utils.resize_partition(mock.sentinel.server, mock.sentinel.size_bytes, mock.sentinel.disk_number, mock.sentinel.partition_number) cmd = ['Resize-Partition', '-DiskNumber', mock.sentinel.disk_number, '-PartitionNumber', mock.sentinel.partition_number, '-Size', mock.sentinel.size_bytes] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) @ddt.data("1", "") def test_get_disk_number_by_serial_number(self, disk_number): mock_serial_number = "serial_number" self._remote_exec.return_value = (disk_number, mock.sentinel.std_err) expected_disk_number = int(disk_number) if disk_number else None result = self._windows_utils.get_disk_number_by_serial_number( mock.sentinel.server, mock_serial_number) pattern = "%s*" % mock_serial_number cmd = ("Get-Disk | " "Where-Object {$_.SerialNumber -like '%s'} | " "Select-Object -ExpandProperty Number" % pattern) self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) self.assertEqual(expected_disk_number, result) @ddt.data("1", "") def test_get_disk_number_by_mount_path(self, disk_number): fake_mount_path = "fake_mount_path" self._remote_exec.return_value = (disk_number, mock.sentinel.std_err) expected_disk_number = int(disk_number) if disk_number else None result = self._windows_utils.get_disk_number_by_mount_path( mock.sentinel.server, fake_mount_path) cmd = ('Get-Partition | ' 'Where-Object {$_.AccessPaths -contains "%s"} | ' 'Select-Object -ExpandProperty DiskNumber' % (fake_mount_path + "\\")) self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) self.assertEqual(expected_disk_number, result) def test_get_volume_path_by_mount_path(self): fake_mount_path = "fake_mount_path" fake_volume_path = "fake_volume_path" self._remote_exec.return_value = 
fake_volume_path + '\r\n', None result = self._windows_utils.get_volume_path_by_mount_path( mock.sentinel.server, fake_mount_path) cmd = ('Get-Partition | ' 'Where-Object {$_.AccessPaths -contains "%s"} | ' 'Get-Volume | ' 'Select-Object -ExpandProperty Path' % (fake_mount_path + "\\")) self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) self.assertEqual(fake_volume_path, result) def test_get_disk_space_by_path(self): fake_disk_size = 1024 fake_free_bytes = 1000 fake_fsutil_output = ("Total # of bytes : %(total_bytes)s" "Total # of avail free bytes : %(free_bytes)s" % dict(total_bytes=fake_disk_size, free_bytes=fake_free_bytes)) self._remote_exec.return_value = fake_fsutil_output, None result = self._windows_utils.get_disk_space_by_path( mock.sentinel.server, mock.sentinel.mount_path) cmd = ["fsutil", "volume", "diskfree", self._windows_utils.quote_string(mock.sentinel.mount_path)] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) self.assertEqual((fake_disk_size, fake_free_bytes), result) def test_get_partition_maximum_size(self): fake_max_size = 1024 self._remote_exec.return_value = ("%s" % fake_max_size, mock.sentinel.std_err) result = self._windows_utils.get_partition_maximum_size( mock.sentinel.server, mock.sentinel.disk_number, mock.sentinel.partition_number) cmd = ('Get-PartitionSupportedSize -DiskNumber %(disk_number)s ' '-PartitionNumber %(partition_number)s | ' 'Select-Object -ExpandProperty SizeMax' % dict(disk_number=mock.sentinel.disk_number, partition_number=mock.sentinel.partition_number)) self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) self.assertEqual(fake_max_size, result) def test_set_disk_online_status(self): self._windows_utils.set_disk_online_status(mock.sentinel.server, mock.sentinel.disk_number, online=True) cmd = ["Set-Disk", "-Number", mock.sentinel.disk_number, "-IsOffline", 0] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) def test_set_disk_readonly_status(self): self._windows_utils.set_disk_readonly_status(mock.sentinel.server, mock.sentinel.disk_number, readonly=False) cmd = ["Set-Disk", "-Number", mock.sentinel.disk_number, "-IsReadOnly", 0] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) def test_update_disk(self): self._windows_utils.update_disk(mock.sentinel.server, mock.sentinel.disk_number) cmd = ["Update-Disk", mock.sentinel.disk_number] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) def test_join_domain(self): mock_server = {'ip': mock.sentinel.server_ip} self._windows_utils.join_domain(mock_server, mock.sentinel.domain, mock.sentinel.admin_username, mock.sentinel.admin_password) cmds = [ ('$password = "%s" | ' 'ConvertTo-SecureString -asPlainText -Force' % mock.sentinel.admin_password), ('$credential = ' 'New-Object System.Management.Automation.PSCredential(' '"%s", $password)' % mock.sentinel.admin_username), ('Add-Computer -DomainName "%s" -Credential $credential' % mock.sentinel.domain)] cmd = ";".join(cmds) self._remote_exec.assert_called_once_with(mock_server, cmd) def test_unjoin_domain(self): self._windows_utils.unjoin_domain(mock.sentinel.server, mock.sentinel.admin_username, mock.sentinel.admin_password) cmds = [ ('$password = "%s" | ' 'ConvertTo-SecureString -asPlainText -Force' % mock.sentinel.admin_password), ('$credential = ' 'New-Object System.Management.Automation.PSCredential(' '"%s", $password)' % mock.sentinel.admin_username), ('Remove-Computer -UnjoinDomaincredential $credential ' '-Passthru -Verbose -Force')] 
cmd = ";".join(cmds) self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) def test_get_current_domain(self): fake_domain = " domain" self._remote_exec.return_value = (fake_domain, mock.sentinel.std_err) result = self._windows_utils.get_current_domain(mock.sentinel.server) cmd = "(Get-WmiObject Win32_ComputerSystem).Domain" self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) self.assertEqual(fake_domain.strip(), result) def test_ensure_directory_exists(self): self._windows_utils.ensure_directory_exists(mock.sentinel.server, mock.sentinel.path) cmd = ["New-Item", "-ItemType", "Directory", "-Force", "-Path", self._windows_utils.quote_string(mock.sentinel.path)] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) @ddt.data(False, True) @mock.patch.object(windows_utils.WindowsUtils, 'path_exists') def test_remove(self, is_junction, mock_path_exists): recurse = True self._windows_utils.remove(mock.sentinel.server, mock.sentinel.path, is_junction=is_junction, recurse=recurse) if is_junction: cmd = ('[System.IO.Directory]::Delete(' '%(path)s, %(recurse)d)' % dict(path=self._windows_utils.quote_string( mock.sentinel.path), recurse=recurse)) else: cmd = ["Remove-Item", "-Confirm:$false", "-Path", self._windows_utils.quote_string(mock.sentinel.path), "-Force", '-Recurse'] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) @mock.patch.object(windows_utils.WindowsUtils, 'path_exists') def test_remove_unexisting_path(self, mock_path_exists): mock_path_exists.return_value = False self._windows_utils.remove(mock.sentinel.server, mock.sentinel.path) self.assertFalse(self._remote_exec.called) @ddt.data("True", "False") def test_path_exists(self, path_exists): self._remote_exec.return_value = (path_exists, mock.sentinel.std_err) result = self._windows_utils.path_exists(mock.sentinel.server, mock.sentinel.path) cmd = ["Test-Path", mock.sentinel.path] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) self.assertEqual(path_exists == "True", result) def test_normalize_path(self): fake_path = "C:/" result = self._windows_utils.normalize_path(fake_path) self.assertEqual("C:\\", result) def test_get_interface_index_by_ip(self): _FAKE_INDEX = "2" self._remote_exec.return_value = (_FAKE_INDEX, mock.sentinel.std_err) result = self._windows_utils.get_interface_index_by_ip( mock.sentinel.server, mock.sentinel.ip) cmd = ('Get-NetIPAddress | ' 'Where-Object {$_.IPAddress -eq "%(ip)s"} | ' 'Select-Object -ExpandProperty InterfaceIndex' % dict(ip=mock.sentinel.ip)) self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) self.assertEqual(int(_FAKE_INDEX), result) def test_set_dns_client_search_list(self): mock_search_list = ["A", "B", "C"] self._windows_utils.set_dns_client_search_list(mock.sentinel.server, mock_search_list) cmd = ["Set-DnsClientGlobalSetting", "-SuffixSearchList", "@('A','B','C')"] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) def test_set_dns_client_server_addresses(self): mock_dns_servers = ["A", "B", "C"] self._windows_utils.set_dns_client_server_addresses( mock.sentinel.server, mock.sentinel.if_index, mock_dns_servers) cmd = ["Set-DnsClientServerAddress", "-InterfaceIndex", mock.sentinel.if_index, "-ServerAddresses", "('A','B','C')"] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) def test_set_win_reg_value(self): self._windows_utils.set_win_reg_value(mock.sentinel.server, mock.sentinel.path, mock.sentinel.key, mock.sentinel.value) cmd = ['Set-ItemProperty', 
'-Path', self._windows_utils.quote_string(mock.sentinel.path), '-Name', mock.sentinel.key, '-Value', mock.sentinel.value] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) @ddt.data(None, mock.sentinel.key_name) def test_get_win_reg_value(self, key_name): self._remote_exec.return_value = (mock.sentinel.value, mock.sentinel.std_err) result = self._windows_utils.get_win_reg_value(mock.sentinel.server, mock.sentinel.path, name=key_name) cmd = "Get-ItemProperty -Path %s" % ( self._windows_utils.quote_string(mock.sentinel.path)) if key_name: cmd += " | Select-Object -ExpandProperty %s" % key_name self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd, retry=False) self.assertEqual(mock.sentinel.value, result) def test_quote_string(self): result = self._windows_utils.quote_string(mock.sentinel.string) self.assertEqual('"%s"' % mock.sentinel.string, result) manila-2.0.0/manila/tests/share/drivers/windows/test_winrm_helper.py0000664000567000056710000002637112701407107027104 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Cloudbase Solutions SRL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt import mock from oslo_concurrency import processutils from oslo_utils import importutils from oslo_utils import strutils from manila import exception from manila.share.drivers.windows import winrm_helper from manila import test @ddt.ddt class WinRMHelperTestCase(test.TestCase): _FAKE_SERVER = {'ip': mock.sentinel.ip} @mock.patch.object(importutils, 'import_module') def setUp(self, mock_import_module): self._winrm = winrm_helper.WinRMHelper() super(WinRMHelperTestCase, self).setUp() @ddt.data({'import_exc': None}, {'import_exc': ImportError}) @mock.patch.object(importutils, 'import_module') @ddt.unpack def test_setup_winrm(self, mock_import_module, import_exc): winrm_helper.winrm = None mock_import_module.side_effect = import_exc if import_exc: self.assertRaises(exception.ShareBackendException, winrm_helper.setup_winrm) else: winrm_helper.setup_winrm() self.assertEqual(mock_import_module.return_value, winrm_helper.winrm) mock_import_module.assert_called_once_with('winrm') @mock.patch.object(winrm_helper.WinRMHelper, '_get_auth') @mock.patch.object(winrm_helper, 'WinRMConnection') def test_get_conn(self, mock_conn_cls, mock_get_auth): mock_auth = {'mock_auth_key': mock.sentinel.auth_opt} mock_get_auth.return_value = mock_auth conn = self._winrm._get_conn(self._FAKE_SERVER) mock_get_auth.assert_called_once_with(self._FAKE_SERVER) mock_conn_cls.assert_called_once_with( ip=self._FAKE_SERVER['ip'], conn_timeout=self._winrm._config.winrm_conn_timeout, operation_timeout=self._winrm._config.winrm_operation_timeout, **mock_auth) self.assertEqual(mock_conn_cls.return_value, conn) @ddt.data({}, {'exit_code': 1}, {'exit_code': 1, 'check_exit_code': False}) @mock.patch.object(strutils, 'mask_password') @mock.patch.object(winrm_helper.WinRMHelper, '_parse_command') @mock.patch.object(winrm_helper.WinRMHelper, '_get_conn') @ddt.unpack def test_execute(self, 
mock_get_conn, mock_parse_command, mock_mask_password, check_exit_code=True, exit_code=0): mock_parse_command.return_value = (mock.sentinel.parsed_cmd, mock.sentinel.sanitized_cmd) mock_conn = mock_get_conn.return_value mock_conn.execute.return_value = (mock.sentinel.stdout, mock.sentinel.stderr, exit_code) if exit_code == 0 or not check_exit_code: result = self._winrm.execute(mock.sentinel.server, mock.sentinel.command, check_exit_code=check_exit_code, retry=False) expected_result = (mock.sentinel.stdout, mock.sentinel.stderr) self.assertEqual(expected_result, result) else: self.assertRaises(processutils.ProcessExecutionError, self._winrm.execute, mock.sentinel.server, mock.sentinel.command, check_exit_code=check_exit_code, retry=False) mock_get_conn.assert_called_once_with(mock.sentinel.server) mock_parse_command.assert_called_once_with(mock.sentinel.command) mock_conn.execute.assert_called_once_with(mock.sentinel.parsed_cmd) mock_mask_password.assert_has_calls([mock.call(mock.sentinel.stdout), mock.call(mock.sentinel.stderr)]) @mock.patch('base64.b64encode') @mock.patch.object(strutils, 'mask_password') def test_parse_command(self, mock_mask_password, mock_base64): mock_mask_password.return_value = mock.sentinel.sanitized_cmd mock_base64.return_value = mock.sentinel.encoded_string cmd = ('Get-Disk', '-Number', 1) result = self._winrm._parse_command(cmd) joined_cmd = 'Get-Disk -Number 1' expected_command = ("powershell.exe -ExecutionPolicy RemoteSigned " "-NonInteractive -EncodedCommand %s" % mock.sentinel.encoded_string) expected_result = expected_command, mock.sentinel.sanitized_cmd mock_mask_password.assert_called_once_with(joined_cmd) mock_base64.assert_called_once_with(joined_cmd.encode("utf_16_le")) self.assertEqual(expected_result, result) def _test_get_auth(self, use_cert_auth=False): mock_server = {'use_cert_auth': use_cert_auth, 'cert_pem_path': mock.sentinel.pem_path, 'cert_key_pem_path': mock.sentinel.key_path, 'username': mock.sentinel.username, 'password': mock.sentinel.password} result = self._winrm._get_auth(mock_server) expected_result = {'username': mock_server['username']} if use_cert_auth: expected_result['cert_pem_path'] = mock_server['cert_pem_path'] expected_result['cert_key_pem_path'] = ( mock_server['cert_key_pem_path']) else: expected_result['password'] = mock_server['password'] self.assertEqual(expected_result, result) def test_get_auth_using_certificates(self): self._test_get_auth(use_cert_auth=True) def test_get_auth_using_password(self): self._test_get_auth() class WinRMConnectionTestCase(test.TestCase): @mock.patch.object(winrm_helper, 'setup_winrm') @mock.patch.object(winrm_helper, 'winrm') @mock.patch.object(winrm_helper.WinRMConnection, '_get_url') @mock.patch.object(winrm_helper.WinRMConnection, '_get_default_port') def setUp(self, mock_get_port, mock_get_url, mock_winrm, mock_setup_winrm): self._winrm = winrm_helper.WinRMConnection() self._mock_conn = mock_winrm.protocol.Protocol.return_value super(WinRMConnectionTestCase, self).setUp() @mock.patch.object(winrm_helper, 'setup_winrm') @mock.patch.object(winrm_helper, 'winrm') @mock.patch.object(winrm_helper.WinRMConnection, '_get_url') @mock.patch.object(winrm_helper.WinRMConnection, '_get_default_port') def test_init_conn(self, mock_get_port, mock_get_url, mock_winrm, mock_setup_winrm): # certificates are passed so we expect cert auth to be used cert_auth = True winrm_conn = winrm_helper.WinRMConnection( ip=mock.sentinel.ip, username=mock.sentinel.username, password=mock.sentinel.password, 
cert_pem_path=mock.sentinel.cert_pem_path, cert_key_pem_path=mock.sentinel.cert_key_pem_path, operation_timeout=mock.sentinel.operation_timeout, conn_timeout=mock.sentinel.conn_timeout) mock_get_port.assert_called_once_with(cert_auth) mock_get_url.assert_called_once_with(mock.sentinel.ip, mock_get_port.return_value, cert_auth) mock_winrm.protocol.Protocol.assert_called_once_with( endpoint=mock_get_url.return_value, transport=winrm_helper.TRANSPORT_SSL, username=mock.sentinel.username, password=mock.sentinel.password, cert_pem=mock.sentinel.cert_pem_path, cert_key_pem=mock.sentinel.cert_key_pem_path) self.assertEqual(mock_winrm.protocol.Protocol.return_value, winrm_conn._conn) self.assertEqual(mock.sentinel.conn_timeout, winrm_conn._conn.transport.timeout) winrm_conn._conn.set_timeout.assert_called_once_with( mock.sentinel.operation_timeout) def test_get_default_port_https(self): port = self._winrm._get_default_port(use_ssl=True) self.assertEqual(winrm_helper.DEFAULT_PORT_HTTPS, port) def test_get_default_port_http(self): port = self._winrm._get_default_port(use_ssl=False) self.assertEqual(winrm_helper.DEFAULT_PORT_HTTP, port) def _test_get_url(self, ip=None, use_ssl=True): if not ip: self.assertRaises(exception.ShareBackendException, self._winrm._get_url, ip=ip, port=mock.sentinel.port, use_ssl=use_ssl) else: url = self._winrm._get_url(ip=ip, port=mock.sentinel.port, use_ssl=use_ssl) expected_protocol = 'https' if use_ssl else 'http' expected_url = self._winrm._URL_TEMPLATE % dict( protocol=expected_protocol, port=mock.sentinel.port, ip=ip) self.assertEqual(expected_url, url) def test_get_url_using_ssl(self): self._test_get_url(ip=mock.sentinel.ip) def test_get_url_using_plaintext(self): self._test_get_url(ip=mock.sentinel.ip, use_ssl=False) def test_get_url_missing_ip(self): self._test_get_url() def _test_execute(self, get_output_exception=None): self._mock_conn.open_shell.return_value = mock.sentinel.shell_id self._mock_conn.run_command.return_value = mock.sentinel.cmd_id command_output = (mock.sentinel.stdout, mock.sentinel.stderr, mock.sentinel.exit_code) if get_output_exception: self._mock_conn.get_command_output.side_effect = ( get_output_exception) self.assertRaises( get_output_exception, self._winrm.execute, mock.sentinel.cmd) else: self._mock_conn.get_command_output.return_value = command_output result = self._winrm.execute(mock.sentinel.cmd) self.assertEqual(command_output, result) self._mock_conn.open_shell.assert_called_once_with() self._mock_conn.run_command.assert_called_once_with( mock.sentinel.shell_id, mock.sentinel.cmd) self._mock_conn.cleanup_command.assert_called_once_with( mock.sentinel.shell_id, mock.sentinel.cmd_id) self._mock_conn.close_shell.assert_called_once_with( mock.sentinel.shell_id) def test_execute(self): self._test_execute() def test_execute_exception(self): self._test_execute(get_output_exception=Exception) manila-2.0.0/manila/tests/share/drivers/windows/__init__.py0000664000567000056710000000000012701407107025066 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/share/drivers/windows/test_windows_smb_driver.py0000664000567000056710000003330212701407107030307 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Cloudbase Solutions SRL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt import mock import os from manila.common import constants as const from manila import exception from manila.share import configuration from manila.share.drivers import generic from manila.share.drivers.windows import service_instance from manila.share.drivers.windows import windows_smb_driver as windows_drv from manila.share.drivers.windows import windows_smb_helper from manila.share.drivers.windows import windows_utils from manila.share.drivers.windows import winrm_helper from manila import test from manila.tests import fake_share @ddt.ddt class WindowsSMBDriverTestCase(test.TestCase): @mock.patch.object(winrm_helper, 'WinRMHelper') @mock.patch.object(windows_utils, 'WindowsUtils') @mock.patch.object(windows_smb_helper, 'WindowsSMBHelper') @mock.patch.object(service_instance, 'WindowsServiceInstanceManager') def setUp(self, mock_sv_instance_mgr, mock_smb_helper_cls, mock_utils_cls, mock_winrm_helper_cls): self.flags(driver_handles_share_servers=True) self._fake_conf = configuration.Configuration(None) self._share = fake_share.fake_share(share_proto='SMB') self._share_server = dict( backend_details=mock.sentinel.backend_details) self._drv = windows_drv.WindowsSMBDriver( configuration=self._fake_conf) self._drv._setup_helpers() self._remote_execute = mock_winrm_helper_cls.return_value self._windows_utils = mock_utils_cls.return_value self._smb_helper = mock_smb_helper_cls.return_value super(WindowsSMBDriverTestCase, self).setUp() @mock.patch('manila.share.driver.ShareDriver') def test_update_share_stats(self, mock_base_driver): self._drv._update_share_stats() mock_base_driver._update_share_stats.assert_called_once_with( self._drv, data=dict(storage_protocol="CIFS")) @mock.patch.object(service_instance, 'WindowsServiceInstanceManager') def test_setup_service_instance_manager(self, mock_sv_instance_mgr): self._drv._setup_service_instance_manager() mock_sv_instance_mgr.assert_called_once_with( driver_config=self._fake_conf) def test_setup_helpers(self): expected_helpers = {"SMB": self._smb_helper, "CIFS": self._smb_helper} self._drv._setup_helpers() self.assertEqual(expected_helpers, self._drv._helpers) @mock.patch.object(generic.GenericShareDriver, '_teardown_server') def test_teardown_server(self, mock_super_teardown): mock_server = {'joined_domain': True, 'instance_id': mock.sentinel.instance_id} mock_sec_service = {'user': mock.sentinel.user, 'password': mock.sentinel.password, 'domain': mock.sentinel.domain} sv_mgr = self._drv.service_instance_manager sv_mgr.get_valid_security_service.return_value = mock_sec_service # We ensure that domain unjoin exceptions do not prevent the # service instance from being teared down. 
self._windows_utils.unjoin_domain.side_effect = Exception self._drv._teardown_server(mock_server, mock_sec_service) sv_mgr.get_valid_security_service.assert_called_once_with( mock_sec_service) self._windows_utils.unjoin_domain.assert_called_once_with( mock_server, mock_sec_service['user'], mock_sec_service['password']) mock_super_teardown.assert_called_once_with(mock_server, mock_sec_service) @mock.patch.object(windows_drv.WindowsSMBDriver, '_get_disk_number') def test_format_device(self, mock_get_disk_number): mock_get_disk_number.return_value = mock.sentinel.disk_number self._drv._format_device(mock.sentinel.server, mock.sentinel.vol) self._drv._get_disk_number.assert_called_once_with( mock.sentinel.server, mock.sentinel.vol) self._windows_utils.initialize_disk.assert_called_once_with( mock.sentinel.server, mock.sentinel.disk_number) self._windows_utils.create_partition.assert_called_once_with( mock.sentinel.server, mock.sentinel.disk_number) self._windows_utils.format_partition.assert_called_once_with( mock.sentinel.server, mock.sentinel.disk_number, self._drv._DEFAULT_SHARE_PARTITION) @mock.patch.object(windows_drv.WindowsSMBDriver, '_ensure_disk_online_and_writable') @mock.patch.object(windows_drv.WindowsSMBDriver, '_get_disk_number') @mock.patch.object(windows_drv.WindowsSMBDriver, '_get_mount_path') @mock.patch.object(windows_drv.WindowsSMBDriver, '_is_device_mounted') def test_mount_device(self, mock_device_mounted, mock_get_mount_path, mock_get_disk_number, mock_ensure_disk): mock_get_mount_path.return_value = mock.sentinel.mount_path mock_get_disk_number.return_value = mock.sentinel.disk_number mock_device_mounted.return_value = False self._drv._mount_device(share=mock.sentinel.share, server_details=mock.sentinel.server, volume=mock.sentinel.vol) mock_device_mounted.assert_called_once_with( mock.sentinel.mount_path, mock.sentinel.server, mock.sentinel.vol) mock_get_disk_number.assert_called_once_with( mock.sentinel.server, mock.sentinel.vol) self._windows_utils.ensure_directory_exists.assert_called_once_with( mock.sentinel.server, mock.sentinel.mount_path) self._windows_utils.add_access_path( mock.sentinel.server, mock.sentinel.mount_path, mock.sentinel.disk_number, self._drv._DEFAULT_SHARE_PARTITION) mock_ensure_disk.assert_called_once_with( mock.sentinel.server, mock.sentinel.disk_number) @mock.patch.object(windows_drv.WindowsSMBDriver, '_get_mount_path') def test_unmount_device(self, mock_get_mount_path): mock_get_mount_path.return_value = mock.sentinel.mount_path mock_get_disk_number_by_path = ( self._windows_utils.get_disk_number_by_mount_path) self._drv._unmount_device(mock.sentinel.share, mock.sentinel.server) mock_get_mount_path.assert_called_once_with(mock.sentinel.share) mock_get_disk_number_by_path.assert_called_once_with( mock.sentinel.server, mock.sentinel.mount_path) self._windows_utils.set_disk_online_status.assert_called_once_with( mock.sentinel.server, mock_get_disk_number_by_path.return_value, online=False) @ddt.data(None, 1) @mock.patch.object(windows_drv.WindowsSMBDriver, '_get_disk_number') @mock.patch.object(windows_drv.WindowsSMBDriver, '_ensure_disk_online_and_writable') def test_resize_filesystem(self, new_size, mock_ensure_disk, mock_get_disk_number): mock_get_disk_number.return_value = mock.sentinel.disk_number mock_get_max_size = self._windows_utils.get_partition_maximum_size mock_get_max_size.return_value = mock.sentinel.max_size self._drv._resize_filesystem(mock.sentinel.server, mock.sentinel.vol, new_size=new_size) 
mock_get_disk_number.assert_called_once_with(mock.sentinel.server, mock.sentinel.vol) self._drv._ensure_disk_online_and_writable.assert_called_once_with( mock.sentinel.server, mock.sentinel.disk_number) if not new_size: mock_get_max_size.assert_called_once_with( mock.sentinel.server, mock.sentinel.disk_number, self._drv._DEFAULT_SHARE_PARTITION) expected_new_size = mock.sentinel.max_size else: expected_new_size = new_size << 30 self._windows_utils.resize_partition.assert_called_once_with( mock.sentinel.server, expected_new_size, mock.sentinel.disk_number, self._drv._DEFAULT_SHARE_PARTITION) def test_ensure_disk_online_and_writable(self): self._drv._ensure_disk_online_and_writable( mock.sentinel.server, mock.sentinel.disk_number) self._windows_utils.update_disk.assert_called_once_with( mock.sentinel.server, mock.sentinel.disk_number) self._windows_utils.set_disk_online_status.assert_called_once_with( mock.sentinel.server, mock.sentinel.disk_number, online=True) self._windows_utils.set_disk_readonly_status.assert_called_once_with( mock.sentinel.server, mock.sentinel.disk_number, readonly=False) def test_get_mounted_share_size(self): fake_size_gb = 10 self._windows_utils.get_disk_space_by_path.return_value = ( fake_size_gb << 30, mock.sentinel.free_bytes) share_size = self._drv._get_mounted_share_size( mock.sentinel.mount_path, mock.sentinel.server) self.assertEqual(fake_size_gb, share_size) def test_get_consumed_space(self): fake_size_gb = 2 fake_free_space_gb = 1 self._windows_utils.get_disk_space_by_path.return_value = ( fake_size_gb << 30, fake_free_space_gb << 30) consumed_space = self._drv._get_consumed_space( mock.sentinel.mount_path, mock.sentinel.server) self.assertEqual(fake_size_gb - fake_free_space_gb, consumed_space) def test_get_mount_path(self): fake_mount_path = 'fake_mount_path' fake_share_name = 'fake_share_name' mock_share = {'name': fake_share_name} self.flags(share_mount_path=fake_mount_path) mount_path = self._drv._get_mount_path(mock_share) self._windows_utils.normalize_path.assert_called_once_with( os.path.join(fake_mount_path, fake_share_name)) self.assertEqual(self._windows_utils.normalize_path.return_value, mount_path) @ddt.data(None, 2) def test_get_disk_number(self, disk_number_by_serial=None): mock_get_disk_number_by_serial = ( self._windows_utils.get_disk_number_by_serial_number) mock_get_disk_number_by_serial.return_value = disk_number_by_serial mock_volume = {'id': mock.sentinel.vol_id, 'mountpoint': "/dev/sdb"} # If the disk number cannot be identified using the disk serial # number, we expect it to be retrieved based on the volume mountpoint, # having disk number 1 in this case. 
expected_disk_number = (disk_number_by_serial if disk_number_by_serial else 1) disk_number = self._drv._get_disk_number(mock.sentinel.server, mock_volume) mock_get_disk_number_by_serial.assert_called_once_with( mock.sentinel.server, mock.sentinel.vol_id) self.assertEqual(expected_disk_number, disk_number) @ddt.data(None, 2) def test_is_device_mounted(self, disk_number_by_path): mock_get_disk_number_by_path = ( self._windows_utils.get_disk_number_by_mount_path) mock_get_disk_number_by_path.return_value = disk_number_by_path expected_result = disk_number_by_path is not None is_mounted = self._drv._is_device_mounted( mount_path=mock.sentinel.mount_path, server_details=mock.sentinel.server) mock_get_disk_number_by_path.assert_called_once_with( mock.sentinel.server, mock.sentinel.mount_path) self.assertEqual(expected_result, is_mounted) @ddt.data(const.ACCESS_LEVEL_RW, const.ACCESS_LEVEL_RO) def test_allow_access(self, access_level): access = { 'access_type': 'ip', 'access_to': 'fake_dest', 'access_level': access_level, } self._drv.allow_access( mock.sentinel.context, self._share, access, share_server=self._share_server) self._smb_helper.allow_access.assert_called_once_with( self._share_server['backend_details'], self._share['name'], access['access_type'], access['access_level'], access['access_to']) def test_allow_access_unsupported(self): access = { 'access_type': 'ip', 'access_to': 'fake_dest', 'access_level': 'fakefoobar', } self.assertRaises( exception.InvalidShareAccessLevel, self._drv.allow_access, mock.sentinel.context, self._share, access, share_server=self._share_server) def test_deny_access(self): access = 'fake_access' self._drv.deny_access( mock.sentinel.context, self._share, access, share_server=self._share_server) self._smb_helper.deny_access.assert_called_once_with( self._share_server['backend_details'], self._share['name'], access) manila-2.0.0/manila/tests/share/drivers/windows/test_windows_smb_helper.py0000664000567000056710000002703712701407107030303 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Cloudbase Solutions SRL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
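# Illustrative sketch (helper names invented here, not part of the module
# below): the WindowsSMBHelper tests that follow repeatedly assert two
# shapes -- a UNC export location built from the server's public address and
# the share name, and a PowerShell command handed to the remote-execute
# callable as a plain token list.
def _example_unc_export(public_address, share_name):
    # e.g. ('1.2.3.4', 'share_x') -> r'\\1.2.3.4\share_x'
    return "\\\\%s\\%s" % (public_address, share_name)


def _example_new_share_cmd(share_name, share_path):
    # Token list passed to the remote executor, mirroring the
    # 'New-SmbShare' assertion pattern used in the tests below.
    return ['New-SmbShare', '-Name', share_name, '-Path', share_path]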
import os import ddt import mock from manila.common import constants from manila import exception from manila.share import configuration from manila.share.drivers.windows import windows_smb_helper from manila.share.drivers.windows import windows_utils from manila import test from oslo_config import cfg CONF = cfg.CONF CONF.import_opt('share_mount_path', 'manila.share.drivers.generic') @ddt.ddt class WindowsSMBHelperTestCase(test.TestCase): _FAKE_SERVER = {'public_address': mock.sentinel.public_address} _FAKE_SHARE_NAME = "fake_share_name" _FAKE_SHARE = "\\\\%s\\%s" % (_FAKE_SERVER['public_address'], _FAKE_SHARE_NAME) _FAKE_SHARE_LOCATION = os.path.join( configuration.Configuration(None).share_mount_path, _FAKE_SHARE_NAME) def setUp(self): self._remote_exec = mock.Mock() fake_conf = configuration.Configuration(None) self._win_smb_helper = windows_smb_helper.WindowsSMBHelper( self._remote_exec, fake_conf) super(WindowsSMBHelperTestCase, self).setUp() def test_init_helper(self): self._win_smb_helper.init_helper(mock.sentinel.server) self._remote_exec.assert_called_once_with(mock.sentinel.server, "Get-SmbShare") @ddt.data(True, False) @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_share_exists') def test_create_export(self, share_exists, mock_share_exists): mock_share_exists.return_value = share_exists result = self._win_smb_helper.create_export(self._FAKE_SERVER, self._FAKE_SHARE_NAME) if not share_exists: cmd = ['New-SmbShare', '-Name', self._FAKE_SHARE_NAME, '-Path', self._win_smb_helper._windows_utils.normalize_path( self._FAKE_SHARE_LOCATION)] self._remote_exec.assert_called_once_with(self._FAKE_SERVER, cmd) else: self.assertFalse(self._remote_exec.called) self.assertEqual(self._FAKE_SHARE, result) @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_share_exists') def test_remove_export(self, mock_share_exists): mock_share_exists.return_value = True self._win_smb_helper.remove_export(mock.sentinel.server, mock.sentinel.share_name) cmd = ['Remove-SmbShare', '-Name', mock.sentinel.share_name, "-Force"] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) @mock.patch.object(windows_utils.WindowsUtils, 'get_volume_path_by_mount_path') @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_get_share_path_by_name') def test_get_volume_path_by_share_name(self, mock_get_share_path, mock_get_vol_path): mock_get_share_path.return_value = self._FAKE_SHARE_LOCATION volume_path = self._win_smb_helper._get_volume_path_by_share_name( mock.sentinel.server, self._FAKE_SHARE_NAME) mock_get_share_path.assert_called_once_with(mock.sentinel.server, self._FAKE_SHARE_NAME) mock_get_vol_path.assert_called_once_with(mock.sentinel.server, self._FAKE_SHARE_LOCATION) self.assertEqual(mock_get_vol_path.return_value, volume_path) @ddt.data('ip', 'user') @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_grant_share_access') @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_grant_share_path_access') def test_allow_access(self, access_type, mock_grant_share_access, mock_grant_share_path_access): mock_args = (mock.sentinel.server, mock.sentinel.share_name, access_type, mock.sentinel.access_level, mock.sentinel.username) if access_type != 'user': self.assertRaises(exception.InvalidShareAccess, self._win_smb_helper.allow_access, *mock_args) else: self._win_smb_helper.allow_access(*mock_args) mock_grant_share_access.assert_called_once_with( mock.sentinel.server, mock.sentinel.share_name, mock.sentinel.access_level, mock.sentinel.username) 
mock_grant_share_path_access.assert_called_once_with( mock.sentinel.server, mock.sentinel.share_name, mock.sentinel.access_level, mock.sentinel.username) @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_refresh_acl') def test_grant_share_access(self, mock_refresh_acl): self._win_smb_helper._grant_share_access(mock.sentinel.server, mock.sentinel.share_name, constants.ACCESS_LEVEL_RW, mock.sentinel.username) cmd = ["Grant-SmbShareAccess", "-Name", mock.sentinel.share_name, "-AccessRight", "Change", "-AccountName", mock.sentinel.username, "-Force"] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) mock_refresh_acl.assert_called_once_with(mock.sentinel.server, mock.sentinel.share_name) @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_get_volume_path_by_share_name') def test_grant_share_path_access(self, mock_get_vol_path): fake_vol_path = 'fake_vol_path' mock_get_vol_path.return_value = fake_vol_path self._win_smb_helper._grant_share_path_access( mock.sentinel.server, mock.sentinel.share_name, constants.ACCESS_LEVEL_RW, mock.sentinel.username) expected_ace = '"%s:(OI)(CI)M"' % mock.sentinel.username cmd = ["icacls", '"%s"' % fake_vol_path, "/grant", expected_ace, "/t", "/c"] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) def test_refresh_acl(self): self._win_smb_helper._refresh_acl(mock.sentinel.server, mock.sentinel.share_name) cmd = ['Set-SmbPathAcl', '-ShareName', mock.sentinel.share_name] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_revoke_share_path_access') @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_revoke_share_access') def test_deny_access(self, mock_revoke_share_path_access, mock_revoke_share_access): mock_access = {'access_to': mock.sentinel.username} self._win_smb_helper.deny_access(mock.sentinel.server, mock.sentinel.share_name, mock_access) mock_revoke_share_access.assert_called_once_with( mock.sentinel.server, mock.sentinel.share_name, mock.sentinel.username) mock_revoke_share_path_access.assert_called_once_with( mock.sentinel.server, mock.sentinel.share_name, mock.sentinel.username) @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_refresh_acl') def test_revoke_share_access(self, mock_refresh_acl): self._win_smb_helper._revoke_share_access(mock.sentinel.server, mock.sentinel.share_name, mock.sentinel.username) cmd = ["Revoke-SmbShareAccess", "-Name", mock.sentinel.share_name, "-AccountName", mock.sentinel.username, "-Force"] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) mock_refresh_acl.assert_called_once_with(mock.sentinel.server, mock.sentinel.share_name) @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_get_volume_path_by_share_name') def test_revoke_share_path_access(self, mock_get_vol_path): fake_vol_path = 'fake_vol_path' mock_get_vol_path.return_value = fake_vol_path self._win_smb_helper._revoke_share_path_access( mock.sentinel.server, mock.sentinel.share_name, mock.sentinel.username) cmd = ["icacls", '"%s"' % fake_vol_path, "/remove", mock.sentinel.username, "/t", "/c"] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) def test_get_share_name(self): result = self._win_smb_helper._get_share_name(self._FAKE_SHARE) self.assertEqual(self._FAKE_SHARE_NAME, result) def test_exports_for_share(self): result = self._win_smb_helper.get_exports_for_share( self._FAKE_SERVER, self._FAKE_SHARE_LOCATION) self.assertEqual([self._FAKE_SHARE], result) def 
test_get_share_path_by_name(self): self._remote_exec.return_value = (self._FAKE_SHARE_LOCATION, mock.sentinel.std_err) result = self._win_smb_helper._get_share_path_by_name( mock.sentinel.server, mock.sentinel.share_name) cmd = ('Get-SmbShare -Name %s | ' 'Select-Object -ExpandProperty Path' % mock.sentinel.share_name) self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd, check_exit_code=True) self.assertEqual(self._FAKE_SHARE_LOCATION, result) @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_get_share_path_by_name') def test_get_share_path_by_export_location(self, mock_get_share_path_by_name): mock_get_share_path_by_name.return_value = mock.sentinel.share_path result = self._win_smb_helper.get_share_path_by_export_location( mock.sentinel.server, self._FAKE_SHARE) mock_get_share_path_by_name.assert_called_once_with( mock.sentinel.server, self._FAKE_SHARE_NAME) self.assertEqual(mock.sentinel.share_path, result) @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_get_share_path_by_name') def test_share_exists(self, mock_get_share_path_by_name): result = self._win_smb_helper._share_exists(mock.sentinel.server, mock.sentinel.share_name) mock_get_share_path_by_name.assert_called_once_with( mock.sentinel.server, mock.sentinel.share_name, ignore_missing=True) self.assertTrue(result) manila-2.0.0/manila/tests/share/drivers/windows/test_service_instance.py0000664000567000056710000003777512701407107027747 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Cloudbase Solutions SRL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
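# Illustrative sketch (hypothetical, not the driver's actual implementation):
# the WindowsServiceInstanceManager tests below treat the auth mode as
# invalid when certificate auth is requested but the PEM files are missing,
# or when the password fails a complexity check.  The complexity rule
# sketched here (length >= 6 with upper case, lower case and a digit) merely
# matches the ddt samples used there ('abAB01' valid, 'abcdef' and 'aA0'
# invalid); the real _check_password_complexity may differ.
import re


def _example_password_complexity_ok(password):
    return (len(password) >= 6 and
            bool(re.search(r'[a-z]', password)) and
            bool(re.search(r'[A-Z]', password)) and
            bool(re.search(r'\d', password)))


def _example_auth_mode_invalid(use_cert_auth, certs_exist, password):
    # Mirrors the 'invalid_auth' expression asserted in test_check_auth_mode.
    return ((use_cert_auth and not certs_exist) or
            not _example_password_complexity_ok(password))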
import os import ddt import mock from oslo_concurrency import processutils from oslo_config import cfg from manila import exception from manila.share import configuration from manila.share.drivers import service_instance as generic_service_instance from manila.share.drivers.windows import service_instance from manila.share.drivers.windows import windows_utils from manila import test CONF = cfg.CONF CONF.import_opt('driver_handles_share_servers', 'manila.share.driver') CONF.register_opts(generic_service_instance.common_opts) serv_mgr_cls = service_instance.WindowsServiceInstanceManager generic_serv_mgr_cls = generic_service_instance.ServiceInstanceManager @ddt.ddt class WindowsServiceInstanceManagerTestCase(test.TestCase): _FAKE_SERVER = {'ip': mock.sentinel.ip, 'instance_id': mock.sentinel.instance_id} @mock.patch.object(windows_utils, 'WindowsUtils') @mock.patch.object(serv_mgr_cls, '_check_auth_mode') def setUp(self, mock_check_auth, mock_utils_cls): self.flags(service_instance_user=mock.sentinel.username) self._remote_execute = mock.Mock() fake_conf = configuration.Configuration(None) self._mgr = serv_mgr_cls(remote_execute=self._remote_execute, driver_config=fake_conf) self._windows_utils = mock_utils_cls.return_value super(WindowsServiceInstanceManagerTestCase, self).setUp() @ddt.data({}, {'use_cert_auth': False}, {'use_cert_auth': False, 'valid_pass_complexity': False}, {'certs_exist': False}) @mock.patch('os.path.exists') @mock.patch.object(serv_mgr_cls, '_check_password_complexity') @ddt.unpack def test_check_auth_mode(self, mock_check_complexity, mock_path_exists, use_cert_auth=True, certs_exist=True, valid_pass_complexity=True): self.flags(service_instance_password=mock.sentinel.password) self._mgr._cert_pem_path = mock.sentinel.cert_path self._mgr._cert_key_pem_path = mock.sentinel.key_path mock_path_exists.return_value = certs_exist mock_check_complexity.return_value = valid_pass_complexity self._mgr._use_cert_auth = use_cert_auth invalid_auth = ((use_cert_auth and not certs_exist) or not valid_pass_complexity) if invalid_auth: self.assertRaises(exception.ServiceInstanceException, self._mgr._check_auth_mode) else: self._mgr._check_auth_mode() if not use_cert_auth: mock_check_complexity.assert_called_once_with( mock.sentinel.password) @ddt.data(False, True) def test_get_auth_info(self, use_cert_auth): self._mgr._use_cert_auth = use_cert_auth self._mgr._cert_pem_path = mock.sentinel.cert_path self._mgr._cert_key_pem_path = mock.sentinel.key_path auth_info = self._mgr._get_auth_info() expected_auth_info = {'use_cert_auth': use_cert_auth} if use_cert_auth: expected_auth_info.update(cert_pem_path=mock.sentinel.cert_path, cert_key_pem_path=mock.sentinel.key_path) self.assertEqual(expected_auth_info, auth_info) @mock.patch.object(serv_mgr_cls, '_get_auth_info') @mock.patch.object(generic_serv_mgr_cls, 'get_common_server') def test_common_server(self, mock_generic_get_server, mock_get_auth): mock_server_details = {'backend_details': {}} mock_auth_info = {'fake_auth_info': mock.sentinel.auth_info} mock_generic_get_server.return_value = mock_server_details mock_get_auth.return_value = mock_auth_info expected_server_details = dict(backend_details=mock_auth_info) server_details = self._mgr.get_common_server() mock_generic_get_server.assert_called_once_with() self.assertEqual(expected_server_details, server_details) @mock.patch.object(serv_mgr_cls, '_get_auth_info') @mock.patch.object(generic_serv_mgr_cls, '_get_new_instance_details') def test_get_new_instance_details(self, 
mock_generic_get_details, mock_get_auth): mock_server_details = {'fake_server_details': mock.sentinel.server_details} mock_generic_get_details.return_value = mock_server_details mock_auth_info = {'fake_auth_info': mock.sentinel.auth_info} mock_get_auth.return_value = mock_auth_info expected_server_details = dict(mock_server_details, **mock_auth_info) instance_details = self._mgr._get_new_instance_details( server=mock.sentinel.server) mock_generic_get_details.assert_called_once_with(mock.sentinel.server) self.assertEqual(expected_server_details, instance_details) @ddt.data(('abAB01', True), ('abcdef', False), ('aA0', False)) @ddt.unpack def test_check_password_complexity(self, password, expected_result): valid_complexity = self._mgr._check_password_complexity( password) self.assertEqual(expected_result, valid_complexity) @ddt.data(None, Exception) def test_server_connection(self, side_effect): self._remote_execute.side_effect = side_effect expected_result = side_effect is None is_available = self._mgr._test_server_connection(self._FAKE_SERVER) self.assertEqual(expected_result, is_available) self._remote_execute.assert_called_once_with(self._FAKE_SERVER, "whoami", retry=False) @ddt.data(False, True) def test_get_service_instance_create_kwargs(self, use_cert_auth): self._mgr._use_cert_auth = use_cert_auth self.flags(service_instance_password=mock.sentinel.admin_pass) if use_cert_auth: mock_cert_data = 'mock_cert_data' self.mock_object(service_instance, 'open', mock.mock_open( read_data=mock_cert_data)) expected_kwargs = dict(user_data=mock_cert_data) else: expected_kwargs = dict( meta=dict(admin_pass=mock.sentinel.admin_pass)) create_kwargs = self._mgr._get_service_instance_create_kwargs() self.assertEqual(expected_kwargs, create_kwargs) @mock.patch.object(generic_serv_mgr_cls, 'set_up_service_instance') @mock.patch.object(serv_mgr_cls, 'get_valid_security_service') @mock.patch.object(serv_mgr_cls, '_setup_security_service') def test_set_up_service_instance(self, mock_setup_security_service, mock_get_valid_security_service, mock_generic_setup_serv_inst): mock_service_instance = {'instance_details': None} mock_network_info = {'security_services': mock.sentinel.security_services} mock_generic_setup_serv_inst.return_value = mock_service_instance mock_get_valid_security_service.return_value = ( mock.sentinel.security_service) instance_details = self._mgr.set_up_service_instance( mock.sentinel.context, mock_network_info) mock_generic_setup_serv_inst.assert_called_once_with( mock.sentinel.context, mock_network_info) mock_get_valid_security_service.assert_called_once_with( mock.sentinel.security_services) mock_setup_security_service.assert_called_once_with( mock_service_instance, mock.sentinel.security_service) expected_instance_details = dict(mock_service_instance, joined_domain=True) self.assertEqual(expected_instance_details, instance_details) @mock.patch.object(serv_mgr_cls, '_run_cloudbase_init_plugin_after_reboot') @mock.patch.object(serv_mgr_cls, '_join_domain') def test_setup_security_service(self, mock_join_domain, mock_run_cbsinit_plugin): utils = self._windows_utils mock_security_service = {'domain': mock.sentinel.domain, 'user': mock.sentinel.admin_username, 'password': mock.sentinel.admin_password, 'dns_ip': mock.sentinel.dns_ip} utils.get_interface_index_by_ip.return_value = ( mock.sentinel.interface_index) self._mgr._setup_security_service(self._FAKE_SERVER, mock_security_service) utils.set_dns_client_search_list.assert_called_once_with( self._FAKE_SERVER, 
[mock_security_service['domain']]) utils.get_interface_index_by_ip.assert_called_once_with( self._FAKE_SERVER, self._FAKE_SERVER['ip']) utils.set_dns_client_server_addresses.assert_called_once_with( self._FAKE_SERVER, mock.sentinel.interface_index, [mock_security_service['dns_ip']]) mock_run_cbsinit_plugin.assert_called_once_with( self._FAKE_SERVER, plugin_name=self._mgr._CBS_INIT_WINRM_PLUGIN) mock_join_domain.assert_called_once_with( self._FAKE_SERVER, mock.sentinel.domain, mock.sentinel.admin_username, mock.sentinel.admin_password) @ddt.data({'join_domain_side_eff': Exception}, {'server_available': False, 'expected_exception': exception.ServiceInstanceException}, {'join_domain_side_eff': processutils.ProcessExecutionError, 'expected_exception': processutils.ProcessExecutionError}, {'domain_mismatch': True, 'expected_exception': exception.ServiceInstanceException}) @mock.patch.object(generic_serv_mgr_cls, 'reboot_server') @mock.patch.object(generic_serv_mgr_cls, 'wait_for_instance_to_be_active') @mock.patch.object(generic_serv_mgr_cls, '_check_server_availability') @ddt.unpack def test_join_domain(self, mock_check_avail, mock_wait_instance_active, mock_reboot_server, expected_exception=None, server_available=True, domain_mismatch=False, join_domain_side_eff=None): self._windows_utils.join_domain.side_effect = join_domain_side_eff mock_check_avail.return_value = server_available self._windows_utils.get_current_domain.return_value = ( None if domain_mismatch else mock.sentinel.domain) domain_params = (mock.sentinel.domain, mock.sentinel.admin_username, mock.sentinel.admin_password) if expected_exception: self.assertRaises(expected_exception, self._mgr._join_domain, self._FAKE_SERVER, *domain_params) else: self._mgr._join_domain(self._FAKE_SERVER, *domain_params) if join_domain_side_eff != processutils.ProcessExecutionError: mock_reboot_server.assert_called_once_with( self._FAKE_SERVER, soft_reboot=True) mock_wait_instance_active.assert_called_once_with( self._FAKE_SERVER['instance_id'], timeout=self._mgr.max_time_to_build_instance) mock_check_avail.assert_called_once_with(self._FAKE_SERVER) if server_available: self._windows_utils.get_current_domain.assert_called_once_with( self._FAKE_SERVER) self._windows_utils.join_domain.assert_called_once_with( self._FAKE_SERVER, *domain_params) @ddt.data([], [{'type': 'active_directory'}], [{'type': 'active_directory'}] * 2, [{'type': mock.sentinel.invalid_type}]) def test_get_valid_security_service(self, security_services): valid_security_service = self._mgr.get_valid_security_service( security_services) if (security_services and len(security_services) == 1 and security_services[0]['type'] == 'active_directory'): expected_valid_sec_service = security_services[0] else: expected_valid_sec_service = None self.assertEqual(expected_valid_sec_service, valid_security_service) @mock.patch.object(serv_mgr_cls, '_get_cbs_init_reg_section') def test_run_cloudbase_init_plugin_after_reboot(self, mock_get_cbs_init_reg): self._FAKE_SERVER = {'instance_id': mock.sentinel.instance_id} mock_get_cbs_init_reg.return_value = mock.sentinel.cbs_init_reg_sect expected_plugin_key_path = "%(cbs_init)s\\%(instance_id)s\\Plugins" % { 'cbs_init': mock.sentinel.cbs_init_reg_sect, 'instance_id': self._FAKE_SERVER['instance_id']} self._mgr._run_cloudbase_init_plugin_after_reboot( server=self._FAKE_SERVER, plugin_name=mock.sentinel.plugin_name) mock_get_cbs_init_reg.assert_called_once_with(self._FAKE_SERVER) self._windows_utils.set_win_reg_value.assert_called_once_with( 
self._FAKE_SERVER, path=expected_plugin_key_path, key=mock.sentinel.plugin_name, value=self._mgr._CBS_INIT_RUN_PLUGIN_AFTER_REBOOT) @ddt.data( {}, {'exec_errors': [ processutils.ProcessExecutionError(stderr='Cannot find path'), processutils.ProcessExecutionError(stderr='Cannot find path')], 'expected_exception': exception.ServiceInstanceException}, {'exec_errors': [processutils.ProcessExecutionError(stderr='')], 'expected_exception': processutils.ProcessExecutionError}, {'exec_errors': [ processutils.ProcessExecutionError(stderr='Cannot find path'), None]} ) @ddt.unpack def test_get_cbs_init_reg_section(self, exec_errors=None, expected_exception=None): self._windows_utils.normalize_path.return_value = ( mock.sentinel.normalized_section_path) self._windows_utils.get_win_reg_value.side_effect = exec_errors if expected_exception: self.assertRaises(expected_exception, self._mgr._get_cbs_init_reg_section, mock.sentinel.server) else: cbs_init_section = self._mgr._get_cbs_init_reg_section( mock.sentinel.server) self.assertEqual(mock.sentinel.normalized_section_path, cbs_init_section) base_path = 'hklm:\\SOFTWARE' cbs_section = 'Cloudbase Solutions\\Cloudbase-Init' tested_upper_sections = [''] if exec_errors and 'Cannot find path' in exec_errors[0].stderr: tested_upper_sections.append('Wow6432Node') tested_sections = [os.path.join(base_path, upper_section, cbs_section) for upper_section in tested_upper_sections] self._windows_utils.normalize_path.assert_has_calls( [mock.call(tested_section) for tested_section in tested_sections]) manila-2.0.0/manila/tests/share/drivers/test_helpers.py0000664000567000056710000006374412701407107024366 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
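# Illustrative sketch (helper name invented here): the NFSHelper tests below
# expect export options of the form '<level>,no_subtree_check', with
# 'no_root_squash' appended for read-write rules, and an exportfs command
# built as a token list of the form
# ['sudo', 'exportfs', '-o', <options>, '<host>:<local_path>'].
def _example_nfs_export_cmd(access_to, access_level, local_path):
    # access_level is 'rw' or 'ro' (const.ACCESS_LEVEL_RW / _RO).
    options = '%s,no_subtree_check' % access_level
    if access_level == 'rw':
        options = ','.join((options, 'no_root_squash'))
    return ['sudo', 'exportfs', '-o', options,
            ':'.join([access_to, local_path])]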
import os import ddt import mock from oslo_config import cfg from manila.common import constants as const from manila import exception import manila.share.configuration from manila.share.drivers import helpers from manila import test from manila.tests import fake_compute from manila.tests import fake_utils from manila.tests.share.drivers import test_generic CONF = cfg.CONF @ddt.ddt class NFSHelperTestCase(test.TestCase): """Test case for NFS helper.""" def setUp(self): super(NFSHelperTestCase, self).setUp() fake_utils.stub_out_utils_execute(self) self.fake_conf = manila.share.configuration.Configuration(None) self._ssh_exec = mock.Mock(return_value=('', '')) self._execute = mock.Mock(return_value=('', '')) self._helper = helpers.NFSHelper(self._execute, self._ssh_exec, self.fake_conf) ip = '10.254.0.3' self.server = fake_compute.FakeServer( ip=ip, public_address=ip, instance_id='fake_instance_id') self.share_name = 'fake_share_name' def test_init_helper(self): # mocks self.mock_object( self._helper, '_ssh_exec', mock.Mock(side_effect=exception.ProcessExecutionError( stderr='command not found'))) # run self.assertRaises(exception.ManilaException, self._helper.init_helper, self.server) # asserts self._helper._ssh_exec.assert_called_once_with( self.server, ['sudo', 'exportfs']) def test_init_helper_log(self): # mocks self.mock_object( self._helper, '_ssh_exec', mock.Mock(side_effect=exception.ProcessExecutionError( stderr='fake'))) # run self._helper.init_helper(self.server) # asserts self._helper._ssh_exec.assert_called_once_with( self.server, ['sudo', 'exportfs']) def test_create_export(self): ret = self._helper.create_export(self.server, self.share_name) expected_location = ':'.join([self.server['public_address'], os.path.join(CONF.share_mount_path, self.share_name)]) self.assertEqual(expected_location, ret) @ddt.data(const.ACCESS_LEVEL_RW, const.ACCESS_LEVEL_RO) def test_update_access(self, access_level): expected_mount_options = '%s,no_subtree_check' if access_level == const.ACCESS_LEVEL_RW: expected_mount_options = ','.join((expected_mount_options, 'no_root_squash')) self.mock_object(self._helper, '_sync_nfs_temp_and_perm_files') local_path = os.path.join(CONF.share_mount_path, self.share_name) exec_result = ' '.join([local_path, '2.2.2.3']) self.mock_object(self._helper, '_ssh_exec', mock.Mock(return_value=(exec_result, ''))) access_rules = [ test_generic.get_fake_access_rule('1.1.1.1', access_level), test_generic.get_fake_access_rule('2.2.2.2', access_level), test_generic.get_fake_access_rule('2.2.2.3', access_level)] add_rules = [ test_generic.get_fake_access_rule('2.2.2.2', access_level), test_generic.get_fake_access_rule('2.2.2.3', access_level), test_generic.get_fake_access_rule('5.5.5.5/24', access_level)] delete_rules = [ test_generic.get_fake_access_rule('3.3.3.3', access_level), test_generic.get_fake_access_rule('4.4.4.4', access_level, 'user'), test_generic.get_fake_access_rule('6.6.6.6/0', access_level)] self._helper.update_access(self.server, self.share_name, access_rules, add_rules=add_rules, delete_rules=delete_rules) local_path = os.path.join(CONF.share_mount_path, self.share_name) self._helper._ssh_exec.assert_has_calls([ mock.call(self.server, ['sudo', 'exportfs']), mock.call(self.server, ['sudo', 'exportfs', '-u', ':'.join(['3.3.3.3', local_path])]), mock.call(self.server, ['sudo', 'exportfs', '-u', ':'.join(['6.6.6.6/0.0.0.0', local_path])]), mock.call(self.server, ['sudo', 'exportfs', '-o', expected_mount_options % access_level, ':'.join(['2.2.2.2', local_path])]), 
mock.call(self.server, ['sudo', 'exportfs', '-o', expected_mount_options % access_level, ':'.join(['5.5.5.5/255.255.255.0', local_path])]), ]) self._helper._sync_nfs_temp_and_perm_files.assert_has_calls([ mock.call(self.server), mock.call(self.server)]) def test_update_access_invalid_type(self): access_rules = [test_generic.get_fake_access_rule( '2.2.2.2', const.ACCESS_LEVEL_RW, access_type='fake'), ] self.assertRaises( exception.InvalidShareAccess, self._helper.update_access, self.server, self.share_name, access_rules, [], []) def test_update_access_invalid_level(self): access_rules = [test_generic.get_fake_access_rule( '2.2.2.2', 'fake_level', access_type='ip'), ] self.assertRaises( exception.InvalidShareAccessLevel, self._helper.update_access, self.server, self.share_name, access_rules, [], []) def test_get_host_list(self): fake_exportfs = ('/shares/share-1\n\t\t20.0.0.3\n' '/shares/share-1\n\t\t20.0.0.6\n' '/shares/share-2\n\t\t10.0.0.2\n' '/shares/share-2\n\t\t10.0.0.5\n' '/shares/share-3\n\t\t30.0.0.4\n' '/shares/share-3\n\t\t30.0.0.7\n') expected = ['20.0.0.3', '20.0.0.6'] result = self._helper._get_host_list(fake_exportfs, '/shares/share-1') self.assertEqual(expected, result) @ddt.data(const.ACCESS_LEVEL_RW, const.ACCESS_LEVEL_RO) def test_update_access_recovery_mode(self, access_level): expected_mount_options = '%s,no_subtree_check' if access_level == const.ACCESS_LEVEL_RW: expected_mount_options = ','.join((expected_mount_options, 'no_root_squash')) access_rules = [test_generic.get_fake_access_rule( '1.1.1.1', access_level), ] self.mock_object(self._helper, '_sync_nfs_temp_and_perm_files') self.mock_object(self._helper, '_get_host_list', mock.Mock(return_value=['1.1.1.1'])) self._helper.update_access(self.server, self.share_name, access_rules, [], []) local_path = os.path.join(CONF.share_mount_path, self.share_name) self._ssh_exec.assert_has_calls([ mock.call(self.server, ['sudo', 'exportfs']), mock.call( self.server, ['sudo', 'exportfs', '-u', ':'.join([access_rules[0]['access_to'], local_path])]), mock.call(self.server, ['sudo', 'exportfs', '-o', expected_mount_options % access_level, ':'.join(['1.1.1.1', local_path])]), ]) self._helper._sync_nfs_temp_and_perm_files.assert_called_with( self.server) def test_sync_nfs_temp_and_perm_files(self): self._helper._sync_nfs_temp_and_perm_files(self.server) self._helper._ssh_exec.assert_has_calls( [mock.call(self.server, mock.ANY) for i in range(1)]) @ddt.data('/foo/bar', '5.6.7.8:/bar/quuz', '5.6.7.88:/foo/quuz') def test_get_exports_for_share(self, export_location): server = dict(public_address='1.2.3.4') result = self._helper.get_exports_for_share(server, export_location) path = export_location.split(':')[-1] self.assertEqual([':'.join([server['public_address'], path])], result) @ddt.data( {'public_address_with_suffix': 'foo'}, {'with_prefix_public_address': 'bar'}, {'with_prefix_public_address_and_with_suffix': 'quuz'}, {}) def test_get_exports_for_share_with_error(self, server): export_location = '1.2.3.4:/foo/bar' self.assertRaises( exception.ManilaException, self._helper.get_exports_for_share, server, export_location) @ddt.data('/foo/bar', '5.6.7.8:/foo/bar', '5.6.7.88:fake:/foo/bar') def test_get_share_path_by_export_location(self, export_location): result = self._helper.get_share_path_by_export_location( dict(), export_location) self.assertEqual('/foo/bar', result) def test_disable_access_for_maintenance(self): fake_maintenance_path = "fake.path" share_mount_path = os.path.join( self._helper.configuration.share_mount_path, 
self.share_name) self.mock_object(self._helper, '_ssh_exec') self.mock_object(self._helper, '_sync_nfs_temp_and_perm_files') self.mock_object(self._helper, '_get_maintenance_file_path', mock.Mock(return_value=fake_maintenance_path)) self._helper.disable_access_for_maintenance( self.server, self.share_name) self._helper._ssh_exec.assert_any_call( self.server, ['cat', const.NFS_EXPORTS_FILE, '| grep', self.share_name, '| sudo tee', fake_maintenance_path] ) self._helper._ssh_exec.assert_any_call( self.server, ['sudo', 'exportfs', '-u', share_mount_path] ) self._helper._sync_nfs_temp_and_perm_files.assert_called_once_with( self.server ) def test_restore_access_after_maintenance(self): fake_maintenance_path = "fake.path" self.mock_object(self._helper, '_get_maintenance_file_path', mock.Mock(return_value=fake_maintenance_path)) self.mock_object(self._helper, '_ssh_exec') self._helper.restore_access_after_maintenance( self.server, self.share_name) self._helper._ssh_exec.assert_called_once_with( self.server, ['cat', fake_maintenance_path, '| sudo tee -a', const.NFS_EXPORTS_FILE, '&& sudo exportfs -r', '&& sudo rm -f', fake_maintenance_path] ) @ddt.ddt class CIFSHelperIPAccessTestCase(test.TestCase): """Test case for CIFS helper with IP access.""" def setUp(self): super(CIFSHelperIPAccessTestCase, self).setUp() self.server_details = {'instance_id': 'fake', 'public_address': '1.2.3.4', } self.share_name = 'fake_share_name' self.fake_conf = manila.share.configuration.Configuration(None) self._ssh_exec = mock.Mock(return_value=('', '')) self._execute = mock.Mock(return_value=('', '')) self._helper = helpers.CIFSHelperIPAccess(self._execute, self._ssh_exec, self.fake_conf) self.access = dict( access_level=const.ACCESS_LEVEL_RW, access_type='ip', access_to='1.1.1.1') def test_init_helper(self): self._helper.init_helper(self.server_details) self._helper._ssh_exec.assert_called_once_with( self.server_details, ['sudo', 'net', 'conf', 'list'], ) def test_create_export_share_does_not_exist(self): def fake_ssh_exec(*args, **kwargs): if 'showshare' in args[1]: raise exception.ProcessExecutionError() else: return '', '' self.mock_object(self._helper, '_ssh_exec', mock.Mock(side_effect=fake_ssh_exec)) ret = self._helper.create_export(self.server_details, self.share_name) expected_location = '\\\\%s\\%s' % ( self.server_details['public_address'], self.share_name) self.assertEqual(expected_location, ret) share_path = os.path.join( self._helper.configuration.share_mount_path, self.share_name) self._helper._ssh_exec.assert_has_calls([ mock.call( self.server_details, ['sudo', 'net', 'conf', 'showshare', self.share_name, ] ), mock.call( self.server_details, [ 'sudo', 'net', 'conf', 'addshare', self.share_name, share_path, 'writeable=y', 'guest_ok=y', ] ), mock.call(self.server_details, mock.ANY), ]) def test_create_export_share_does_not_exist_exception(self): self.mock_object(self._helper, '_ssh_exec', mock.Mock( side_effect=[exception.ProcessExecutionError(), Exception('')] )) self.assertRaises( exception.ManilaException, self._helper.create_export, self.server_details, self.share_name) def test_create_export_share_exist_recreate_true(self): ret = self._helper.create_export(self.server_details, self.share_name, recreate=True) expected_location = '\\\\%s\\%s' % ( self.server_details['public_address'], self.share_name) self.assertEqual(expected_location, ret) share_path = os.path.join( self._helper.configuration.share_mount_path, self.share_name) self._helper._ssh_exec.assert_has_calls([ mock.call( self.server_details, 
['sudo', 'net', 'conf', 'showshare', self.share_name, ] ), mock.call( self.server_details, ['sudo', 'net', 'conf', 'delshare', self.share_name, ] ), mock.call( self.server_details, [ 'sudo', 'net', 'conf', 'addshare', self.share_name, share_path, 'writeable=y', 'guest_ok=y', ] ), mock.call(self.server_details, mock.ANY), ]) def test_create_export_share_exist_recreate_false(self): self.assertRaises( exception.ShareBackendException, self._helper.create_export, self.server_details, self.share_name, recreate=False, ) self._helper._ssh_exec.assert_has_calls([ mock.call( self.server_details, ['sudo', 'net', 'conf', 'showshare', self.share_name, ] ), ]) def test_remove_export(self): self._helper.remove_export(self.server_details, self.share_name) self._helper._ssh_exec.assert_called_once_with( self.server_details, ['sudo', 'net', 'conf', 'delshare', self.share_name], ) def test_remove_export_forcibly(self): delshare_command = ['sudo', 'net', 'conf', 'delshare', self.share_name] def fake_ssh_exec(*args, **kwargs): if delshare_command == args[1]: raise exception.ProcessExecutionError() else: return ('', '') self.mock_object(self._helper, '_ssh_exec', mock.Mock(side_effect=fake_ssh_exec)) self._helper.remove_export(self.server_details, self.share_name) self._helper._ssh_exec.assert_has_calls([ mock.call( self.server_details, ['sudo', 'net', 'conf', 'delshare', self.share_name], ), mock.call( self.server_details, ['sudo', 'smbcontrol', 'all', 'close-share', self.share_name], ), ]) def test_update_access_wrong_access_level(self): access_rules = [test_generic.get_fake_access_rule( '2.2.2.2', const.ACCESS_LEVEL_RO), ] self.assertRaises( exception.InvalidShareAccessLevel, self._helper.update_access, self.server_details, self.share_name, access_rules, [], []) def test_update_access_wrong_access_type(self): access_rules = [test_generic.get_fake_access_rule( '2.2.2.2', const.ACCESS_LEVEL_RW, access_type='fake'), ] self.assertRaises( exception.InvalidShareAccess, self._helper.update_access, self.server_details, self.share_name, access_rules, [], []) def test_update_access(self): access_rules = [test_generic.get_fake_access_rule( '1.1.1.1', const.ACCESS_LEVEL_RW), ] self._helper.update_access(self.server_details, self.share_name, access_rules, [], []) self._helper._ssh_exec.assert_called_once_with( self.server_details, ['sudo', 'net', 'conf', 'setparm', self.share_name, '"hosts allow"', '"1.1.1.1"']) def test_get_allow_hosts(self): self.mock_object(self._helper, '_ssh_exec', mock.Mock( return_value=('1.1.1.1 2.2.2.2 3.3.3.3', ''))) expected = ['1.1.1.1', '2.2.2.2', '3.3.3.3'] result = self._helper._get_allow_hosts( self.server_details, self.share_name) self.assertEqual(expected, result) cmd = ['sudo', 'net', 'conf', 'getparm', self.share_name, '\"hosts allow\"'] self._helper._ssh_exec.assert_called_once_with( self.server_details, cmd) @ddt.data( '', '1.2.3.4:/nfs/like/export', '/1.2.3.4/foo', '\\1.2.3.4\\foo', '//1.2.3.4\\mixed_slashes_and_backslashes_one', '\\\\1.2.3.4/mixed_slashes_and_backslashes_two') def test__get_share_group_name_from_export_location(self, export_location): self.assertRaises( exception.InvalidShare, self._helper._get_share_group_name_from_export_location, export_location) @ddt.data('//5.6.7.8/foo', '\\\\5.6.7.8\\foo') def test_get_exports_for_share(self, export_location): server = dict(public_address='1.2.3.4') self.mock_object( self._helper, '_get_share_group_name_from_export_location', mock.Mock(side_effect=( self._helper._get_share_group_name_from_export_location))) result = 
self._helper.get_exports_for_share(server, export_location) expected_export_location = ['\\\\%s\\foo' % server['public_address']] self.assertEqual(expected_export_location, result) self._helper._get_share_group_name_from_export_location.\ assert_called_once_with(export_location) @ddt.data( {'public_address_with_suffix': 'foo'}, {'with_prefix_public_address': 'bar'}, {'with_prefix_public_address_and_with_suffix': 'quuz'}, {}) def test_get_exports_for_share_with_exception(self, server): export_location = '1.2.3.4:/foo/bar' self.assertRaises( exception.ManilaException, self._helper.get_exports_for_share, server, export_location) @ddt.data('//5.6.7.8/foo', '\\\\5.6.7.8\\foo') def test_get_share_path_by_export_location(self, export_location): fake_path = ' /bar/quuz\n ' fake_server = dict() self.mock_object( self._helper, '_ssh_exec', mock.Mock(return_value=(fake_path, 'fake'))) self.mock_object( self._helper, '_get_share_group_name_from_export_location', mock.Mock(side_effect=( self._helper._get_share_group_name_from_export_location))) result = self._helper.get_share_path_by_export_location( fake_server, export_location) self.assertEqual('/bar/quuz', result) self._helper._ssh_exec.assert_called_once_with( fake_server, ['sudo', 'net', 'conf', 'getparm', 'foo', 'path']) self._helper._get_share_group_name_from_export_location.\ assert_called_once_with(export_location) def test_disable_access_for_maintenance(self): allowed_hosts = ['test', 'test2'] maintenance_path = os.path.join( self._helper.configuration.share_mount_path, "%s.maintenance" % self.share_name) self.mock_object(self._helper, '_set_allow_hosts') self.mock_object(self._helper, '_get_allow_hosts', mock.Mock(return_value=allowed_hosts)) self._helper.disable_access_for_maintenance( self.server_details, self.share_name) self._helper._get_allow_hosts.assert_called_once_with( self.server_details, self.share_name) self._helper._set_allow_hosts.assert_called_once_with( self.server_details, [], self.share_name) valid_cmd = ['echo', "'test test2'", '| sudo tee', maintenance_path] self._helper._ssh_exec.assert_called_once_with( self.server_details, valid_cmd) def test_restore_access_after_maintenance(self): fake_maintenance_path = "test.path" self.mock_object(self._helper, '_set_allow_hosts') self.mock_object(self._helper, '_get_maintenance_file_path', mock.Mock(return_value=fake_maintenance_path)) self.mock_object(self._helper, '_ssh_exec', mock.Mock(side_effect=[("fake fake2", 0), "fake"])) self._helper.restore_access_after_maintenance( self.server_details, self.share_name) self._helper._set_allow_hosts.assert_called_once_with( self.server_details, ['fake', 'fake2'], self.share_name) self._helper._ssh_exec.assert_any_call( self.server_details, ['cat', fake_maintenance_path]) self._helper._ssh_exec.assert_any_call( self.server_details, ['sudo rm -f', fake_maintenance_path]) @ddt.ddt class CIFSHelperUserAccessTestCase(test.TestCase): """Test case for CIFS helper with user access.""" access_rw = dict( access_level=const.ACCESS_LEVEL_RW, access_type='user', access_to='manila-user') access_ro = dict( access_level=const.ACCESS_LEVEL_RO, access_type='user', access_to='manila-user') def setUp(self): super(CIFSHelperUserAccessTestCase, self).setUp() self.server_details = {'instance_id': 'fake', 'public_address': '1.2.3.4', } self.share_name = 'fake_share_name' self.fake_conf = manila.share.configuration.Configuration(None) self._ssh_exec = mock.Mock(return_value=('', '')) self._execute = mock.Mock(return_value=('', '')) self._helper = 
helpers.CIFSHelperUserAccess( self._execute, self._ssh_exec, self.fake_conf) def test_update_access_exception_type(self): access_rules = [test_generic.get_fake_access_rule( 'user1', const.ACCESS_LEVEL_RW, access_type='ip')] self.assertRaises(exception.InvalidShareAccess, self._helper.update_access, self.server_details, self.share_name, access_rules, [], []) def test_update_access(self): access_list = [test_generic.get_fake_access_rule( 'user1', const.ACCESS_LEVEL_RW, access_type='user'), test_generic.get_fake_access_rule( 'user2', const.ACCESS_LEVEL_RO, access_type='user')] self._helper.update_access(self.server_details, self.share_name, access_list, [], []) self._helper._ssh_exec.assert_has_calls([ mock.call(self.server_details, ['sudo', 'net', 'conf', 'setparm', self.share_name, 'valid users', '"user1"']), mock.call(self.server_details, ['sudo', 'net', 'conf', 'setparm', self.share_name, 'read list', '"user2"']) ]) def test_update_access_exception_level(self): access_rules = [test_generic.get_fake_access_rule( 'user1', 'fake_level', access_type='user'), ] self.assertRaises( exception.InvalidShareAccessLevel, self._helper.update_access, self.server_details, self.share_name, access_rules, [], []) @ddt.ddt class NFSSynchronizedTestCase(test.TestCase): @helpers.nfs_synchronized def wrapped_method(self, server, share_name): return server['instance_id'] + share_name @ddt.data( ({'lock_name': 'FOO', 'instance_id': 'QUUZ'}, 'nfs-FOO'), ({'instance_id': 'QUUZ'}, 'nfs-QUUZ'), ) @ddt.unpack def test_with_lock_name(self, server, expected_lock_name): share_name = 'fake_share_name' self.mock_object( helpers.utils, 'synchronized', mock.Mock(side_effect=helpers.utils.synchronized)) result = self.wrapped_method(server, share_name) self.assertEqual(server['instance_id'] + share_name, result) helpers.utils.synchronized.assert_called_once_with( expected_lock_name, external=True) manila-2.0.0/manila/tests/share/test_hook.py0000664000567000056710000003111412701407107022170 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
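# Illustrative sketch (simplified, not the HookBase implementation): the hook
# tests below exercise this control flow -- a disabled pre-hook
# short-circuits to None, a suppressed failure is logged and the exception
# instance is returned to the caller, and an unsuppressed failure propagates.
def _example_run_pre_hook(enabled, suppress_errors, hook_callable):
    if not enabled:
        return None
    try:
        return hook_callable()
    except Exception as exc:
        if suppress_errors:
            # The real code logs a warning here before swallowing the error.
            return exc
        raise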
import ddt import mock from manila import context from manila.share import hook from manila import test class FakeHookImplementation(hook.HookBase): def _execute_pre_hook(self, context, func_name, *args, **kwargs): """Fake implementation of a pre hook action.""" def _execute_post_hook(self, context, func_name, pre_hook_data, driver_action_results, *args, **kwargs): """Fake implementation of a post hook action.""" def _execute_periodic_hook(self, context, periodic_hook_data, *args, **kwargs): """Fake implementation of a periodic hook action.""" @ddt.ddt class HookBaseTestCase(test.TestCase): def setUp(self): super(HookBaseTestCase, self).setUp() self.context = context.get_admin_context() self.default_config = { "enable_pre_hooks": "fake_enable_pre_hooks", "enable_post_hooks": "fake_enable_post_hooks", "enable_periodic_hooks": "fake_enable_periodic_hooks", "suppress_pre_hooks_errors": "fake_suppress_pre_hook_errors", "suppress_post_hooks_errors": "fake_suppress_post_hook_errors", } for k, v in self.default_config.items(): hook.CONF.set_default(k, v) def _fake_safe_get(self, key): return self.default_config.get(key) + "_safe_get" def _get_hook_instance(self, set_configuration=True, host="fake_host"): if set_configuration: configuration = mock.Mock() configuration.safe_get.side_effect = self._fake_safe_get else: configuration = None instance = FakeHookImplementation( configuration=configuration, host=host) return instance def test_instantiate_hook_fail(self): self.assertRaises(TypeError, hook.HookBase) @ddt.data(True, False) def test_instantiate_hook_successfully_and_set_configuration( self, set_configuration): instance = self._get_hook_instance(set_configuration) self.assertTrue(hasattr(instance, 'host')) self.assertEqual("fake_host", instance.host) self.assertTrue(hasattr(instance, 'configuration')) if not set_configuration: self.assertIsNone(instance.configuration) for attr_name in ("pre_hooks_enabled", "post_hooks_enabled", "periodic_hooks_enabled", "suppress_pre_hooks_errors", "suppress_post_hooks_errors"): self.assertTrue(hasattr(instance, attr_name)) if set_configuration: instance.configuration.append_config_values.assert_has_calls([ mock.call(hook.hook_options)]) conf_func = self._fake_safe_get else: conf_func = self.default_config.get self.assertEqual( conf_func("enable_pre_hooks"), instance.pre_hooks_enabled) self.assertEqual( conf_func("enable_post_hooks"), instance.post_hooks_enabled) self.assertEqual( conf_func("enable_periodic_hooks"), instance.periodic_hooks_enabled) self.assertEqual( conf_func("suppress_pre_hooks_errors"), instance.suppress_pre_hooks_errors) self.assertEqual( conf_func("suppress_post_hooks_errors"), instance.suppress_post_hooks_errors) def test_execute_pre_hook_disabled(self): instance = self._get_hook_instance() instance.pre_hooks_enabled = False self.mock_object( instance, "_execute_pre_hook", mock.Mock(side_effect=Exception("I should not be raised."))) result = instance.execute_pre_hook( self.context, "fake_func_name", "some_arg", some_kwarg="foo") self.assertIsNone(result) @ddt.data(True, False) def test_execute_pre_hook_success(self, provide_context): instance = self._get_hook_instance() instance.pre_hooks_enabled = True instance.suppress_pre_hooks_errors = True expected = "fake_expected_result" some_arg = "some_arg" func_name = "fake_func_name" self.mock_object(hook.LOG, 'error') self.mock_object( instance, "_execute_pre_hook", mock.Mock(return_value=expected)) mock_ctxt = self.mock_object(context, 'get_admin_context') ctxt = self.context if 
provide_context else mock_ctxt result = instance.execute_pre_hook( ctxt, func_name, some_arg, some_kwarg="foo") self.assertEqual(expected, result) instance._execute_pre_hook.assert_called_once_with( some_arg, context=self.context if provide_context else mock_ctxt, func_name=func_name, some_kwarg="foo") self.assertFalse(hook.LOG.error.called) def test_execute_pre_hook_exception_with_suppression(self): instance = self._get_hook_instance() instance.pre_hooks_enabled = True instance.suppress_pre_hooks_errors = True some_arg = "some_arg" func_name = "fake_func_name" FakeException = type("FakeException", (Exception, ), {}) self.mock_object(hook.LOG, 'warning') self.mock_object( instance, "_execute_pre_hook", mock.Mock(side_effect=( FakeException("Some exception that should be suppressed.")))) result = instance.execute_pre_hook( self.context, func_name, some_arg, some_kwarg="foo") self.assertIsInstance(result, FakeException) instance._execute_pre_hook.assert_called_once_with( some_arg, context=self.context, func_name=func_name, some_kwarg="foo") self.assertTrue(hook.LOG.warning.called) def test_execute_pre_hook_exception_without_suppression(self): instance = self._get_hook_instance() instance.pre_hooks_enabled = True instance.suppress_pre_hooks_errors = False some_arg = "some_arg" func_name = "fake_func_name" FakeException = type("FakeException", (Exception, ), {}) self.mock_object(hook.LOG, 'warning') self.mock_object( instance, "_execute_pre_hook", mock.Mock(side_effect=( FakeException( "Some exception that should NOT be suppressed.")))) self.assertRaises( FakeException, instance.execute_pre_hook, self.context, func_name, some_arg, some_kwarg="foo") instance._execute_pre_hook.assert_called_once_with( some_arg, context=self.context, func_name=func_name, some_kwarg="foo") self.assertFalse(hook.LOG.warning.called) def test_execute_post_hook_disabled(self): instance = self._get_hook_instance() instance.post_hooks_enabled = False self.mock_object( instance, "_execute_post_hook", mock.Mock(side_effect=Exception("I should not be raised."))) result = instance.execute_post_hook( self.context, "fake_func_name", "some_pre_hook_data", "some_driver_action_results", "some_arg", some_kwarg="foo") self.assertIsNone(result) @ddt.data(True, False) def test_execute_post_hook_success(self, provide_context): instance = self._get_hook_instance() instance.post_hooks_enabled = True instance.suppress_post_hooks_errors = True expected = "fake_expected_result" some_arg = "some_arg" func_name = "fake_func_name" pre_hook_data = "some_pre_hook_data" driver_action_results = "some_driver_action_results" self.mock_object(hook.LOG, 'warning') self.mock_object( instance, "_execute_post_hook", mock.Mock(return_value=expected)) mock_ctxt = self.mock_object(context, 'get_admin_context') ctxt = self.context if provide_context else mock_ctxt result = instance.execute_post_hook( ctxt, func_name, pre_hook_data, driver_action_results, some_arg, some_kwarg="foo") self.assertEqual(expected, result) instance._execute_post_hook.assert_called_once_with( some_arg, context=self.context if provide_context else mock_ctxt, func_name=func_name, pre_hook_data=pre_hook_data, driver_action_results=driver_action_results, some_kwarg="foo") self.assertFalse(hook.LOG.warning.called) def test_execute_post_hook_exception_with_suppression(self): instance = self._get_hook_instance() instance.post_hooks_enabled = True instance.suppress_post_hooks_errors = True some_arg = "some_arg" func_name = "fake_func_name" pre_hook_data = "some_pre_hook_data" 
driver_action_results = "some_driver_action_results" FakeException = type("FakeException", (Exception, ), {}) self.mock_object(hook.LOG, 'warning') self.mock_object( instance, "_execute_post_hook", mock.Mock(side_effect=( FakeException("Some exception that should be suppressed.")))) result = instance.execute_post_hook( self.context, func_name, pre_hook_data, driver_action_results, some_arg, some_kwarg="foo") self.assertIsInstance(result, FakeException) instance._execute_post_hook.assert_called_once_with( some_arg, context=self.context, func_name=func_name, pre_hook_data=pre_hook_data, driver_action_results=driver_action_results, some_kwarg="foo") self.assertTrue(hook.LOG.warning.called) def test_execute_post_hook_exception_without_suppression(self): instance = self._get_hook_instance() instance.post_hooks_enabled = True instance.suppress_post_hooks_errors = False some_arg = "some_arg" func_name = "fake_func_name" pre_hook_data = "some_pre_hook_data" driver_action_results = "some_driver_action_results" FakeException = type("FakeException", (Exception, ), {}) self.mock_object(hook.LOG, 'error') self.mock_object( instance, "_execute_post_hook", mock.Mock(side_effect=( FakeException( "Some exception that should NOT be suppressed.")))) self.assertRaises( FakeException, instance.execute_post_hook, self.context, func_name, pre_hook_data, driver_action_results, some_arg, some_kwarg="foo") instance._execute_post_hook.assert_called_once_with( some_arg, context=self.context, func_name=func_name, pre_hook_data=pre_hook_data, driver_action_results=driver_action_results, some_kwarg="foo") self.assertFalse(hook.LOG.error.called) def test_execute_periodic_hook_disabled(self): instance = self._get_hook_instance() instance.periodic_hooks_enabled = False self.mock_object(instance, "_execute_periodic_hook") instance.execute_periodic_hook( self.context, "fake_periodic_hook_data", "some_arg", some_kwarg="foo") self.assertFalse(instance._execute_periodic_hook.called) @ddt.data(True, False) def test_execute_periodic_hook_enabled(self, provide_context): instance = self._get_hook_instance() instance.periodic_hooks_enabled = True expected = "some_expected_result" self.mock_object( instance, "_execute_periodic_hook", mock.Mock(return_value=expected)) mock_ctxt = self.mock_object(context, 'get_admin_context') ctxt = self.context if provide_context else mock_ctxt result = instance.execute_periodic_hook( ctxt, "fake_periodic_hook_data", "some_arg", some_kwarg="foo") instance._execute_periodic_hook.assert_called_once_with( ctxt, "fake_periodic_hook_data", "some_arg", some_kwarg="foo") self.assertEqual(expected, result) manila-2.0.0/manila/tests/share/test_manager.py0000664000567000056710000066711412701407112022655 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
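# Illustrative sketch (standalone example, not manila code): the share
# manager tests below neutralize locking by stubbing the 'synchronized'
# helper with a factory that returns an identity decorator, so decorated
# operations run without taking real locks while the stub still records the
# lock name it was asked for.
import mock  # these test modules use the external mock library


def _example_identity_sync_stub():
    return mock.Mock(return_value=lambda f: f)


# Usage inside a test looks roughly like:
#   self.mock_object(utils, 'synchronized', _example_identity_sync_stub())
#   ... call the decorated method ...
#   utils.synchronized.assert_called_once_with(expected_lock_name,
#                                              external=True)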
"""Test of Share Manager for Manila.""" import datetime import random import ddt import mock from oslo_concurrency import lockutils from oslo_serialization import jsonutils from oslo_utils import importutils from oslo_utils import timeutils import six from manila.common import constants from manila import context from manila.data import rpcapi as data_rpc from manila import db from manila.db.sqlalchemy import models from manila import exception from manila import quota from manila.share import access as share_access from manila.share import drivers_private_data from manila.share import manager from manila.share import migration as migration_api from manila.share import rpcapi from manila.share import share_types from manila import test from manila.tests.api import fakes as test_fakes from manila.tests import db_utils from manila.tests import fake_share as fakes from manila.tests import fake_utils from manila.tests import utils as test_utils from manila import utils def fake_replica(**kwargs): return fakes.fake_replica(for_manager=True, **kwargs) class LockedOperationsTestCase(test.TestCase): class FakeManager(object): @manager.locked_share_replica_operation def fake_replica_operation(self, context, replica, share_id=None): pass def setUp(self): super(self.__class__, self).setUp() self.manager = self.FakeManager() self.fake_context = test_fakes.FakeRequestContext self.lock_call = self.mock_object( utils, 'synchronized', mock.Mock(return_value=lambda f: f)) @ddt.data({'id': 'FAKE_REPLICA_ID'}, 'FAKE_REPLICA_ID') @ddt.unpack def test_locked_share_replica_operation(self, **replica): self.manager.fake_replica_operation(self.fake_context, replica, share_id='FAKE_SHARE_ID') self.assertTrue(self.lock_call.called) @ddt.ddt class ShareManagerTestCase(test.TestCase): def setUp(self): super(ShareManagerTestCase, self).setUp() self.flags(share_driver='manila.tests.fake_driver.FakeShareDriver') # Define class directly, because this test suite dedicated # to specific manager. 
self.share_manager = importutils.import_object( "manila.share.manager.ShareManager") self.mock_object(self.share_manager.driver, 'do_setup') self.mock_object(self.share_manager.driver, 'check_for_setup_error') self.context = context.get_admin_context() self.share_manager.driver.initialized = True mock.patch.object( lockutils, 'lock', fake_utils.get_fake_lock_context()) self.synchronized_lock_decorator_call = self.mock_object( utils, 'synchronized', mock.Mock(return_value=lambda f: f)) def test_share_manager_instance(self): fake_service_name = "fake_service" import_mock = mock.Mock() self.mock_object(importutils, "import_object", import_mock) private_data_mock = mock.Mock() self.mock_object(drivers_private_data, "DriverPrivateData", private_data_mock) self.mock_object(manager.ShareManager, '_init_hook_drivers') share_manager = manager.ShareManager(service_name=fake_service_name) private_data_mock.assert_called_once_with( context=mock.ANY, backend_host=share_manager.host, config_group=fake_service_name ) self.assertTrue(import_mock.called) self.assertTrue(manager.ShareManager._init_hook_drivers.called) def test__init_hook_drivers(self): fake_service_name = "fake_service" import_mock = mock.Mock() self.mock_object(importutils, "import_object", import_mock) self.mock_object(drivers_private_data, "DriverPrivateData") share_manager = manager.ShareManager(service_name=fake_service_name) share_manager.configuration.safe_get = mock.Mock( return_value=["Foo", "Bar"]) self.assertEqual(0, len(share_manager.hooks)) import_mock.reset() share_manager._init_hook_drivers() self.assertEqual( len(share_manager.configuration.safe_get.return_value), len(share_manager.hooks)) import_mock.assert_has_calls([ mock.call( hook, configuration=share_manager.configuration, host=share_manager.host ) for hook in share_manager.configuration.safe_get.return_value ], any_order=True) def test__execute_periodic_hook(self): share_instances_mock = mock.Mock() hook_data_mock = mock.Mock() self.mock_object( self.share_manager.db, "share_instances_get_all_by_host", share_instances_mock) self.mock_object( self.share_manager.driver, "get_periodic_hook_data", hook_data_mock) self.share_manager.hooks = [mock.Mock(return_value=i) for i in (0, 1)] self.share_manager._execute_periodic_hook(self.context) share_instances_mock.assert_called_once_with( context=self.context, host=self.share_manager.host) hook_data_mock.assert_called_once_with( context=self.context, share_instances=share_instances_mock.return_value) for mock_hook in self.share_manager.hooks: mock_hook.execute_periodic_hook.assert_called_once_with( context=self.context, periodic_hook_data=hook_data_mock.return_value) def test_init_host_with_no_shares(self): self.mock_object(self.share_manager.db, 'share_instances_get_all_by_host', mock.Mock(return_value=[])) self.share_manager.init_host() self.assertTrue(self.share_manager.driver.initialized) self.share_manager.db.share_instances_get_all_by_host.\ assert_called_once_with(utils.IsAMatcher(context.RequestContext), self.share_manager.host) self.share_manager.driver.do_setup.assert_called_once_with( utils.IsAMatcher(context.RequestContext)) self.share_manager.driver.check_for_setup_error.\ assert_called_once_with() @ddt.data( "migration_get_driver_info", "migration_get_info", "migration_cancel", "migration_get_progress", "migration_complete", "migration_start", "create_share_instance", "manage_share", "unmanage_share", "delete_share_instance", "delete_free_share_servers", "create_snapshot", "delete_snapshot", "allow_access", 
"deny_access", "_report_driver_status", "_execute_periodic_hook", "publish_service_capabilities", "delete_share_server", "extend_share", "shrink_share", "create_consistency_group", "delete_consistency_group", "create_cgsnapshot", "delete_cgsnapshot", "create_share_replica", "delete_share_replica", "promote_share_replica", "periodic_share_replica_update", "update_share_replica", "create_replicated_snapshot", "delete_replicated_snapshot", "periodic_share_replica_snapshot_update", ) def test_call_driver_when_its_init_failed(self, method_name): self.mock_object(self.share_manager.driver, 'do_setup', mock.Mock(side_effect=Exception())) self.share_manager.init_host() self.assertRaises( exception.DriverNotInitialized, getattr(self.share_manager, method_name), 'foo', 'bar', 'quuz' ) @ddt.data("do_setup", "check_for_setup_error") def test_init_host_with_driver_failure(self, method_name): self.mock_object(self.share_manager.driver, method_name, mock.Mock(side_effect=Exception())) self.mock_object(manager.LOG, 'exception') self.share_manager.driver.initialized = False self.share_manager.init_host() manager.LOG.exception.assert_called_once_with( mock.ANY, {'name': self.share_manager.driver.__class__.__name__, 'host': self.share_manager.host, 'exc': mock.ANY}) self.assertFalse(self.share_manager.driver.initialized) def _setup_init_mocks(self, setup_access_rules=True): instances = [ db_utils.create_share(id='fake_id_1', status=constants.STATUS_AVAILABLE, display_name='fake_name_1').instance, db_utils.create_share(id='fake_id_2', status=constants.STATUS_ERROR, display_name='fake_name_2').instance, db_utils.create_share(id='fake_id_3', status=constants.STATUS_AVAILABLE, display_name='fake_name_3').instance, db_utils.create_share( id='fake_id_4', status=constants.STATUS_AVAILABLE, task_state=constants.TASK_STATE_MIGRATION_IN_PROGRESS, display_name='fake_name_4').instance, db_utils.create_share(id='fake_id_5', status=constants.STATUS_AVAILABLE, display_name='fake_name_5').instance, ] instances[4]['access_rules_status'] = constants.STATUS_OUT_OF_SYNC if not setup_access_rules: return instances rules = [ db_utils.create_access(share_id='fake_id_1'), db_utils.create_access(share_id='fake_id_3'), ] return instances, rules def test_init_host_with_shares_and_rules(self): # initialization of test data def raise_share_access_exists(*args, **kwargs): raise exception.ShareAccessExists( access_type='fake_access_type', access='fake_access') instances, rules = self._setup_init_mocks() fake_export_locations = ['fake/path/1', 'fake/path'] share_server = 'fake_share_server_type_does_not_matter' self.mock_object(self.share_manager.db, 'share_instances_get_all_by_host', mock.Mock(return_value=instances)) self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(side_effect=[instances[0], instances[2], instances[4]])) self.mock_object(self.share_manager.db, 'share_export_locations_update') self.mock_object(self.share_manager.driver, 'ensure_share', mock.Mock(return_value=fake_export_locations)) self.mock_object(self.share_manager, '_ensure_share_instance_has_pool') self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=share_server)) self.mock_object(self.share_manager, 'publish_service_capabilities', mock.Mock()) self.mock_object(self.share_manager.db, 'share_access_get_all_for_share', mock.Mock(return_value=rules)) self.mock_object( self.share_manager.access_helper, 'update_access_rules', mock.Mock(side_effect=raise_share_access_exists) ) # call of 'init_host' method 
self.share_manager.init_host() # verification of call self.share_manager.db.share_instances_get_all_by_host.\ assert_called_once_with(utils.IsAMatcher(context.RequestContext), self.share_manager.host) exports_update = self.share_manager.db.share_export_locations_update exports_update.assert_has_calls([ mock.call(mock.ANY, instances[0]['id'], fake_export_locations), mock.call(mock.ANY, instances[2]['id'], fake_export_locations) ]) self.share_manager.driver.do_setup.assert_called_once_with( utils.IsAMatcher(context.RequestContext)) self.share_manager.driver.check_for_setup_error.\ assert_called_once_with() self.share_manager._ensure_share_instance_has_pool.assert_has_calls([ mock.call(utils.IsAMatcher(context.RequestContext), instances[0]), mock.call(utils.IsAMatcher(context.RequestContext), instances[2]), ]) self.share_manager._get_share_server.assert_has_calls([ mock.call(utils.IsAMatcher(context.RequestContext), instances[0]), mock.call(utils.IsAMatcher(context.RequestContext), instances[2]), ]) self.share_manager.driver.ensure_share.assert_has_calls([ mock.call(utils.IsAMatcher(context.RequestContext), instances[0], share_server=share_server), mock.call(utils.IsAMatcher(context.RequestContext), instances[2], share_server=share_server), ]) self.share_manager.publish_service_capabilities.\ assert_called_once_with( utils.IsAMatcher(context.RequestContext)) self.share_manager.access_helper.update_access_rules.assert_has_calls([ mock.call(mock.ANY, instances[4]['id'], share_server=share_server), ]) def test_init_host_with_exception_on_ensure_share(self): def raise_exception(*args, **kwargs): raise exception.ManilaException(message="Fake raise") instances = self._setup_init_mocks(setup_access_rules=False) share_server = 'fake_share_server_type_does_not_matter' self.mock_object(self.share_manager.db, 'share_instances_get_all_by_host', mock.Mock(return_value=instances)) self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(side_effect=[instances[0], instances[2], instances[3]])) self.mock_object(self.share_manager.driver, 'ensure_share', mock.Mock(side_effect=raise_exception)) self.mock_object(self.share_manager, '_ensure_share_instance_has_pool') self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=share_server)) self.mock_object(self.share_manager, 'publish_service_capabilities') self.mock_object(manager.LOG, 'error') self.mock_object(manager.LOG, 'info') # call of 'init_host' method self.share_manager.init_host() # verification of call self.share_manager.db.share_instances_get_all_by_host.\ assert_called_once_with(utils.IsAMatcher(context.RequestContext), self.share_manager.host) self.share_manager.driver.do_setup.assert_called_once_with( utils.IsAMatcher(context.RequestContext)) self.share_manager.driver.check_for_setup_error.assert_called_with() self.share_manager._ensure_share_instance_has_pool.assert_has_calls([ mock.call(utils.IsAMatcher(context.RequestContext), instances[0]), mock.call(utils.IsAMatcher(context.RequestContext), instances[2]), ]) self.share_manager._get_share_server.assert_has_calls([ mock.call(utils.IsAMatcher(context.RequestContext), instances[0]), mock.call(utils.IsAMatcher(context.RequestContext), instances[2]), ]) self.share_manager.driver.ensure_share.assert_has_calls([ mock.call(utils.IsAMatcher(context.RequestContext), instances[0], share_server=share_server), mock.call(utils.IsAMatcher(context.RequestContext), instances[2], share_server=share_server), ]) self.share_manager.publish_service_capabilities.\ 
assert_called_once_with( utils.IsAMatcher(context.RequestContext)) manager.LOG.info.assert_any_call( mock.ANY, {'task': constants.TASK_STATE_MIGRATION_IN_PROGRESS, 'id': instances[3]['id']}, ) manager.LOG.info.assert_any_call( mock.ANY, {'id': instances[1]['id'], 'status': instances[1]['status']}, ) def test_init_host_with_exception_on_update_access_rules(self): def raise_exception(*args, **kwargs): raise exception.ManilaException(message="Fake raise") instances, rules = self._setup_init_mocks() share_server = 'fake_share_server_type_does_not_matter' smanager = self.share_manager self.mock_object(smanager.db, 'share_instances_get_all_by_host', mock.Mock(return_value=instances)) self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(side_effect=[instances[0], instances[2], instances[4]])) self.mock_object(self.share_manager.driver, 'ensure_share', mock.Mock(return_value=None)) self.mock_object(smanager, '_ensure_share_instance_has_pool') self.mock_object(smanager, '_get_share_server', mock.Mock(return_value=share_server)) self.mock_object(smanager, 'publish_service_capabilities') self.mock_object(manager.LOG, 'error') self.mock_object(manager.LOG, 'info') self.mock_object(smanager.db, 'share_access_get_all_for_share', mock.Mock(return_value=rules)) self.mock_object(smanager.access_helper, 'update_access_rules', mock.Mock(side_effect=raise_exception)) # call of 'init_host' method smanager.init_host() # verification of call smanager.db.share_instances_get_all_by_host.\ assert_called_once_with(utils.IsAMatcher(context.RequestContext), smanager.host) smanager.driver.do_setup.assert_called_once_with( utils.IsAMatcher(context.RequestContext)) smanager.driver.check_for_setup_error.assert_called_with() smanager._ensure_share_instance_has_pool.assert_has_calls([ mock.call(utils.IsAMatcher(context.RequestContext), instances[0]), mock.call(utils.IsAMatcher(context.RequestContext), instances[2]), ]) smanager._get_share_server.assert_has_calls([ mock.call(utils.IsAMatcher(context.RequestContext), instances[0]), mock.call(utils.IsAMatcher(context.RequestContext), instances[2]), ]) smanager.driver.ensure_share.assert_has_calls([ mock.call(utils.IsAMatcher(context.RequestContext), instances[0], share_server=share_server), mock.call(utils.IsAMatcher(context.RequestContext), instances[2], share_server=share_server), ]) self.share_manager.publish_service_capabilities.\ assert_called_once_with( utils.IsAMatcher(context.RequestContext)) manager.LOG.info.assert_any_call( mock.ANY, {'task': constants.TASK_STATE_MIGRATION_IN_PROGRESS, 'id': instances[3]['id']}, ) manager.LOG.info.assert_any_call( mock.ANY, {'id': instances[1]['id'], 'status': instances[1]['status']}, ) smanager.access_helper.update_access_rules.assert_has_calls([ mock.call(utils.IsAMatcher(context.RequestContext), instances[4]['id'], share_server=share_server), ]) manager.LOG.error.assert_has_calls([ mock.call(mock.ANY, mock.ANY), ]) def test_create_share_instance_from_snapshot_with_server(self): """Test share can be created from snapshot if server exists.""" network = db_utils.create_share_network() server = db_utils.create_share_server( share_network_id=network['id'], host='fake_host', backend_details=dict(fake='fake')) parent_share = db_utils.create_share(share_network_id='net-id', share_server_id=server['id']) share = db_utils.create_share() share_id = share['id'] snapshot = db_utils.create_snapshot(share_id=parent_share['id']) snapshot_id = snapshot['id'] self.share_manager.create_share_instance( self.context, 
share.instance['id'], snapshot_id=snapshot_id) self.assertEqual(share_id, db.share_get(context.get_admin_context(), share_id).id) shr = db.share_get(self.context, share_id) self.assertEqual(constants.STATUS_AVAILABLE, shr['status']) self.assertEqual(server['id'], shr['instance']['share_server_id']) def test_create_share_instance_from_snapshot_with_server_not_found(self): """Test creation from snapshot fails if server not found.""" parent_share = db_utils.create_share(share_network_id='net-id', share_server_id='fake-id') share = db_utils.create_share() share_id = share['id'] snapshot = db_utils.create_snapshot(share_id=parent_share['id']) snapshot_id = snapshot['id'] self.assertRaises(exception.ShareServerNotFound, self.share_manager.create_share_instance, self.context, share.instance['id'], snapshot_id=snapshot_id ) shr = db.share_get(self.context, share_id) self.assertEqual(constants.STATUS_ERROR, shr['status']) def test_create_share_instance_from_snapshot(self): """Test share can be created from snapshot.""" share = db_utils.create_share() share_id = share['id'] snapshot = db_utils.create_snapshot(share_id=share_id) snapshot_id = snapshot['id'] self.share_manager.create_share_instance( self.context, share.instance['id'], snapshot_id=snapshot_id) self.assertEqual(share_id, db.share_get(context.get_admin_context(), share_id).id) shr = db.share_get(self.context, share_id) self.assertEqual(constants.STATUS_AVAILABLE, shr['status']) self.assertTrue(len(shr['export_location']) > 0) self.assertEqual(2, len(shr['export_locations'])) def test_create_share_instance_for_share_with_replication_support(self): """Test update call is made to update replica_state.""" share = db_utils.create_share(replication_type='writable') share_id = share['id'] self.share_manager.create_share_instance(self.context, share.instance['id']) self.assertEqual(share_id, db.share_get(context.get_admin_context(), share_id).id) shr = db.share_get(self.context, share_id) shr_instance = db.share_instance_get(self.context, share.instance['id']) self.assertEqual(constants.STATUS_AVAILABLE, shr['status'],) self.assertEqual(constants.REPLICA_STATE_ACTIVE, shr_instance['replica_state']) @ddt.data([], None) def test_create_share_replica_no_active_replicas(self, active_replicas): replica = fake_replica() self.mock_object(db, 'share_replicas_get_available_active_replica', mock.Mock(return_value=active_replicas)) self.mock_object( db, 'share_replica_get', mock.Mock(return_value=replica)) mock_replica_update_call = self.mock_object(db, 'share_replica_update') mock_driver_replica_call = self.mock_object( self.share_manager.driver, 'create_replica') self.assertRaises(exception.ReplicationException, self.share_manager.create_share_replica, self.context, replica) mock_replica_update_call.assert_called_once_with( mock.ANY, replica['id'], {'status': constants.STATUS_ERROR, 'replica_state': constants.STATUS_ERROR}) self.assertFalse(mock_driver_replica_call.called) def test_create_share_replica_with_share_network_id_and_not_dhss(self): replica = fake_replica() manager.CONF.set_default('driver_handles_share_servers', False) self.mock_object(db, 'share_access_get_all_for_share', mock.Mock(return_value=[])) self.mock_object(db, 'share_replicas_get_available_active_replica', mock.Mock(return_value=fake_replica(id='fake2'))) self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) mock_replica_update_call = self.mock_object(db, 'share_replica_update') mock_driver_replica_call = self.mock_object( self.share_manager.driver, 
'create_replica') self.assertRaises(exception.InvalidDriverMode, self.share_manager.create_share_replica, self.context, replica) mock_replica_update_call.assert_called_once_with( mock.ANY, replica['id'], {'status': constants.STATUS_ERROR, 'replica_state': constants.STATUS_ERROR}) self.assertFalse(mock_driver_replica_call.called) def test_create_share_replica_with_share_server_exception(self): replica = fake_replica() manager.CONF.set_default('driver_handles_share_servers', True) self.mock_object(db, 'share_instance_access_copy', mock.Mock(return_value=[])) self.mock_object(db, 'share_replicas_get_available_active_replica', mock.Mock(return_value=fake_replica(id='fake2'))) self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) mock_replica_update_call = self.mock_object(db, 'share_replica_update') mock_driver_replica_call = self.mock_object( self.share_manager.driver, 'create_replica') self.assertRaises(exception.NotFound, self.share_manager.create_share_replica, self.context, replica) mock_replica_update_call.assert_called_once_with( mock.ANY, replica['id'], {'status': constants.STATUS_ERROR, 'replica_state': constants.STATUS_ERROR}) self.assertFalse(mock_driver_replica_call.called) def test_create_share_replica_driver_error_on_creation(self): fake_access_rules = [{'id': '1'}, {'id': '2'}, {'id': '3'}] replica = fake_replica(share_network_id='') replica_2 = fake_replica(id='fake2') self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_instance_access_copy', mock.Mock(return_value=fake_access_rules)) self.mock_object(db, 'share_replicas_get_available_active_replica', mock.Mock(return_value=replica_2)) self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica, replica_2])) self.mock_object(self.share_manager, '_provide_share_server_for_share', mock.Mock(return_value=('FAKE_SERVER', replica))) self.mock_object(self.share_manager, '_get_replica_snapshots_for_snapshot', mock.Mock(return_value=[])) mock_replica_update_call = self.mock_object(db, 'share_replica_update') mock_export_locs_update_call = self.mock_object( db, 'share_export_locations_update') mock_log_error = self.mock_object(manager.LOG, 'error') mock_log_info = self.mock_object(manager.LOG, 'info') self.mock_object(db, 'share_instance_access_get', mock.Mock(return_value=fake_access_rules[0])) mock_share_replica_access_update = self.mock_object( db, 'share_instance_update_access_status') self.mock_object(self.share_manager, '_get_share_server') driver_call = self.mock_object( self.share_manager.driver, 'create_replica', mock.Mock(side_effect=exception.ManilaException)) self.assertRaises(exception.ManilaException, self.share_manager.create_share_replica, self.context, replica) mock_replica_update_call.assert_called_once_with( mock.ANY, replica['id'], {'status': constants.STATUS_ERROR, 'replica_state': constants.STATUS_ERROR}) self.assertEqual(1, mock_share_replica_access_update.call_count) self.assertFalse(mock_export_locs_update_call.called) self.assertTrue(mock_log_error.called) self.assertFalse(mock_log_info.called) self.assertTrue(driver_call.called) def test_create_share_replica_invalid_locations_state(self): driver_retval = { 'export_locations': 'FAKE_EXPORT_LOC', } replica = fake_replica(share_network='') replica_2 = fake_replica(id='fake2') fake_access_rules = [{'id': '1'}, {'id': '2'}] self.mock_object(db, 'share_replicas_get_available_active_replica', mock.Mock(return_value=replica_2)) self.mock_object(db, 
'share_replicas_get_all_by_share', mock.Mock(return_value=[replica, replica_2])) self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_instance_access_copy', mock.Mock(return_value=fake_access_rules)) self.mock_object(self.share_manager, '_provide_share_server_for_share', mock.Mock(return_value=('FAKE_SERVER', replica))) self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) self.mock_object(self.share_manager, '_get_replica_snapshots_for_snapshot', mock.Mock(return_value=[])) mock_replica_update_call = self.mock_object(db, 'share_replica_update') mock_export_locs_update_call = self.mock_object( db, 'share_export_locations_update') mock_log_info = self.mock_object(manager.LOG, 'info') mock_log_warning = self.mock_object(manager.LOG, 'warning') mock_log_error = self.mock_object(manager.LOG, 'error') driver_call = self.mock_object( self.share_manager.driver, 'create_replica', mock.Mock(return_value=driver_retval)) self.mock_object(db, 'share_instance_access_get', mock.Mock(return_value=fake_access_rules[0])) mock_share_replica_access_update = self.mock_object( db, 'share_instance_update_access_status') self.share_manager.create_share_replica(self.context, replica) self.assertFalse(mock_replica_update_call.called) self.assertEqual(1, mock_share_replica_access_update.call_count) self.assertFalse(mock_export_locs_update_call.called) self.assertTrue(mock_log_info.called) self.assertTrue(mock_log_warning.called) self.assertFalse(mock_log_error.called) self.assertTrue(driver_call.called) call_args = driver_call.call_args_list[0][0] replica_list_arg = call_args[1] r_ids = [r['id'] for r in replica_list_arg] for r in (replica, replica_2): self.assertIn(r['id'], r_ids) self.assertEqual(2, len(r_ids)) def test_create_share_replica_no_availability_zone(self): replica = fake_replica( availability_zone=None, share_network='', replica_state=constants.REPLICA_STATE_OUT_OF_SYNC) replica_2 = fake_replica(id='fake2') self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica, replica_2])) manager.CONF.set_default('storage_availability_zone', 'fake_az') fake_access_rules = [{'id': '1'}, {'id': '2'}, {'id': '3'}] self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_instance_access_copy', mock.Mock(return_value=fake_access_rules)) self.mock_object(db, 'share_replicas_get_available_active_replica', mock.Mock(return_value=replica_2)) self.mock_object(self.share_manager, '_provide_share_server_for_share', mock.Mock(return_value=('FAKE_SERVER', replica))) self.mock_object(self.share_manager, '_get_replica_snapshots_for_snapshot', mock.Mock(return_value=[])) mock_replica_update_call = self.mock_object( db, 'share_replica_update', mock.Mock(return_value=replica)) mock_calls = [ mock.call(mock.ANY, replica['id'], {'availability_zone': 'fake_az'}, with_share_data=True), mock.call(mock.ANY, replica['id'], {'status': constants.STATUS_AVAILABLE, 'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC}), ] mock_export_locs_update_call = self.mock_object( db, 'share_export_locations_update') mock_log_info = self.mock_object(manager.LOG, 'info') mock_log_warning = self.mock_object(manager.LOG, 'warning') mock_log_error = self.mock_object(manager.LOG, 'warning') self.mock_object(db, 'share_instance_access_get', mock.Mock(return_value=fake_access_rules[0])) mock_share_replica_access_update = self.mock_object( self.share_manager, '_update_share_replica_access_rules_state') 
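# Expected behaviour: with no availability zone on the replica,
# create_share_replica() should first persist the configured
# storage_availability_zone ('fake_az') and only then mark the replica
# STATUS_AVAILABLE / REPLICA_STATE_OUT_OF_SYNC -- the two ordered
# share_replica_update calls listed in mock_calls above -- while also
# updating export locations and the access-rules state exactly once.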
driver_call = self.mock_object( self.share_manager.driver, 'create_replica', mock.Mock(return_value=replica)) self.mock_object(self.share_manager, '_get_share_server', mock.Mock()) self.share_manager.create_share_replica(self.context, replica) mock_replica_update_call.assert_has_calls(mock_calls, any_order=False) mock_share_replica_access_update.assert_called_once_with( mock.ANY, replica['id'], replica['access_rules_status']) self.assertTrue(mock_export_locs_update_call.called) self.assertTrue(mock_log_info.called) self.assertFalse(mock_log_warning.called) self.assertFalse(mock_log_error.called) self.assertTrue(driver_call.called) @ddt.data(True, False) def test_create_share_replica(self, has_snapshots): replica = fake_replica( share_network='', replica_state=constants.REPLICA_STATE_IN_SYNC) replica_2 = fake_replica(id='fake2') snapshots = ([fakes.fake_snapshot(create_instance=True)] if has_snapshots else []) snapshot_instances = [ fakes.fake_snapshot_instance(share_instance_id=replica['id']), fakes.fake_snapshot_instance(share_instance_id='fake2'), ] fake_access_rules = [{'id': '1'}, {'id': '2'}, {'id': '3'}] self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_instance_access_copy', mock.Mock(return_value=fake_access_rules)) self.mock_object(db, 'share_replicas_get_available_active_replica', mock.Mock(return_value=replica_2)) self.mock_object(self.share_manager, '_provide_share_server_for_share', mock.Mock(return_value=('FAKE_SERVER', replica))) self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica, replica_2])) self.mock_object(db, 'share_snapshot_get_all_for_share', mock.Mock( return_value=snapshots)) mock_instance_get_call = self.mock_object( db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=snapshot_instances)) mock_replica_update_call = self.mock_object(db, 'share_replica_update') mock_export_locs_update_call = self.mock_object( db, 'share_export_locations_update') mock_log_info = self.mock_object(manager.LOG, 'info') mock_log_warning = self.mock_object(manager.LOG, 'warning') mock_log_error = self.mock_object(manager.LOG, 'warning') self.mock_object(db, 'share_instance_access_get', mock.Mock(return_value=fake_access_rules[0])) mock_share_replica_access_update = self.mock_object( db, 'share_instance_update_access_status') driver_call = self.mock_object( self.share_manager.driver, 'create_replica', mock.Mock(return_value=replica)) self.mock_object(self.share_manager, '_get_share_server') self.share_manager.create_share_replica(self.context, replica) mock_replica_update_call.assert_called_once_with( mock.ANY, replica['id'], {'status': constants.STATUS_AVAILABLE, 'replica_state': constants.REPLICA_STATE_IN_SYNC}) self.assertEqual(1, mock_share_replica_access_update.call_count) self.assertTrue(mock_export_locs_update_call.called) self.assertTrue(mock_log_info.called) self.assertFalse(mock_log_warning.called) self.assertFalse(mock_log_error.called) self.assertTrue(driver_call.called) call_args = driver_call.call_args_list[0][0] replica_list_arg = call_args[1] snapshot_list_arg = call_args[4] r_ids = [r['id'] for r in replica_list_arg] for r in (replica, replica_2): self.assertIn(r['id'], r_ids) self.assertEqual(2, len(r_ids)) if has_snapshots: for snapshot_dict in snapshot_list_arg: self.assertTrue('active_replica_snapshot' in snapshot_dict) self.assertTrue('share_replica_snapshot' in snapshot_dict) else: self.assertFalse(mock_instance_get_call.called) def 
test_delete_share_replica_access_rules_exception(self): replica = fake_replica() replica_2 = fake_replica(id='fake_2') self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica, replica_2])) active_replica = fake_replica( id='Current_active_replica', replica_state=constants.REPLICA_STATE_ACTIVE) mock_exception_log = self.mock_object(manager.LOG, 'exception') self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_replicas_get_available_active_replica', mock.Mock(return_value=active_replica)) self.mock_object(self.share_manager, '_get_share_server') self.mock_object(self.share_manager.access_helper, 'update_access_rules') mock_replica_update_call = self.mock_object(db, 'share_replica_update') mock_replica_delete_call = self.mock_object(db, 'share_replica_delete') mock_drv_delete_replica_call = self.mock_object( self.share_manager.driver, 'delete_replica') self.mock_object( self.share_manager.access_helper, 'update_access_rules', mock.Mock(side_effect=exception.ManilaException)) self.assertRaises(exception.ManilaException, self.share_manager.delete_share_replica, self.context, replica['id'], share_id=replica['share_id']) mock_replica_update_call.assert_called_once_with( mock.ANY, replica['id'], {'status': constants.STATUS_ERROR}) self.assertFalse(mock_drv_delete_replica_call.called) self.assertFalse(mock_replica_delete_call.called) self.assertFalse(mock_exception_log.called) def test_delete_share_replica_drv_misbehavior_ignored_with_the_force(self): replica = fake_replica() active_replica = fake_replica(id='Current_active_replica') mock_exception_log = self.mock_object(manager.LOG, 'exception') self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica, active_replica])) self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_replicas_get_available_active_replica', mock.Mock(return_value=active_replica)) self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) self.mock_object(self.share_manager.access_helper, 'update_access_rules') self.mock_object( db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=[])) mock_snap_instance_delete = self.mock_object( db, 'share_snapshot_instance_delete') mock_replica_update_call = self.mock_object(db, 'share_replica_update') mock_replica_delete_call = self.mock_object(db, 'share_replica_delete') mock_drv_delete_replica_call = self.mock_object( self.share_manager.driver, 'delete_replica', mock.Mock(side_effect=exception.ManilaException)) self.mock_object( self.share_manager.access_helper, 'update_access_rules') self.share_manager.delete_share_replica( self.context, replica['id'], share_id=replica['share_id'], force=True) self.assertFalse(mock_replica_update_call.called) self.assertTrue(mock_replica_delete_call.called) self.assertEqual(1, mock_exception_log.call_count) self.assertTrue(mock_drv_delete_replica_call.called) self.assertFalse(mock_snap_instance_delete.called) def test_delete_share_replica_driver_exception(self): replica = fake_replica() active_replica = fake_replica(id='Current_active_replica') self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica, active_replica])) self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_replicas_get_available_active_replica', mock.Mock(return_value=active_replica)) self.mock_object(self.share_manager, '_get_share_server', 
mock.Mock(return_value=None)) mock_snapshot_get_call = self.mock_object( db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=[])) mock_replica_update_call = self.mock_object(db, 'share_replica_update') mock_replica_delete_call = self.mock_object(db, 'share_replica_delete') self.mock_object( self.share_manager.access_helper, 'update_access_rules') mock_drv_delete_replica_call = self.mock_object( self.share_manager.driver, 'delete_replica', mock.Mock(side_effect=exception.ManilaException)) self.assertRaises(exception.ManilaException, self.share_manager.delete_share_replica, self.context, replica['id'], share_id=replica['share_id']) self.assertTrue(mock_replica_update_call.called) self.assertFalse(mock_replica_delete_call.called) self.assertTrue(mock_drv_delete_replica_call.called) self.assertTrue(mock_snapshot_get_call.called) def test_delete_share_replica_both_exceptions_ignored_with_the_force(self): replica = fake_replica() active_replica = fake_replica(id='Current_active_replica') snapshots = [ fakes.fake_snapshot(share_id=replica['id'], status=constants.STATUS_AVAILABLE), fakes.fake_snapshot(share_id=replica['id'], id='test_creating_to_err', status=constants.STATUS_CREATING) ] self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica, active_replica])) mock_exception_log = self.mock_object(manager.LOG, 'exception') self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_replicas_get_available_active_replica', mock.Mock(return_value=active_replica)) self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) self.mock_object( db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=snapshots)) mock_snapshot_instance_delete_call = self.mock_object( db, 'share_snapshot_instance_delete') mock_replica_update_call = self.mock_object(db, 'share_replica_update') mock_replica_delete_call = self.mock_object(db, 'share_replica_delete') self.mock_object( self.share_manager.access_helper, 'update_access_rules', mock.Mock(side_effect=exception.ManilaException)) mock_drv_delete_replica_call = self.mock_object( self.share_manager.driver, 'delete_replica', mock.Mock(side_effect=exception.ManilaException)) self.share_manager.delete_share_replica( self.context, replica['id'], share_id=replica['share_id'], force=True) mock_replica_update_call.assert_called_once_with( mock.ANY, replica['id'], {'status': constants.STATUS_ERROR}) self.assertTrue(mock_replica_delete_call.called) self.assertEqual(2, mock_exception_log.call_count) self.assertTrue(mock_drv_delete_replica_call.called) self.assertEqual(2, mock_snapshot_instance_delete_call.call_count) def test_delete_share_replica(self): replica = fake_replica() active_replica = fake_replica(id='current_active_replica') snapshots = [ fakes.fake_snapshot(share_id=replica['share_id'], status=constants.STATUS_AVAILABLE), fakes.fake_snapshot(share_id=replica['share_id'], id='test_creating_to_err', status=constants.STATUS_CREATING) ] self.mock_object( db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=snapshots)) self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica, active_replica])) self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_replicas_get_available_active_replica', mock.Mock(return_value=active_replica)) self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) mock_info_log = 
self.mock_object(manager.LOG, 'info') mock_snapshot_instance_delete_call = self.mock_object( db, 'share_snapshot_instance_delete') mock_replica_update_call = self.mock_object(db, 'share_replica_update') mock_replica_delete_call = self.mock_object(db, 'share_replica_delete') self.mock_object( self.share_manager.access_helper, 'update_access_rules') mock_drv_delete_replica_call = self.mock_object( self.share_manager.driver, 'delete_replica') self.share_manager.delete_share_replica(self.context, replica) self.assertFalse(mock_replica_update_call.called) self.assertTrue(mock_replica_delete_call.called) self.assertTrue(mock_info_log.called) self.assertTrue(mock_drv_delete_replica_call.called) self.assertEqual(2, mock_snapshot_instance_delete_call.call_count) def test_promote_share_replica_no_active_replica(self): replica = fake_replica() replica_list = [replica] self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(self.share_manager, '_get_share_server') self.mock_object(db, 'share_replicas_get_available_active_replica', mock.Mock(return_value=replica_list)) mock_info_log = self.mock_object(manager.LOG, 'info') mock_driver_call = self.mock_object(self.share_manager.driver, 'promote_replica') mock_replica_update = self.mock_object(db, 'share_replica_update') expected_update_call = mock.call( mock.ANY, replica['id'], {'status': constants.STATUS_AVAILABLE}) self.assertRaises(exception.ReplicationException, self.share_manager.promote_share_replica, self.context, replica) self.assertFalse(mock_info_log.called) self.assertFalse(mock_driver_call.called) mock_replica_update.assert_has_calls([expected_update_call]) def test_promote_share_replica_driver_exception(self): replica = fake_replica() active_replica = fake_replica( id='current_active_replica', replica_state=constants.REPLICA_STATE_ACTIVE) replica_list = [replica, active_replica] self.mock_object(db, 'share_access_get_all_for_share', mock.Mock(return_value=[])) self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(self.share_manager, '_get_share_server') self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=replica_list)) self.mock_object(self.share_manager.driver, 'promote_replica', mock.Mock(side_effect=exception.ManilaException)) mock_info_log = self.mock_object(manager.LOG, 'info') mock_replica_update = self.mock_object(db, 'share_replica_update') expected_update_calls = [mock.call( mock.ANY, r['id'], {'status': constants.STATUS_ERROR}) for r in(replica, active_replica)] self.assertRaises(exception.ManilaException, self.share_manager.promote_share_replica, self.context, replica) mock_replica_update.assert_has_calls(expected_update_calls) self.assertFalse(mock_info_log.called) @ddt.data([], None) def test_promote_share_replica_driver_update_nothing_has_snaps(self, retval): replica = fake_replica() active_replica = fake_replica( id='current_active_replica', replica_state=constants.REPLICA_STATE_ACTIVE) snapshots_instances = [ fakes.fake_snapshot(create_instance=True, share_id=replica['share_id'], status=constants.STATUS_AVAILABLE), fakes.fake_snapshot(create_instance=True, share_id=replica['share_id'], id='test_creating_to_err', status=constants.STATUS_CREATING) ] replica_list = [replica, active_replica] self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_access_get_all_for_share', mock.Mock(return_value=[])) self.mock_object(self.share_manager, '_get_share_server') self.mock_object(db, 
'share_replicas_get_all_by_share', mock.Mock(return_value=replica_list)) self.mock_object( db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=snapshots_instances)) self.mock_object( self.share_manager.driver, 'promote_replica', mock.Mock(return_value=retval)) mock_snap_instance_update = self.mock_object( db, 'share_snapshot_instance_update') mock_info_log = self.mock_object(manager.LOG, 'info') mock_export_locs_update = self.mock_object( db, 'share_export_locations_update') mock_replica_update = self.mock_object(db, 'share_replica_update') call_1 = mock.call(mock.ANY, replica['id'], {'status': constants.STATUS_AVAILABLE, 'replica_state': constants.REPLICA_STATE_ACTIVE}) call_2 = mock.call( mock.ANY, 'current_active_replica', {'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC}) expected_update_calls = [call_1, call_2] self.share_manager.promote_share_replica(self.context, replica) self.assertFalse(mock_export_locs_update.called) mock_replica_update.assert_has_calls(expected_update_calls, any_order=True) mock_snap_instance_update.assert_called_once_with( mock.ANY, 'test_creating_to_err', {'status': constants.STATUS_ERROR}) self.assertEqual(2, mock_info_log.call_count) def test_promote_share_replica_driver_updates_replica_list(self): replica = fake_replica() active_replica = fake_replica( id='current_active_replica', replica_state=constants.REPLICA_STATE_ACTIVE) replica_list = [replica, active_replica, fake_replica(id=3)] updated_replica_list = [ { 'id': replica['id'], 'export_locations': ['TEST1', 'TEST2'], 'replica_state': constants.REPLICA_STATE_ACTIVE, }, { 'id': 'current_active_replica', 'export_locations': 'junk_return_value', 'replica_state': constants.REPLICA_STATE_IN_SYNC, }, { 'id': 'other_replica', 'export_locations': ['TEST1', 'TEST2'], }, ] self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object( db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=[])) self.mock_object(db, 'share_access_get_all_for_share', mock.Mock(return_value=[])) self.mock_object(self.share_manager, '_get_share_server') self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=replica_list)) mock_snap_instance_update = self.mock_object( db, 'share_snapshot_instance_update') self.mock_object( self.share_manager.driver, 'promote_replica', mock.Mock(return_value=updated_replica_list)) mock_info_log = self.mock_object(manager.LOG, 'info') mock_export_locs_update = self.mock_object( db, 'share_export_locations_update') mock_replica_update = self.mock_object(db, 'share_replica_update') reset_replication_change_call = mock.call( mock.ANY, replica['id'], {'replica_state': constants.STATUS_ACTIVE, 'status': constants.STATUS_AVAILABLE}) self.share_manager.promote_share_replica(self.context, replica) self.assertEqual(2, mock_export_locs_update.call_count) self.assertEqual(2, mock_replica_update.call_count) self.assertTrue( reset_replication_change_call in mock_replica_update.mock_calls) self.assertTrue(mock_info_log.called) self.assertFalse(mock_snap_instance_update.called) @ddt.data('openstack1@watson#_pool0', 'openstack1@newton#_pool0') def test_periodic_share_replica_update(self, host): mock_debug_log = self.mock_object(manager.LOG, 'debug') replicas = [ fake_replica(host='openstack1@watson#pool4'), fake_replica(host='openstack1@watson#pool5'), fake_replica(host='openstack1@newton#pool5'), fake_replica(host='openstack1@newton#pool5'), ] self.mock_object(self.share_manager.db, 'share_replicas_get_all', 
mock.Mock(return_value=replicas)) mock_update_method = self.mock_object( self.share_manager, '_share_replica_update') self.share_manager.host = host self.share_manager.periodic_share_replica_update(self.context) self.assertEqual(2, mock_update_method.call_count) self.assertEqual(1, mock_debug_log.call_count) @ddt.data(constants.REPLICA_STATE_IN_SYNC, constants.REPLICA_STATE_OUT_OF_SYNC) def test__share_replica_update_driver_exception(self, replica_state): mock_debug_log = self.mock_object(manager.LOG, 'debug') replica = fake_replica(replica_state=replica_state) active_replica = fake_replica( replica_state=constants.REPLICA_STATE_ACTIVE) self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica, active_replica])) self.mock_object(self.share_manager.db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_server_get', mock.Mock(return_value='fake_share_server')) self.mock_object(self.share_manager.driver, 'update_replica_state', mock.Mock(side_effect=exception.ManilaException)) mock_db_update_call = self.mock_object( self.share_manager.db, 'share_replica_update') self.share_manager._share_replica_update( self.context, replica, share_id=replica['share_id']) mock_db_update_call.assert_called_once_with( self.context, replica['id'], {'replica_state': constants.STATUS_ERROR, 'status': constants.STATUS_ERROR} ) self.assertEqual(1, mock_debug_log.call_count) def test__share_replica_update_driver_exception_ignored(self): mock_debug_log = self.mock_object(manager.LOG, 'debug') replica = fake_replica(replica_state=constants.STATUS_ERROR) active_replica = fake_replica(replica_state=constants.STATUS_ACTIVE) self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica, active_replica])) self.mock_object(self.share_manager.db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_server_get', mock.Mock(return_value='fake_share_server')) self.share_manager.host = replica['host'] self.mock_object(self.share_manager.driver, 'update_replica_state', mock.Mock(side_effect=exception.ManilaException)) mock_db_update_call = self.mock_object( self.share_manager.db, 'share_replica_update') self.share_manager._share_replica_update( self.context, replica, share_id=replica['share_id']) mock_db_update_call.assert_called_once_with( self.context, replica['id'], {'replica_state': constants.STATUS_ERROR, 'status': constants.STATUS_ERROR} ) self.assertEqual(1, mock_debug_log.call_count) @ddt.data({'status': constants.STATUS_AVAILABLE, 'replica_state': constants.REPLICA_STATE_ACTIVE, }, {'status': constants.STATUS_DELETING, 'replica_state': constants.REPLICA_STATE_IN_SYNC, }, {'status': constants.STATUS_CREATING, 'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC, }, {'status': constants.STATUS_MANAGING, 'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC, }, {'status': constants.STATUS_UNMANAGING, 'replica_state': constants.REPLICA_STATE_ACTIVE, }, {'status': constants.STATUS_EXTENDING, 'replica_state': constants.REPLICA_STATE_IN_SYNC, }, {'status': constants.STATUS_SHRINKING, 'replica_state': constants.REPLICA_STATE_IN_SYNC, }) def test__share_replica_update_unqualified_replica(self, state): mock_debug_log = self.mock_object(manager.LOG, 'debug') mock_warning_log = self.mock_object(manager.LOG, 'warning') mock_driver_call = self.mock_object( self.share_manager.driver, 'update_replica_state') mock_db_update_call = self.mock_object( self.share_manager.db, 'share_replica_update') replica = fake_replica(**state) 
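# Replicas in a transitional status (creating, deleting, managing,
# unmanaging, extending, shrinking) or already ACTIVE are skipped by the
# periodic _share_replica_update(): neither driver.update_replica_state nor
# the DB should be touched for them, and nothing should be logged.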
self.mock_object(db, 'share_server_get', mock.Mock(return_value='fake_share_server')) self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.share_manager._share_replica_update(self.context, replica, share_id=replica['share_id']) self.assertFalse(mock_debug_log.called) self.assertFalse(mock_warning_log.called) self.assertFalse(mock_driver_call.called) self.assertFalse(mock_db_update_call.called) @ddt.data(None, constants.REPLICA_STATE_IN_SYNC, constants.REPLICA_STATE_OUT_OF_SYNC, constants.REPLICA_STATE_ACTIVE, constants.STATUS_ERROR) def test__share_replica_update(self, retval): mock_debug_log = self.mock_object(manager.LOG, 'debug') mock_warning_log = self.mock_object(manager.LOG, 'warning') replica_states = [constants.REPLICA_STATE_IN_SYNC, constants.REPLICA_STATE_OUT_OF_SYNC] replica = fake_replica(replica_state=random.choice(replica_states), share_server='fake_share_server') active_replica = fake_replica( id='fake2', replica_state=constants.STATUS_ACTIVE) snapshots = [fakes.fake_snapshot( create_instance=True, aggregate_status=constants.STATUS_AVAILABLE)] snapshot_instances = [ fakes.fake_snapshot_instance(share_instance_id=replica['id']), fakes.fake_snapshot_instance(share_instance_id='fake2'), ] del replica['availability_zone'] self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica, active_replica])) self.mock_object(db, 'share_server_get', mock.Mock(return_value='fake_share_server')) mock_db_update_calls = [] self.mock_object(self.share_manager.db, 'share_replica_get', mock.Mock(return_value=replica)) mock_driver_call = self.mock_object( self.share_manager.driver, 'update_replica_state', mock.Mock(return_value=retval)) mock_db_update_call = self.mock_object( self.share_manager.db, 'share_replica_update') self.mock_object(db, 'share_snapshot_get_all_for_share', mock.Mock(return_value=snapshots)) self.mock_object(db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=snapshot_instances)) self.share_manager._share_replica_update( self.context, replica, share_id=replica['share_id']) if retval == constants.REPLICA_STATE_ACTIVE: self.assertEqual(1, mock_warning_log.call_count) elif retval: self.assertEqual(0, mock_warning_log.call_count) self.assertTrue(mock_driver_call.called) snapshot_list_arg = mock_driver_call.call_args[0][4] self.assertTrue('active_replica_snapshot' in snapshot_list_arg[0]) self.assertTrue('share_replica_snapshot' in snapshot_list_arg[0]) mock_db_update_call.assert_has_calls(mock_db_update_calls) self.assertEqual(1, mock_debug_log.call_count) def test_update_share_replica_replica_not_found(self): replica = fake_replica() self.mock_object( self.share_manager.db, 'share_replica_get', mock.Mock( side_effect=exception.ShareReplicaNotFound(replica_id='fake'))) self.mock_object(self.share_manager, '_get_share_server') driver_call = self.mock_object( self.share_manager, '_share_replica_update') self.assertRaises( exception.ShareReplicaNotFound, self.share_manager.update_share_replica, self.context, replica, share_id=replica['share_id']) self.assertFalse(driver_call.called) def test_update_share_replica_replica(self): replica_update_call = self.mock_object( self.share_manager, '_share_replica_update') self.mock_object(self.share_manager.db, 'share_replica_get') retval = self.share_manager.update_share_replica( self.context, 'fake_replica_id', share_id='fake_share_id') self.assertIsNone(retval) self.assertTrue(replica_update_call.called) def test_create_delete_share_snapshot(self): """Test share's 
snapshot can be created and deleted.""" def _fake_create_snapshot(self, snapshot, **kwargs): snapshot['progress'] = '99%' return snapshot.to_dict() self.mock_object(self.share_manager.driver, "create_snapshot", _fake_create_snapshot) share = db_utils.create_share() share_id = share['id'] snapshot = db_utils.create_snapshot(share_id=share_id) snapshot_id = snapshot['id'] self.share_manager.create_snapshot(self.context, share_id, snapshot_id) self.assertEqual(share_id, db.share_snapshot_get(context.get_admin_context(), snapshot_id).share_id) snap = db.share_snapshot_get(self.context, snapshot_id) self.assertEqual(constants.STATUS_AVAILABLE, snap['status']) self.share_manager.delete_snapshot(self.context, snapshot_id) self.assertRaises(exception.NotFound, db.share_snapshot_get, self.context, snapshot_id) def test_create_delete_share_snapshot_error(self): """Test snapshot can be created and deleted with error.""" def _raise_not_found(self, *args, **kwargs): raise exception.NotFound() self.mock_object(self.share_manager.driver, "create_snapshot", mock.Mock(side_effect=_raise_not_found)) self.mock_object(self.share_manager.driver, "delete_snapshot", mock.Mock(side_effect=_raise_not_found)) share = db_utils.create_share() share_id = share['id'] snapshot = db_utils.create_snapshot(share_id=share_id) snapshot_id = snapshot['id'] self.assertRaises(exception.NotFound, self.share_manager.create_snapshot, self.context, share_id, snapshot_id) snap = db.share_snapshot_get(self.context, snapshot_id) self.assertEqual(constants.STATUS_ERROR, snap['status']) self.assertRaises(exception.NotFound, self.share_manager.delete_snapshot, self.context, snapshot_id) self.assertEqual( constants.STATUS_ERROR_DELETING, db.share_snapshot_get(self.context, snapshot_id).status) self.share_manager.driver.create_snapshot.assert_called_once_with( self.context, utils.IsAMatcher(models.ShareSnapshotInstance), share_server=None) self.share_manager.driver.delete_snapshot.assert_called_once_with( utils.IsAMatcher(context.RequestContext), utils.IsAMatcher(models.ShareSnapshotInstance), share_server=None) def test_delete_snapshot_quota_error(self): share = db_utils.create_share() share_id = share['id'] snapshot = db_utils.create_snapshot(share_id=share_id) snapshot_id = snapshot['id'] snapshot = db_utils.create_snapshot( with_share=True, status=constants.STATUS_AVAILABLE) self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(side_effect=exception.QuotaError('fake'))) self.mock_object(quota.QUOTAS, 'commit') self.share_manager.delete_snapshot(self.context, snapshot_id) quota.QUOTAS.reserve.assert_called_once_with( mock.ANY, project_id=six.text_type(snapshot['project_id']), snapshots=-1, snapshot_gigabytes=-snapshot['size'], user_id=six.text_type(snapshot['user_id']) ) self.assertFalse(quota.QUOTAS.commit.called) def test_delete_share_instance_if_busy(self): """Test snapshot could not be deleted if busy.""" def _raise_share_snapshot_is_busy(self, *args, **kwargs): raise exception.ShareSnapshotIsBusy(snapshot_name='fakename') self.mock_object(self.share_manager.driver, "delete_snapshot", mock.Mock(side_effect=_raise_share_snapshot_is_busy)) share = db_utils.create_share(status=constants.STATUS_ACTIVE) snapshot = db_utils.create_snapshot(share_id=share['id']) snapshot_id = snapshot['id'] self.share_manager.delete_snapshot(self.context, snapshot_id) snap = db.share_snapshot_get(self.context, snapshot_id) self.assertEqual(constants.STATUS_AVAILABLE, snap['status']) self.share_manager.driver.delete_snapshot.assert_called_once_with( 
utils.IsAMatcher(context.RequestContext), utils.IsAMatcher(models.ShareSnapshotInstance), share_server=None) def test_create_share_instance_with_share_network_dhss_false(self): manager.CONF.set_default('driver_handles_share_servers', False) self.mock_object( self.share_manager.driver.configuration, 'safe_get', mock.Mock(return_value=False)) share_network_id = 'fake_sn' share_instance = db_utils.create_share( share_network_id=share_network_id).instance self.mock_object( self.share_manager.db, 'share_instance_get', mock.Mock(return_value=share_instance)) self.mock_object(self.share_manager.db, 'share_instance_update') self.assertRaisesRegex( exception.ManilaException, '.*%s.*' % share_instance['id'], self.share_manager.create_share_instance, self.context, share_instance['id']) self.share_manager.db.share_instance_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_instance['id'], with_share_data=True ) self.share_manager.db.share_instance_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_instance['id'], {'status': constants.STATUS_ERROR}) def test_create_share_instance_with_share_network_server_not_exists(self): """Test share can be created without share server.""" share_net = db_utils.create_share_network() share = db_utils.create_share(share_network_id=share_net['id']) share_id = share['id'] def fake_setup_server(context, share_network, *args, **kwargs): return db_utils.create_share_server( share_network_id=share_network['id'], host='fake_host') self.mock_object(manager.LOG, 'info') self.share_manager.driver.create_share = mock.Mock( return_value='fake_location') self.share_manager._setup_server = fake_setup_server self.share_manager.create_share_instance(self.context, share.instance['id']) self.assertEqual(share_id, db.share_get(context.get_admin_context(), share_id).id) manager.LOG.info.assert_called_with(mock.ANY, share.instance['id']) def test_create_share_instance_with_share_network_server_fail(self): fake_share = db_utils.create_share(share_network_id='fake_sn_id', size=1) fake_server = { 'id': 'fake_srv_id', 'status': constants.STATUS_CREATING, } self.mock_object(db, 'share_server_create', mock.Mock(return_value=fake_server)) self.mock_object(db, 'share_instance_update', mock.Mock(return_value=fake_share.instance)) self.mock_object(db, 'share_instance_get', mock.Mock(return_value=fake_share.instance)) self.mock_object(manager.LOG, 'error') def raise_share_server_not_found(*args, **kwargs): raise exception.ShareServerNotFound( share_server_id=fake_server['id']) def raise_manila_exception(*args, **kwargs): raise exception.ManilaException() self.mock_object(db, 'share_server_get_all_by_host_and_share_net_valid', mock.Mock(side_effect=raise_share_server_not_found)) self.mock_object(self.share_manager, '_setup_server', mock.Mock(side_effect=raise_manila_exception)) self.assertRaises( exception.ManilaException, self.share_manager.create_share_instance, self.context, fake_share.instance['id'], ) db.share_server_get_all_by_host_and_share_net_valid.\ assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.share_manager.host, fake_share['share_network_id'], ) db.share_server_create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), mock.ANY) db.share_instance_update.assert_has_calls([ mock.call( utils.IsAMatcher(context.RequestContext), fake_share.instance['id'], {'status': constants.STATUS_ERROR}, ) ]) self.share_manager._setup_server.assert_called_once_with( utils.IsAMatcher(context.RequestContext), 
fake_server) manager.LOG.error.assert_called_with(mock.ANY, fake_share.instance['id']) def test_create_share_instance_with_share_network_not_found(self): """Test creation fails if share network not found.""" self.mock_object(manager.LOG, 'error') share = db_utils.create_share(share_network_id='fake-net-id') share_id = share['id'] self.assertRaises( exception.ShareNetworkNotFound, self.share_manager.create_share_instance, self.context, share.instance['id'] ) manager.LOG.error.assert_called_with(mock.ANY, share.instance['id']) shr = db.share_get(self.context, share_id) self.assertEqual(constants.STATUS_ERROR, shr['status']) def test_create_share_instance_with_share_network_server_exists(self): """Test share can be created with existing share server.""" share_net = db_utils.create_share_network() share = db_utils.create_share(share_network_id=share_net['id']) share_srv = db_utils.create_share_server( share_network_id=share_net['id'], host=self.share_manager.host) share_id = share['id'] self.mock_object(manager.LOG, 'info') driver_mock = mock.Mock() driver_mock.create_share.return_value = "fake_location" driver_mock.choose_share_server_compatible_with_share.return_value = ( share_srv ) self.share_manager.driver = driver_mock self.share_manager.create_share_instance(self.context, share.instance['id']) self.assertFalse(self.share_manager.driver.setup_network.called) self.assertEqual(share_id, db.share_get(context.get_admin_context(), share_id).id) shr = db.share_get(self.context, share_id) self.assertEqual(shr['status'], constants.STATUS_AVAILABLE) self.assertEqual(shr['share_server_id'], share_srv['id']) self.assertTrue(len(shr['export_location']) > 0) self.assertEqual(1, len(shr['export_locations'])) manager.LOG.info.assert_called_with(mock.ANY, share.instance['id']) @ddt.data('export_location', 'export_locations') def test_create_share_instance_with_error_in_driver(self, details_key): """Test db updates if share creation fails in driver.""" share = db_utils.create_share() share_id = share['id'] some_data = 'fake_location' self.share_manager.driver = mock.Mock() e = exception.ManilaException(detail_data={details_key: some_data}) self.share_manager.driver.create_share.side_effect = e self.assertRaises( exception.ManilaException, self.share_manager.create_share_instance, self.context, share.instance['id'] ) self.assertTrue(self.share_manager.driver.create_share.called) shr = db.share_get(self.context, share_id) self.assertEqual(some_data, shr['export_location']) def test_create_share_instance_with_server_created(self): """Test share can be created and share server is created.""" share_net = db_utils.create_share_network() share = db_utils.create_share(share_network_id=share_net['id']) db_utils.create_share_server( share_network_id=share_net['id'], host=self.share_manager.host, status=constants.STATUS_ERROR) share_id = share['id'] fake_server = { 'id': 'fake_srv_id', 'status': constants.STATUS_CREATING, } self.mock_object(db, 'share_server_create', mock.Mock(return_value=fake_server)) self.mock_object(self.share_manager, '_setup_server', mock.Mock(return_value=fake_server)) self.share_manager.create_share_instance(self.context, share.instance['id']) self.assertEqual(share_id, db.share_get(context.get_admin_context(), share_id).id) shr = db.share_get(self.context, share_id) self.assertEqual(constants.STATUS_AVAILABLE, shr['status']) self.assertEqual('fake_srv_id', shr['share_server_id']) db.share_server_create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), mock.ANY) 
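# Because the pre-existing share server for this network is in STATUS_ERROR,
# the manager has to create a brand new server record and run _setup_server
# for it instead of reusing the errored one; the new 'fake_srv_id' then ends
# up as the instance's share_server_id (checked above).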
self.share_manager._setup_server.assert_called_once_with( utils.IsAMatcher(context.RequestContext), fake_server) def test_create_share_instance_update_replica_state(self): share_net = db_utils.create_share_network() share = db_utils.create_share(share_network_id=share_net['id'], replication_type='dr') db_utils.create_share_server( share_network_id=share_net['id'], host=self.share_manager.host, status=constants.STATUS_ERROR) share_id = share['id'] fake_server = { 'id': 'fake_srv_id', 'status': constants.STATUS_CREATING, } self.mock_object(db, 'share_server_create', mock.Mock(return_value=fake_server)) self.mock_object(self.share_manager, '_setup_server', mock.Mock(return_value=fake_server)) self.share_manager.create_share_instance(self.context, share.instance['id']) self.assertEqual(share_id, db.share_get(context.get_admin_context(), share_id).id) shr = db.share_get(self.context, share_id) shr_instances = db.share_instances_get_all_by_share( self.context, shr['id']) self.assertEqual(1, len(shr_instances)) self.assertEqual(constants.STATUS_AVAILABLE, shr['status']) self.assertEqual( constants.REPLICA_STATE_ACTIVE, shr_instances[0]['replica_state']) self.assertEqual('fake_srv_id', shr['share_server_id']) db.share_server_create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), mock.ANY) self.share_manager._setup_server.assert_called_once_with( utils.IsAMatcher(context.RequestContext), fake_server) @ddt.data(True, False) def test_create_delete_share_instance_error(self, exception_update_access): """Test share can be created and deleted with error.""" def _raise_exception(self, *args, **kwargs): raise exception.ManilaException('fake') self.mock_object(self.share_manager.driver, "create_share", mock.Mock(side_effect=_raise_exception)) self.mock_object(self.share_manager.driver, "delete_share", mock.Mock(side_effect=_raise_exception)) if exception_update_access: self.mock_object( self.share_manager.access_helper, "update_access_rules", mock.Mock(side_effect=_raise_exception)) share = db_utils.create_share() share_id = share['id'] self.assertRaises(exception.ManilaException, self.share_manager.create_share_instance, self.context, share.instance['id']) shr = db.share_get(self.context, share_id) self.assertEqual(constants.STATUS_ERROR, shr['status']) self.assertRaises(exception.ManilaException, self.share_manager.delete_share_instance, self.context, share.instance['id']) shr = db.share_get(self.context, share_id) self.assertEqual(constants.STATUS_ERROR_DELETING, shr['status']) self.share_manager.driver.create_share.assert_called_once_with( utils.IsAMatcher(context.RequestContext), utils.IsAMatcher(models.ShareInstance), share_server=None) if not exception_update_access: self.share_manager.driver.delete_share.assert_called_once_with( utils.IsAMatcher(context.RequestContext), utils.IsAMatcher(models.ShareInstance), share_server=None) def test_create_share_instance_update_availability_zone(self): share = db_utils.create_share(availability_zone=None) share_id = share['id'] self.share_manager.create_share_instance( self.context, share.instance['id']) actual_share = db.share_get(context.get_admin_context(), share_id) self.assertIsNotNone(actual_share.availability_zone) self.assertEqual(manager.CONF.storage_availability_zone, actual_share.availability_zone) def test_provide_share_server_for_share_incompatible_servers(self): fake_exception = exception.ManilaException("fake") fake_share_server = {'id': 'fake'} share = db_utils.create_share() self.mock_object(db, 
'share_server_get_all_by_host_and_share_net_valid', mock.Mock(return_value=[fake_share_server])) self.mock_object( self.share_manager.driver, "choose_share_server_compatible_with_share", mock.Mock(side_effect=fake_exception) ) self.assertRaises(exception.ManilaException, self.share_manager._provide_share_server_for_share, self.context, "fake_id", share.instance) driver_mock = self.share_manager.driver driver_method_mock = ( driver_mock.choose_share_server_compatible_with_share ) driver_method_mock.assert_called_once_with( self.context, [fake_share_server], share.instance, snapshot=None, consistency_group=None) def test_provide_share_server_for_share_invalid_arguments(self): self.assertRaises(ValueError, self.share_manager._provide_share_server_for_share, self.context, None, None) def test_provide_share_server_for_share_parent_ss_not_found(self): fake_parent_id = "fake_server_id" fake_exception = exception.ShareServerNotFound("fake") share = db_utils.create_share() fake_snapshot = { 'share': { 'instance': { 'share_server_id': fake_parent_id } } } self.mock_object(db, 'share_server_get', mock.Mock(side_effect=fake_exception)) self.assertRaises(exception.ShareServerNotFound, self.share_manager._provide_share_server_for_share, self.context, "fake_id", share.instance, snapshot=fake_snapshot) db.share_server_get.assert_called_once_with( self.context, fake_parent_id) def test_provide_share_server_for_share_parent_ss_invalid(self): fake_parent_id = "fake_server_id" share = db_utils.create_share() fake_snapshot = { 'share': { 'instance': { 'share_server_id': fake_parent_id } } } fake_parent_share_server = {'status': 'fake'} self.mock_object(db, 'share_server_get', mock.Mock(return_value=fake_parent_share_server)) self.assertRaises(exception.InvalidShareServer, self.share_manager._provide_share_server_for_share, self.context, "fake_id", share.instance, snapshot=fake_snapshot) db.share_server_get.assert_called_once_with( self.context, fake_parent_id) def test_provide_share_server_for_cg_incompatible_servers(self): fake_exception = exception.ManilaException("fake") fake_share_server = {'id': 'fake'} cg = db_utils.create_consistency_group() self.mock_object(db, 'share_server_get_all_by_host_and_share_net_valid', mock.Mock(return_value=[fake_share_server])) self.mock_object( self.share_manager.driver, "choose_share_server_compatible_with_cg", mock.Mock(side_effect=fake_exception) ) self.assertRaises(exception.ManilaException, self.share_manager._provide_share_server_for_cg, self.context, "fake_id", cg) driver_mock = self.share_manager.driver driver_method_mock = ( driver_mock.choose_share_server_compatible_with_cg ) driver_method_mock.assert_called_once_with( self.context, [fake_share_server], cg, cgsnapshot=None) def test_provide_share_server_for_cg_invalid_arguments(self): self.assertRaises(exception.InvalidInput, self.share_manager._provide_share_server_for_cg, self.context, None, None) def test_manage_share_invalid_driver(self): self.mock_object(self.share_manager, 'driver', mock.Mock()) self.share_manager.driver.driver_handles_share_servers = True self.mock_object(share_types, 'get_share_type_extra_specs', mock.Mock(return_value='False')) self.mock_object(self.share_manager.db, 'share_update', mock.Mock()) share = db_utils.create_share() share_id = share['id'] self.assertRaises( exception.InvalidDriverMode, self.share_manager.manage_share, self.context, share_id, {}) self.share_manager.db.share_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_id, {'status': 
constants.STATUS_MANAGE_ERROR, 'size': 1}) def test_manage_share_invalid_share_type(self): self.mock_object(self.share_manager, 'driver', mock.Mock()) self.share_manager.driver.driver_handles_share_servers = False self.mock_object(share_types, 'get_share_type_extra_specs', mock.Mock(return_value='True')) self.mock_object(self.share_manager.db, 'share_update', mock.Mock()) share = db_utils.create_share() share_id = share['id'] self.assertRaises( exception.ManageExistingShareTypeMismatch, self.share_manager.manage_share, self.context, share_id, {}) self.share_manager.db.share_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_id, {'status': constants.STATUS_MANAGE_ERROR, 'size': 1}) def test_manage_share_driver_exception(self): CustomException = type('CustomException', (Exception,), dict()) self.mock_object(self.share_manager, 'driver', mock.Mock()) self.share_manager.driver.driver_handles_share_servers = False self.mock_object(self.share_manager.driver, 'manage_existing', mock.Mock(side_effect=CustomException)) self.mock_object(share_types, 'get_share_type_extra_specs', mock.Mock(return_value='False')) self.mock_object(self.share_manager.db, 'share_update', mock.Mock()) share = db_utils.create_share() share_id = share['id'] driver_options = {'fake': 'fake'} self.assertRaises( CustomException, self.share_manager.manage_share, self.context, share_id, driver_options) self.share_manager.driver.manage_existing.\ assert_called_once_with(mock.ANY, driver_options) self.share_manager.db.share_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_id, {'status': constants.STATUS_MANAGE_ERROR, 'size': 1}) def test_manage_share_invalid_size(self): self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = False self.mock_object(share_types, 'get_share_type_extra_specs', mock.Mock(return_value='False')) self.mock_object(self.share_manager.driver, "manage_existing", mock.Mock(return_value=None)) self.mock_object(self.share_manager.db, 'share_update', mock.Mock()) share = db_utils.create_share() share_id = share['id'] driver_options = {'fake': 'fake'} self.assertRaises( exception.InvalidShare, self.share_manager.manage_share, self.context, share_id, driver_options) self.share_manager.driver.manage_existing.\ assert_called_once_with(mock.ANY, driver_options) self.share_manager.db.share_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_id, {'status': constants.STATUS_MANAGE_ERROR, 'size': 1}) def test_manage_share_quota_error(self): self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = False self.mock_object(share_types, 'get_share_type_extra_specs', mock.Mock(return_value='False')) self.mock_object(self.share_manager.driver, "manage_existing", mock.Mock(return_value={'size': 3})) self.mock_object(self.share_manager, '_update_quota_usages', mock.Mock(side_effect=exception.QuotaError)) self.mock_object(self.share_manager.db, 'share_update', mock.Mock()) share = db_utils.create_share() share_id = share['id'] driver_options = {'fake': 'fake'} self.assertRaises( exception.QuotaError, self.share_manager.manage_share, self.context, share_id, driver_options) self.share_manager.driver.manage_existing.\ assert_called_once_with(mock.ANY, driver_options) self.share_manager.db.share_update.assert_called_once_with( mock.ANY, share_id, {'status': constants.STATUS_MANAGE_ERROR, 'size': 1}) 
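# NOTE: the manage_share error-path tests above share one pattern: a failure is
# injected either through mock.Mock(side_effect=...) or through a driver return
# value the manager cannot accept, the exception is expected to propagate via
# assertRaises, and the share is still expected to be flipped to
# STATUS_MANAGE_ERROR with the placeholder size of 1. A minimal sketch of the
# side_effect behaviour this relies on (plain unittest.mock, no manila code):
#
#     import unittest.mock as mock
#
#     failing = mock.Mock(side_effect=ValueError('boom'))
#     try:
#         failing('anything')
#     except ValueError:
#         pass
#     failing.assert_called_once_with('anything')  # the call is still recorded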
self.share_manager._update_quota_usages.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share['project_id'], {'shares': 1, 'gigabytes': 3}) @ddt.data( {'size': 1}, {'size': 2, 'name': 'fake'}, {'size': 3, 'export_locations': ['foo', 'bar', 'quuz']}) def test_manage_share_valid_share(self, driver_data): export_locations = driver_data.get('export_locations') self.mock_object(self.share_manager.db, 'share_update', mock.Mock()) self.mock_object(self.share_manager, 'driver', mock.Mock()) self.mock_object(self.share_manager, '_update_quota_usages', mock.Mock()) self.mock_object( self.share_manager.db, 'share_export_locations_update', mock.Mock(side_effect=( self.share_manager.db.share_export_locations_update))) self.share_manager.driver.driver_handles_share_servers = False self.mock_object(share_types, 'get_share_type_extra_specs', mock.Mock(return_value='False')) self.mock_object(self.share_manager.driver, "manage_existing", mock.Mock(return_value=driver_data)) share = db_utils.create_share() share_id = share['id'] driver_options = {'fake': 'fake'} self.share_manager.manage_share(self.context, share_id, driver_options) self.share_manager.driver.manage_existing.\ assert_called_once_with(mock.ANY, driver_options) if export_locations: self.share_manager.db.share_export_locations_update.\ assert_called_once_with( utils.IsAMatcher(context.RequestContext), share.instance['id'], export_locations, delete=True) else: self.assertFalse( self.share_manager.db.share_export_locations_update.called) valid_share_data = { 'status': constants.STATUS_AVAILABLE, 'launched_at': mock.ANY} valid_share_data.update(driver_data) self.share_manager.db.share_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_id, valid_share_data) def test_update_quota_usages_new(self): self.mock_object(self.share_manager.db, 'quota_usage_get', mock.Mock(return_value={'in_use': 1})) self.mock_object(self.share_manager.db, 'quota_usage_update') project_id = 'fake_project_id' resource_name = 'fake' usage = 1 self.share_manager._update_quota_usages( self.context, project_id, {resource_name: usage}) self.share_manager.db.quota_usage_get.assert_called_once_with( mock.ANY, project_id, resource_name, mock.ANY) self.share_manager.db.quota_usage_update.assert_called_once_with( mock.ANY, project_id, mock.ANY, resource_name, in_use=2) def test_update_quota_usages_update(self): project_id = 'fake_project_id' resource_name = 'fake' usage = 1 side_effect = exception.QuotaUsageNotFound(project_id=project_id) self.mock_object( self.share_manager.db, 'quota_usage_get', mock.Mock(side_effect=side_effect)) self.mock_object(self.share_manager.db, 'quota_usage_create') self.share_manager._update_quota_usages( self.context, project_id, {resource_name: usage}) self.share_manager.db.quota_usage_get.assert_called_once_with( mock.ANY, project_id, resource_name, mock.ANY) self.share_manager.db.quota_usage_create.assert_called_once_with( mock.ANY, project_id, mock.ANY, resource_name, usage) def _setup_unmanage_mocks(self, mock_driver=True, mock_unmanage=None): if mock_driver: self.mock_object(self.share_manager, 'driver') if mock_unmanage: self.mock_object(self.share_manager.driver, "unmanage", mock_unmanage) self.mock_object(self.share_manager.db, 'share_update') self.mock_object(self.share_manager.db, 'share_instance_delete') @ddt.data(True, False) def test_unmanage_share_invalid_driver(self, driver_handles_share_servers): self._setup_unmanage_mocks() self.share_manager.driver.driver_handles_share_servers = ( 
driver_handles_share_servers ) share_net = db_utils.create_share_network() share_srv = db_utils.create_share_server( share_network_id=share_net['id'], host=self.share_manager.host) share = db_utils.create_share(share_network_id=share_net['id'], share_server_id=share_srv['id']) self.share_manager.unmanage_share(self.context, share['id']) self.share_manager.db.share_update.assert_called_once_with( mock.ANY, share['id'], {'status': constants.STATUS_UNMANAGE_ERROR}) def test_unmanage_share_invalid_share(self): unmanage = mock.Mock(side_effect=exception.InvalidShare(reason="fake")) self._setup_unmanage_mocks(mock_driver=False, mock_unmanage=unmanage) share = db_utils.create_share() self.share_manager.unmanage_share(self.context, share['id']) self.share_manager.db.share_update.assert_called_once_with( mock.ANY, share['id'], {'status': constants.STATUS_UNMANAGE_ERROR}) def test_unmanage_share_valid_share(self): manager.CONF.set_default('driver_handles_share_servers', False) self._setup_unmanage_mocks(mock_driver=False, mock_unmanage=mock.Mock()) share = db_utils.create_share() share_id = share['id'] share_instance_id = share.instance['id'] self.share_manager.unmanage_share(self.context, share_id) self.share_manager.driver.unmanage.\ assert_called_once_with(mock.ANY) self.share_manager.db.share_instance_delete.assert_called_once_with( mock.ANY, share_instance_id) def test_unmanage_share_valid_share_with_quota_error(self): manager.CONF.set_default('driver_handles_share_servers', False) self._setup_unmanage_mocks(mock_driver=False, mock_unmanage=mock.Mock()) self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(side_effect=Exception())) share = db_utils.create_share() share_instance_id = share.instance['id'] self.share_manager.unmanage_share(self.context, share['id']) self.share_manager.driver.unmanage.\ assert_called_once_with(mock.ANY) self.share_manager.db.share_instance_delete.assert_called_once_with( mock.ANY, share_instance_id) def test_unmanage_share_remove_access_rules_error(self): manager.CONF.set_default('driver_handles_share_servers', False) manager.CONF.unmanage_remove_access_rules = True self._setup_unmanage_mocks(mock_driver=False, mock_unmanage=mock.Mock()) self.mock_object( self.share_manager.access_helper, 'update_access_rules', mock.Mock(side_effect=Exception()) ) self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(return_value=[])) share = db_utils.create_share() self.share_manager.unmanage_share(self.context, share['id']) self.share_manager.db.share_update.assert_called_once_with( mock.ANY, share['id'], {'status': constants.STATUS_UNMANAGE_ERROR}) def test_unmanage_share_valid_share_remove_access_rules(self): manager.CONF.set_default('driver_handles_share_servers', False) manager.CONF.unmanage_remove_access_rules = True self._setup_unmanage_mocks(mock_driver=False, mock_unmanage=mock.Mock()) smanager = self.share_manager self.mock_object(smanager.access_helper, 'update_access_rules') self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(return_value=[])) share = db_utils.create_share() share_id = share['id'] share_instance_id = share.instance['id'] smanager.unmanage_share(self.context, share_id) smanager.driver.unmanage.assert_called_once_with(mock.ANY) smanager.access_helper.update_access_rules.assert_called_once_with( mock.ANY, mock.ANY, delete_rules='all', share_server=None ) smanager.db.share_instance_delete.assert_called_once_with( mock.ANY, share_instance_id) def test_delete_share_instance_share_server_not_found(self): share_net = db_utils.create_share_network() share = 
db_utils.create_share(share_network_id=share_net['id'], share_server_id='fake-id') self.assertRaises( exception.ShareServerNotFound, self.share_manager.delete_share_instance, self.context, share.instance['id'] ) @ddt.data(True, False) def test_delete_share_instance_last_on_srv_with_sec_service( self, with_details): share_net = db_utils.create_share_network() sec_service = db_utils.create_security_service( share_network_id=share_net['id']) backend_details = dict( security_service_ldap=jsonutils.dumps(sec_service)) if with_details: share_srv = db_utils.create_share_server( share_network_id=share_net['id'], host=self.share_manager.host, backend_details=backend_details) else: share_srv = db_utils.create_share_server( share_network_id=share_net['id'], host=self.share_manager.host) db.share_server_backend_details_set( context.get_admin_context(), share_srv['id'], backend_details) share = db_utils.create_share(share_network_id=share_net['id'], share_server_id=share_srv['id']) self.share_manager.driver = mock.Mock() manager.CONF.delete_share_server_with_last_share = True self.share_manager.delete_share_instance(self.context, share.instance['id']) self.share_manager.driver.teardown_server.assert_called_once_with( server_details=backend_details, security_services=[jsonutils.loads( backend_details['security_service_ldap'])]) @ddt.data({'force': True, 'side_effect': 'update_access'}, {'force': True, 'side_effect': 'delete_share'}, {'force': False, 'side_effect': None}) @ddt.unpack def test_delete_share_instance_last_on_server(self, force, side_effect): share_net = db_utils.create_share_network() share_srv = db_utils.create_share_server( share_network_id=share_net['id'], host=self.share_manager.host ) share = db_utils.create_share(share_network_id=share_net['id'], share_server_id=share_srv['id']) self.share_manager.driver = mock.Mock() if side_effect == 'update_access': self.mock_object( self.share_manager.access_helper, 'update_access_rules', mock.Mock(side_effect=Exception('fake'))) if side_effect == 'delete_share': self.mock_object(self.share_manager.driver, 'delete_share', mock.Mock(side_effect=Exception('fake'))) self.mock_object(manager.LOG, 'error') manager.CONF.delete_share_server_with_last_share = True self.share_manager.delete_share_instance( self.context, share.instance['id'], force=force) self.share_manager.driver.teardown_server.assert_called_once_with( server_details=share_srv.get('backend_details'), security_services=[]) self.assertEqual(force, manager.LOG.error.called) def test_delete_share_instance_last_on_server_deletion_disabled(self): share_net = db_utils.create_share_network() share_srv = db_utils.create_share_server( share_network_id=share_net['id'], host=self.share_manager.host ) share = db_utils.create_share(share_network_id=share_net['id'], share_server_id=share_srv['id']) manager.CONF.delete_share_server_with_last_share = False self.share_manager.driver = mock.Mock() self.share_manager.delete_share_instance(self.context, share.instance['id']) self.assertFalse(self.share_manager.driver.teardown_network.called) def test_delete_share_instance_not_last_on_server(self): share_net = db_utils.create_share_network() share_srv = db_utils.create_share_server( share_network_id=share_net['id'], host=self.share_manager.host ) share = db_utils.create_share(share_network_id=share_net['id'], share_server_id=share_srv['id']) db_utils.create_share(share_network_id=share_net['id'], share_server_id=share_srv['id']) manager.CONF.delete_share_server_with_last_share = True self.share_manager.driver 
= mock.Mock() self.share_manager.delete_share_instance(self.context, share.instance['id']) self.assertFalse(self.share_manager.driver.teardown_network.called) @ddt.data('update_access', 'delete_share') def test_delete_share_instance_not_found(self, side_effect): share_net = db_utils.create_share_network() share_srv = db_utils.create_share_server( share_network_id=share_net['id'], host=self.share_manager.host) share = db_utils.create_share(share_network_id=share_net['id'], share_server_id=share_srv['id']) access = db_utils.create_access(share_id=share['id']) db_utils.create_share(share_network_id=share_net['id'], share_server_id=share_srv['id']) manager.CONF.delete_share_server_with_last_share = False self.mock_object(db, 'share_server_get', mock.Mock(return_value=share_srv)) self.mock_object(db, 'share_instance_get', mock.Mock(return_value=share.instance)) self.mock_object(db, 'share_access_get_all_for_instance', mock.Mock(return_value=[access])) self.share_manager.driver = mock.Mock() self.share_manager.access_helper.driver = mock.Mock() if side_effect == 'update_access': self.mock_object( self.share_manager.access_helper.driver, 'update_access', mock.Mock(side_effect=exception.ShareResourceNotFound( share_id=share['id']))) if side_effect == 'delete_share': self.mock_object( self.share_manager.driver, 'delete_share', mock.Mock(side_effect=exception.ShareResourceNotFound( share_id=share['id']))) self.mock_object( self.share_manager.access_helper, '_check_needs_refresh', mock.Mock(return_value=False) ) self.mock_object(manager.LOG, 'warning') self.share_manager.delete_share_instance(self.context, share.instance['id']) self.assertFalse(self.share_manager.driver.teardown_network.called) (self.share_manager.access_helper.driver.update_access. assert_called_once_with(utils.IsAMatcher( context.RequestContext), share.instance, [], add_rules=[], delete_rules=[access], share_server=share_srv)) self.assertTrue(manager.LOG.warning.called) def test_allow_deny_access(self): """Test access rules to share can be created and deleted.""" self.mock_object(share_access.LOG, 'info') share = db_utils.create_share() share_id = share['id'] share_instance = db_utils.create_share_instance( share_id=share_id, access_rules_status=constants.STATUS_OUT_OF_SYNC) share_instance_id = share_instance['id'] access = db_utils.create_access(share_id=share_id, share_instance_id=share_instance_id) access_id = access['id'] self.share_manager.allow_access(self.context, share_instance_id, [access_id]) self.assertEqual('active', db.share_instance_get( self.context, share_instance_id).access_rules_status) share_access.LOG.info.assert_called_with(mock.ANY, share_instance_id) share_access.LOG.info.reset_mock() self.share_manager.deny_access(self.context, share_instance_id, [access_id]) share_access.LOG.info.assert_called_with(mock.ANY, share_instance_id) share_access.LOG.info.reset_mock() def test_allow_deny_access_error(self): """Test access rules to share can be created and deleted with error.""" def _fake_allow_access(self, *args, **kwargs): raise exception.NotFound() def _fake_deny_access(self, *args, **kwargs): raise exception.NotFound() self.mock_object(self.share_manager.access_helper.driver, "allow_access", _fake_allow_access) self.mock_object(self.share_manager.access_helper.driver, "deny_access", _fake_deny_access) share = db_utils.create_share() share_id = share['id'] share_instance = db_utils.create_share_instance( share_id=share_id, access_rules_status=constants.STATUS_OUT_OF_SYNC) share_instance_id = share_instance['id'] 
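# NOTE: in this error-path test the access helper driver's allow_access and
# deny_access are replaced with functions that raise NotFound, so both manager
# calls below are expected to surface exception.ManilaException and to leave
# the instance's access_rules_status set to STATUS_ERROR; the validate() helper
# defined next checks exactly that for each method. A minimal sketch of
# injecting such a failure with mock.patch.object (assuming some object 'obj'
# with a 'ping' method; the names are illustrative only):
#
#     import unittest.mock as mock
#
#     with mock.patch.object(obj, 'ping', side_effect=RuntimeError('down')):
#         obj.ping()  # raises RuntimeError, and the call is recorded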
access = db_utils.create_access(share_id=share_id, share_instance_id=share_instance_id) access_id = access['id'] def validate(method): self.assertRaises(exception.ManilaException, method, self.context, share_instance_id, [access_id]) inst = db.share_instance_get(self.context, share_instance_id) self.assertEqual(constants.STATUS_ERROR, inst['access_rules_status']) validate(self.share_manager.allow_access) validate(self.share_manager.deny_access) def test_setup_server(self): # Setup required test data share_server = { 'id': 'fake_id', 'share_network_id': 'fake_sn_id', } metadata = {'fake_metadata_key': 'fake_metadata_value'} share_network = {'id': 'fake_sn_id'} network_info = {'security_services': []} for ss_type in constants.SECURITY_SERVICES_ALLOWED_TYPES: network_info['security_services'].append({ 'name': 'fake_name' + ss_type, 'domain': 'fake_domain' + ss_type, 'server': 'fake_server' + ss_type, 'dns_ip': 'fake_dns_ip' + ss_type, 'user': 'fake_user' + ss_type, 'type': ss_type, 'password': 'fake_password' + ss_type, }) sec_services = network_info['security_services'] server_info = {'fake_server_info_key': 'fake_server_info_value'} network_info['network_type'] = 'fake_network_type' # mock required stuff self.mock_object(self.share_manager.db, 'share_network_get', mock.Mock(return_value=share_network)) self.mock_object(self.share_manager.driver, 'allocate_network') self.mock_object(self.share_manager, '_form_server_setup_info', mock.Mock(return_value=network_info)) self.mock_object(self.share_manager, '_validate_segmentation_id') self.mock_object(self.share_manager.driver, 'setup_server', mock.Mock(return_value=server_info)) self.mock_object(self.share_manager.db, 'share_server_backend_details_set') self.mock_object(self.share_manager.db, 'share_server_update', mock.Mock(return_value=share_server)) # execute method _setup_server result = self.share_manager._setup_server( self.context, share_server, metadata=metadata) # verify results self.assertEqual(share_server, result) self.share_manager.db.share_network_get.assert_has_calls([ mock.call(self.context, share_server['share_network_id']), mock.call(self.context, share_server['share_network_id']), ]) self.share_manager.driver.allocate_network.assert_called_once_with( self.context, share_server, share_network) self.share_manager._form_server_setup_info.assert_called_once_with( self.context, share_server, share_network) self.share_manager._validate_segmentation_id.assert_called_once_with( network_info) self.share_manager.driver.setup_server.assert_called_once_with( network_info, metadata=metadata) self.share_manager.db.share_server_backend_details_set.\ assert_has_calls([ mock.call(self.context, share_server['id'], {'security_service_' + sec_services[0]['type']: jsonutils.dumps(sec_services[0])}), mock.call(self.context, share_server['id'], {'security_service_' + sec_services[1]['type']: jsonutils.dumps(sec_services[1])}), mock.call(self.context, share_server['id'], {'security_service_' + sec_services[2]['type']: jsonutils.dumps(sec_services[2])}), mock.call(self.context, share_server['id'], server_info), ]) self.share_manager.db.share_server_update.assert_called_once_with( self.context, share_server['id'], {'status': constants.STATUS_ACTIVE}) def test_setup_server_server_info_not_present(self): # Setup required test data share_server = { 'id': 'fake_id', 'share_network_id': 'fake_sn_id', } metadata = {'fake_metadata_key': 'fake_metadata_value'} share_network = {'id': 'fake_sn_id'} network_info = { 'fake_network_info_key': 
'fake_network_info_value', 'security_services': [], 'network_type': 'fake_network_type', } server_info = {} # mock required stuff self.mock_object(self.share_manager.db, 'share_network_get', mock.Mock(return_value=share_network)) self.mock_object(self.share_manager, '_form_server_setup_info', mock.Mock(return_value=network_info)) self.mock_object(self.share_manager.driver, 'setup_server', mock.Mock(return_value=server_info)) self.mock_object(self.share_manager.db, 'share_server_update', mock.Mock(return_value=share_server)) self.mock_object(self.share_manager.driver, 'allocate_network') # execute method _setup_server result = self.share_manager._setup_server( self.context, share_server, metadata=metadata) # verify results self.assertEqual(share_server, result) self.share_manager.db.share_network_get.assert_has_calls([ mock.call(self.context, share_server['share_network_id']), mock.call(self.context, share_server['share_network_id'])]) self.share_manager._form_server_setup_info.assert_called_once_with( self.context, share_server, share_network) self.share_manager.driver.setup_server.assert_called_once_with( network_info, metadata=metadata) self.share_manager.db.share_server_update.assert_called_once_with( self.context, share_server['id'], {'status': constants.STATUS_ACTIVE}) self.share_manager.driver.allocate_network.assert_called_once_with( self.context, share_server, share_network) def setup_server_raise_exception(self, detail_data_proper): # Setup required test data share_server = { 'id': 'fake_id', 'share_network_id': 'fake_sn_id', } server_info = {'details_key': 'value'} share_network = {'id': 'fake_sn_id'} network_info = { 'fake_network_info_key': 'fake_network_info_value', 'security_services': [], 'network_type': 'fake_network_type', } if detail_data_proper: detail_data = {'server_details': server_info} self.mock_object(self.share_manager.db, 'share_server_backend_details_set') else: detail_data = 'not dictionary detail data' # Mock required parameters self.mock_object(self.share_manager.db, 'share_network_get', mock.Mock(return_value=share_network)) self.mock_object(self.share_manager.db, 'share_server_update') for m in ['deallocate_network', 'allocate_network']: self.mock_object(self.share_manager.driver, m) self.mock_object(self.share_manager, '_form_server_setup_info', mock.Mock(return_value=network_info)) self.mock_object(self.share_manager.db, 'share_server_backend_details_set') self.mock_object(self.share_manager.driver, 'setup_server', mock.Mock(side_effect=exception.ManilaException( detail_data=detail_data))) # execute method _setup_server self.assertRaises( exception.ManilaException, self.share_manager._setup_server, self.context, share_server, ) # verify results if detail_data_proper: self.share_manager.db.share_server_backend_details_set.\ assert_called_once_with( self.context, share_server['id'], server_info) self.share_manager._form_server_setup_info.assert_called_once_with( self.context, share_server, share_network) self.share_manager.db.share_server_update.assert_called_once_with( self.context, share_server['id'], {'status': constants.STATUS_ERROR}) self.share_manager.db.share_network_get.assert_has_calls([ mock.call(self.context, share_server['share_network_id']), mock.call(self.context, share_server['share_network_id'])]) self.share_manager.driver.allocate_network.assert_has_calls([ mock.call(self.context, share_server, share_network)]) self.share_manager.driver.deallocate_network.assert_has_calls([ mock.call(self.context, share_server['id'])]) def 
test_setup_server_incorrect_detail_data(self): self.setup_server_raise_exception(detail_data_proper=False) def test_setup_server_exception_in_driver(self): self.setup_server_raise_exception(detail_data_proper=True) @ddt.data({}, {'detail_data': 'fake'}, {'detail_data': {'server_details': 'fake'}}, {'detail_data': {'server_details': {'fake': 'fake'}}}, {'detail_data': { 'server_details': {'fake': 'fake', 'fake2': 'fake2'}}},) def test_setup_server_exception_in_cleanup_after_error(self, data): def get_server_details_from_data(data): d = data.get('detail_data') if not isinstance(d, dict): return {} d = d.get('server_details') if not isinstance(d, dict): return {} return d share_server = {'id': 'fake', 'share_network_id': 'fake'} details = get_server_details_from_data(data) exc_mock = mock.Mock(side_effect=exception.ManilaException(**data)) details_mock = mock.Mock(side_effect=exception.ManilaException()) self.mock_object(self.share_manager.db, 'share_network_get', exc_mock) self.mock_object(self.share_manager.db, 'share_server_backend_details_set', details_mock) self.mock_object(self.share_manager.db, 'share_server_update') self.mock_object(self.share_manager.driver, 'deallocate_network') self.mock_object(manager.LOG, 'debug') self.mock_object(manager.LOG, 'warning') self.assertRaises( exception.ManilaException, self.share_manager._setup_server, self.context, share_server, ) self.assertTrue(self.share_manager.db.share_network_get.called) if details: self.assertEqual(len(details), details_mock.call_count) expected = [mock.call(mock.ANY, share_server['id'], {k: v}) for k, v in details.items()] self.assertEqual(expected, details_mock.call_args_list) self.share_manager.db.share_server_update.assert_called_once_with( self.context, share_server['id'], {'status': constants.STATUS_ERROR}) self.share_manager.driver.deallocate_network.assert_called_once_with( self.context, share_server['id'] ) self.assertFalse(manager.LOG.warning.called) if get_server_details_from_data(data): self.assertTrue(manager.LOG.debug.called) def test_ensure_share_instance_has_pool_with_only_host(self): fake_share = { 'status': constants.STATUS_AVAILABLE, 'host': 'host1', 'id': 1} host = self.share_manager._ensure_share_instance_has_pool( context.get_admin_context(), fake_share) self.assertIsNone(host) def test_ensure_share_instance_has_pool_with_full_pool_name(self): fake_share = {'host': 'host1#pool0', 'id': 1, 'status': constants.STATUS_AVAILABLE} fake_share_expected_value = 'pool0' host = self.share_manager._ensure_share_instance_has_pool( context.get_admin_context(), fake_share) self.assertEqual(fake_share_expected_value, host) def test_ensure_share_instance_has_pool_unable_to_fetch_share(self): fake_share = {'host': 'host@backend', 'id': 1, 'status': constants.STATUS_AVAILABLE} with mock.patch.object(self.share_manager.driver, 'get_pool', side_effect=Exception): with mock.patch.object(manager, 'LOG') as mock_LOG: self.share_manager._ensure_share_instance_has_pool( context.get_admin_context(), fake_share) self.assertEqual(1, mock_LOG.error.call_count) def test__form_server_setup_info(self): def fake_network_allocations_get_for_share_server(*args, **kwargs): if kwargs.get('label') != 'admin': return ['foo', 'bar'] return ['admin-foo', 'admin-bar'] self.mock_object( self.share_manager.db, 'network_allocations_get_for_share_server', mock.Mock( side_effect=fake_network_allocations_get_for_share_server)) fake_share_server = dict( id='fake_share_server_id', backend_details=dict(foo='bar')) fake_share_network = dict( 
segmentation_id='fake_segmentation_id', cidr='fake_cidr', neutron_net_id='fake_neutron_net_id', neutron_subnet_id='fake_neutron_subnet_id', nova_net_id='fake_nova_net_id', security_services='fake_security_services', network_type='fake_network_type') expected = dict( server_id=fake_share_server['id'], segmentation_id=fake_share_network['segmentation_id'], cidr=fake_share_network['cidr'], neutron_net_id=fake_share_network['neutron_net_id'], neutron_subnet_id=fake_share_network['neutron_subnet_id'], nova_net_id=fake_share_network['nova_net_id'], security_services=fake_share_network['security_services'], network_allocations=( fake_network_allocations_get_for_share_server()), admin_network_allocations=( fake_network_allocations_get_for_share_server(label='admin')), backend_details=fake_share_server['backend_details'], network_type=fake_share_network['network_type']) network_info = self.share_manager._form_server_setup_info( self.context, fake_share_server, fake_share_network) self.assertEqual(expected, network_info) self.share_manager.db.network_allocations_get_for_share_server.\ assert_has_calls([ mock.call(self.context, fake_share_server['id'], label='user'), mock.call(self.context, fake_share_server['id'], label='admin') ]) @ddt.data( {'network_info': {'network_type': 'vlan', 'segmentation_id': '100'}}, {'network_info': {'network_type': 'vlan', 'segmentation_id': '1'}}, {'network_info': {'network_type': 'vlan', 'segmentation_id': '4094'}}, {'network_info': {'network_type': 'vxlan', 'segmentation_id': '100'}}, {'network_info': {'network_type': 'vxlan', 'segmentation_id': '1'}}, {'network_info': {'network_type': 'vxlan', 'segmentation_id': '16777215'}}, {'network_info': {'network_type': 'gre', 'segmentation_id': '100'}}, {'network_info': {'network_type': 'gre', 'segmentation_id': '1'}}, {'network_info': {'network_type': 'gre', 'segmentation_id': '4294967295'}}, {'network_info': {'network_type': 'flat', 'segmentation_id': None}}, {'network_info': {'network_type': 'flat', 'segmentation_id': 0}}, {'network_info': {'network_type': None, 'segmentation_id': None}}, {'network_info': {'network_type': None, 'segmentation_id': 0}}) @ddt.unpack def test_validate_segmentation_id_with_valid_values(self, network_info): self.share_manager._validate_segmentation_id(network_info) @ddt.data( {'network_info': {'network_type': 'vlan', 'segmentation_id': None}}, {'network_info': {'network_type': 'vlan', 'segmentation_id': -1}}, {'network_info': {'network_type': 'vlan', 'segmentation_id': 0}}, {'network_info': {'network_type': 'vlan', 'segmentation_id': '4095'}}, {'network_info': {'network_type': 'vxlan', 'segmentation_id': None}}, {'network_info': {'network_type': 'vxlan', 'segmentation_id': 0}}, {'network_info': {'network_type': 'vxlan', 'segmentation_id': '16777216'}}, {'network_info': {'network_type': 'gre', 'segmentation_id': None}}, {'network_info': {'network_type': 'gre', 'segmentation_id': 0}}, {'network_info': {'network_type': 'gre', 'segmentation_id': '4294967296'}}, {'network_info': {'network_type': 'flat', 'segmentation_id': '1000'}}, {'network_info': {'network_type': None, 'segmentation_id': '1000'}}) @ddt.unpack def test_validate_segmentation_id_with_invalid_values(self, network_info): self.assertRaises(exception.NetworkBadConfigurationException, self.share_manager._validate_segmentation_id, network_info) @ddt.data(5, 70) def test_verify_server_cleanup_interval_invalid_cases(self, val): data = dict(DEFAULT=dict(unused_share_server_cleanup_interval=val)) with 
test_utils.create_temp_config_with_opts(data): self.assertRaises(exception.InvalidParameterValue, manager.ShareManager) @ddt.data(10, 36, 60) def test_verify_server_cleanup_interval_valid_cases(self, val): data = dict(DEFAULT=dict(unused_share_server_cleanup_interval=val)) with test_utils.create_temp_config_with_opts(data): manager.ShareManager() @mock.patch.object(db, 'share_server_get_all_unused_deletable', mock.Mock()) @mock.patch.object(manager.ShareManager, 'delete_share_server', mock.Mock()) def test_delete_free_share_servers_cleanup_disabled(self): data = dict(DEFAULT=dict(automatic_share_server_cleanup=False)) with test_utils.create_temp_config_with_opts(data): share_manager = manager.ShareManager() share_manager.driver.initialized = True share_manager.delete_free_share_servers(self.context) self.assertFalse(db.share_server_get_all_unused_deletable.called) @mock.patch.object(db, 'share_server_get_all_unused_deletable', mock.Mock()) @mock.patch.object(manager.ShareManager, 'delete_share_server', mock.Mock()) def test_delete_free_share_servers_driver_handles_ss_disabled(self): data = dict(DEFAULT=dict(driver_handles_share_servers=False)) with test_utils.create_temp_config_with_opts(data): share_manager = manager.ShareManager() share_manager.driver.initialized = True share_manager.delete_free_share_servers(self.context) self.assertFalse(db.share_server_get_all_unused_deletable.called) self.assertFalse(share_manager.delete_share_server.called) @mock.patch.object(db, 'share_server_get_all_unused_deletable', mock.Mock(return_value=['server1', ])) @mock.patch.object(manager.ShareManager, 'delete_share_server', mock.Mock()) @mock.patch.object(timeutils, 'utcnow', mock.Mock( return_value=datetime.timedelta(minutes=20))) def test_delete_free_share_servers(self): self.share_manager.delete_free_share_servers(self.context) db.share_server_get_all_unused_deletable.assert_called_once_with( self.context, self.share_manager.host, datetime.timedelta(minutes=10)) self.share_manager.delete_share_server.assert_called_once_with( self.context, 'server1') timeutils.utcnow.assert_called_once_with() def test_extend_share_invalid(self): share = db_utils.create_share() share_id = share['id'] reservations = {} self.mock_object(self.share_manager, 'driver') self.mock_object(self.share_manager.db, 'share_update') self.mock_object(quota.QUOTAS, 'rollback') self.mock_object(self.share_manager.driver, 'extend_share', mock.Mock(side_effect=Exception('fake'))) self.assertRaises( exception.ShareExtendingError, self.share_manager.extend_share, self.context, share_id, 123, {}) quota.QUOTAS.rollback.assert_called_once_with( mock.ANY, reservations, project_id=six.text_type(share['project_id']), user_id=six.text_type(share['user_id']) ) def test_extend_share(self): share = db_utils.create_share() share_id = share['id'] new_size = 123 shr_update = { 'size': int(new_size), 'status': constants.STATUS_AVAILABLE.lower() } reservations = {} fake_share_server = 'fake' manager = self.share_manager self.mock_object(manager, 'driver') self.mock_object(manager.db, 'share_get', mock.Mock(return_value=share)) self.mock_object(manager.db, 'share_update', mock.Mock(return_value=share)) self.mock_object(quota.QUOTAS, 'commit') self.mock_object(manager.driver, 'extend_share') self.mock_object(manager, '_get_share_server', mock.Mock(return_value=fake_share_server)) self.share_manager.extend_share(self.context, share_id, new_size, reservations) self.assertTrue(manager._get_share_server.called) 
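# NOTE: test_extend_share above and the shrink tests that follow exercise the
# two-phase quota pattern these manager calls are built around: a reservation
# is taken first, then either committed on success or rolled back on failure,
# which is why the happy paths assert quota.QUOTAS.commit while the error paths
# assert quota.QUOTAS.rollback instead. Sketch of the flow as these tests model
# it (illustrative pseudocode, not the manager implementation):
#
#     reservations = QUOTAS.reserve(ctxt, gigabytes=delta,
#                                   project_id=project_id, user_id=user_id)
#     try:
#         driver.extend_share(instance, new_size, share_server=server)
#     except Exception:
#         QUOTAS.rollback(ctxt, reservations,
#                         project_id=project_id, user_id=user_id)
#         raise
#     QUOTAS.commit(ctxt, reservations,
#                   project_id=project_id, user_id=user_id)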
manager.driver.extend_share.assert_called_once_with( utils.IsAMatcher(models.ShareInstance), new_size, share_server=fake_share_server ) quota.QUOTAS.commit.assert_called_once_with( mock.ANY, reservations, project_id=share['project_id'], user_id=share['user_id']) manager.db.share_update.assert_called_once_with( mock.ANY, share_id, shr_update ) def test_shrink_share_quota_error(self): size = 5 new_size = 1 share = db_utils.create_share(size=size) share_id = share['id'] self.mock_object(self.share_manager.db, 'share_update') self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(side_effect=Exception('fake'))) self.assertRaises( exception.ShareShrinkingError, self.share_manager.shrink_share, self.context, share_id, new_size) quota.QUOTAS.reserve.assert_called_with( mock.ANY, project_id=six.text_type(share['project_id']), user_id=six.text_type(share['user_id']), gigabytes=new_size - size ) self.assertTrue(self.share_manager.db.share_update.called) @ddt.data({'exc': exception.InvalidShare('fake'), 'status': constants.STATUS_SHRINKING_ERROR}, {'exc': exception.ShareShrinkingPossibleDataLoss("fake"), 'status': constants.STATUS_SHRINKING_POSSIBLE_DATA_LOSS_ERROR}) @ddt.unpack def test_shrink_share_invalid(self, exc, status): share = db_utils.create_share() new_size = 1 share_id = share['id'] size_decrease = int(share['size']) - new_size self.mock_object(self.share_manager, 'driver') self.mock_object(self.share_manager.db, 'share_update') self.mock_object(self.share_manager.db, 'share_get', mock.Mock(return_value=share)) self.mock_object(quota.QUOTAS, 'reserve') self.mock_object(quota.QUOTAS, 'rollback') self.mock_object(self.share_manager.driver, 'shrink_share', mock.Mock(side_effect=exc)) self.assertRaises( exception.ShareShrinkingError, self.share_manager.shrink_share, self.context, share_id, new_size) self.share_manager.driver.shrink_share.assert_called_once_with( utils.IsAMatcher(models.ShareInstance), new_size, share_server=None ) self.share_manager.db.share_update.assert_called_once_with( mock.ANY, share_id, {'status': status} ) quota.QUOTAS.reserve.assert_called_once_with( mock.ANY, gigabytes=-size_decrease, project_id=share['project_id'], user_id=share['user_id'] ) quota.QUOTAS.rollback.assert_called_once_with( mock.ANY, mock.ANY, project_id=share['project_id'], user_id=share['user_id'] ) self.assertTrue(self.share_manager.db.share_get.called) def test_shrink_share(self): share = db_utils.create_share() share_id = share['id'] new_size = 123 shr_update = { 'size': int(new_size), 'status': constants.STATUS_AVAILABLE } fake_share_server = 'fake' size_decrease = int(share['size']) - new_size manager = self.share_manager self.mock_object(manager, 'driver') self.mock_object(manager.db, 'share_get', mock.Mock(return_value=share)) self.mock_object(manager.db, 'share_update', mock.Mock(return_value=share)) self.mock_object(quota.QUOTAS, 'commit') self.mock_object(quota.QUOTAS, 'reserve') self.mock_object(manager.driver, 'shrink_share') self.mock_object(manager, '_get_share_server', mock.Mock(return_value=fake_share_server)) self.share_manager.shrink_share(self.context, share_id, new_size) self.assertTrue(manager._get_share_server.called) manager.driver.shrink_share.assert_called_once_with( utils.IsAMatcher(models.ShareInstance), new_size, share_server=fake_share_server ) quota.QUOTAS.reserve.assert_called_once_with( mock.ANY, gigabytes=-size_decrease, project_id=share['project_id'], user_id=share['user_id'] ) quota.QUOTAS.commit.assert_called_once_with( mock.ANY, mock.ANY, 
project_id=share['project_id'], user_id=share['user_id'] ) manager.db.share_update.assert_called_once_with( mock.ANY, share_id, shr_update ) def test_report_driver_status_driver_handles_ss_false(self): fake_stats = {'field': 'val'} fake_pool = {'name': 'pool1'} self.share_manager.last_capabilities = {'field': 'old_val'} self.mock_object(self.share_manager, 'driver', mock.Mock()) driver = self.share_manager.driver driver.get_share_stats = mock.Mock(return_value=fake_stats) self.mock_object(db, 'share_server_get_all_by_host', mock.Mock()) driver.driver_handles_share_servers = False driver.get_share_server_pools = mock.Mock(return_value=fake_pool) self.share_manager._report_driver_status(self.context) driver.get_share_stats.assert_called_once_with( refresh=True) self.assertFalse(db.share_server_get_all_by_host.called) self.assertFalse(driver.get_share_server_pools.called) self.assertEqual(fake_stats, self.share_manager.last_capabilities) def test_report_driver_status_driver_handles_ss(self): fake_stats = {'field': 'val'} fake_ss = {'id': '1234'} fake_pool = {'name': 'pool1'} self.mock_object(self.share_manager, 'driver', mock.Mock()) driver = self.share_manager.driver driver.get_share_stats = mock.Mock(return_value=fake_stats) self.mock_object(db, 'share_server_get_all_by_host', mock.Mock( return_value=[fake_ss])) driver.driver_handles_share_servers = True driver.get_share_server_pools = mock.Mock(return_value=fake_pool) self.share_manager._report_driver_status(self.context) driver.get_share_stats.assert_called_once_with(refresh=True) db.share_server_get_all_by_host.assert_called_once_with( self.context, self.share_manager.host) driver.get_share_server_pools.assert_called_once_with(fake_ss) expected_stats = { 'field': 'val', 'server_pools_mapping': { '1234': fake_pool}, } self.assertEqual(expected_stats, self.share_manager.last_capabilities) def test_report_driver_status_empty_share_stats(self): old_capabilities = {'field': 'old_val'} fake_pool = {'name': 'pool1'} self.share_manager.last_capabilities = old_capabilities self.mock_object(self.share_manager, 'driver', mock.Mock()) driver = self.share_manager.driver driver.get_share_stats = mock.Mock(return_value={}) self.mock_object(db, 'share_server_get_all_by_host', mock.Mock()) driver.driver_handles_share_servers = True driver.get_share_server_pools = mock.Mock(return_value=fake_pool) self.share_manager._report_driver_status(self.context) driver.get_share_stats.assert_called_once_with(refresh=True) self.assertFalse(db.share_server_get_all_by_host.called) self.assertFalse(driver.get_share_server_pools.called) self.assertEqual(old_capabilities, self.share_manager.last_capabilities) def test_create_consistency_group(self): fake_cg = {'id': 'fake_id'} self.mock_object(self.share_manager.db, 'consistency_group_get', mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager.db, 'consistency_group_update', mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager.driver, 'create_consistency_group', mock.Mock(return_value=None)) self.share_manager.create_consistency_group(self.context, "fake_id") self.share_manager.db.consistency_group_update.\ assert_called_once_with(mock.ANY, 'fake_id', {'status': constants.STATUS_AVAILABLE, 'created_at': mock.ANY}) def test_create_cg_with_share_network_driver_not_handles_servers(self): manager.CONF.set_default('driver_handles_share_servers', False) self.mock_object( self.share_manager.driver.configuration, 'safe_get', mock.Mock(return_value=False)) cg_id = 'fake_cg_id' share_network_id = 
'fake_sn' fake_cg = {'id': 'fake_id', 'share_network_id': share_network_id} self.mock_object( self.share_manager.db, 'consistency_group_get', mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager.db, 'consistency_group_update') self.assertRaises( exception.ManilaException, self.share_manager.create_consistency_group, self.context, cg_id) self.share_manager.db.consistency_group_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), cg_id) self.share_manager.db.consistency_group_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), cg_id, {'status': constants.STATUS_ERROR}) def test_create_cg_with_share_network_driver_handles_servers(self): manager.CONF.set_default('driver_handles_share_servers', True) self.mock_object( self.share_manager.driver.configuration, 'safe_get', mock.Mock(return_value=True)) share_network_id = 'fake_sn' fake_cg = {'id': 'fake_id', 'share_network_id': share_network_id, 'host': "fake_host"} self.mock_object(self.share_manager.db, 'consistency_group_get', mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager.db, 'consistency_group_update', mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager, '_provide_share_server_for_cg', mock.Mock(return_value=({}, fake_cg))) self.mock_object(self.share_manager.driver, 'create_consistency_group', mock.Mock(return_value=None)) self.share_manager.create_consistency_group(self.context, "fake_id") self.share_manager.db.consistency_group_update.\ assert_called_once_with(mock.ANY, 'fake_id', {'status': constants.STATUS_AVAILABLE, 'created_at': mock.ANY}) def test_create_consistency_group_with_update(self): fake_cg = {'id': 'fake_id'} self.mock_object(self.share_manager.db, 'consistency_group_get', mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager.db, 'consistency_group_update', mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager.driver, 'create_consistency_group', mock.Mock(return_value={'foo': 'bar'})) self.share_manager.create_consistency_group(self.context, "fake_id") self.share_manager.db.consistency_group_update.\ assert_any_call(mock.ANY, 'fake_id', {'foo': 'bar'}) self.share_manager.db.consistency_group_update.\ assert_any_call(mock.ANY, 'fake_id', {'status': constants.STATUS_AVAILABLE, 'created_at': mock.ANY}) def test_create_consistency_group_with_error(self): fake_cg = {'id': 'fake_id'} self.mock_object(self.share_manager.db, 'consistency_group_get', mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager.db, 'consistency_group_update', mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager.driver, 'create_consistency_group', mock.Mock(side_effect=exception.Error)) self.assertRaises(exception.Error, self.share_manager.create_consistency_group, self.context, "fake_id") self.share_manager.db.consistency_group_update.\ assert_called_once_with(mock.ANY, 'fake_id', {'status': constants.STATUS_ERROR}) def test_create_consistency_group_from_cgsnapshot(self): fake_cg = {'id': 'fake_id', 'source_cgsnapshot_id': 'fake_snap_id', 'shares': [], 'share_server_id': 'fake_ss_id'} fake_ss = {'id': 'fake_ss_id', 'share_network_id': 'fake_sn'} fake_snap = {'id': 'fake_snap_id', 'cgsnapshot_members': [], 'consistency_group': {'share_server_id': fake_ss['id']}} self.mock_object(self.share_manager.db, 'consistency_group_get', mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager.db, 'cgsnapshot_get', mock.Mock(return_value=fake_snap)) self.mock_object(self.share_manager.db, 
'share_server_get', mock.Mock( return_value=fake_ss)) self.mock_object(self.share_manager.db, 'consistency_group_update', mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager.driver, 'create_consistency_group_from_cgsnapshot', mock.Mock(return_value=(None, None))) self.share_manager.create_consistency_group(self.context, "fake_id") self.share_manager.db.consistency_group_update.\ assert_called_once_with(mock.ANY, 'fake_id', {'status': constants.STATUS_AVAILABLE, 'created_at': mock.ANY}) self.share_manager.db.share_server_get(mock.ANY, 'fake_ss_id') self.share_manager.driver.create_consistency_group_from_cgsnapshot.\ assert_called_once_with( mock.ANY, fake_cg, fake_snap, share_server=fake_ss) def test_create_cg_cgsnapshot_share_network_driver_not_handles_servers( self): manager.CONF.set_default('driver_handles_share_servers', False) self.mock_object( self.share_manager.driver.configuration, 'safe_get', mock.Mock(return_value=False)) cg_id = 'fake_cg_id' share_network_id = 'fake_sn' fake_cg = {'id': 'fake_id', 'source_cgsnapshot_id': 'fake_snap_id', 'shares': [], 'share_network_id': share_network_id, 'host': "fake_host"} self.mock_object( self.share_manager.db, 'consistency_group_get', mock.Mock(return_value=fake_cg)) fake_snap = {'id': 'fake_snap_id', 'cgsnapshot_members': []} self.mock_object(self.share_manager.db, 'cgsnapshot_get', mock.Mock(return_value=fake_snap)) self.mock_object(self.share_manager.db, 'consistency_group_update') self.assertRaises(exception.ManilaException, self.share_manager.create_consistency_group, self.context, cg_id) self.share_manager.db.consistency_group_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), cg_id) self.share_manager.db.consistency_group_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), cg_id, {'status': constants.STATUS_ERROR}) def test_create_cg_from_cgsnapshot_share_network_driver_handles_servers( self): manager.CONF.set_default('driver_handles_share_servers', True) self.mock_object(self.share_manager.driver.configuration, 'safe_get', mock.Mock(return_value=True)) share_network_id = 'fake_sn' fake_cg = {'id': 'fake_id', 'source_cgsnapshot_id': 'fake_snap_id', 'shares': [], 'share_network_id': share_network_id} fake_snap = {'id': 'fake_snap_id', 'cgsnapshot_members': []} self.mock_object(self.share_manager.db, 'consistency_group_get', mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager.db, 'cgsnapshot_get', mock.Mock(return_value=fake_snap)) self.mock_object(self.share_manager.db, 'consistency_group_update', mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager, '_provide_share_server_for_cg', mock.Mock(return_value=({}, fake_cg))) self.mock_object(self.share_manager.driver, 'create_consistency_group_from_cgsnapshot', mock.Mock(return_value=(None, None))) self.share_manager.create_consistency_group(self.context, "fake_id") self.share_manager.db.consistency_group_update.\ assert_called_once_with(mock.ANY, 'fake_id', {'status': constants.STATUS_AVAILABLE, 'created_at': mock.ANY}) def test_create_consistency_group_from_cgsnapshot_with_update(self): fake_cg = {'id': 'fake_id', 'source_cgsnapshot_id': 'fake_snap_id', 'shares': []} fake_snap = {'id': 'fake_snap_id', 'cgsnapshot_members': []} self.mock_object(self.share_manager.db, 'consistency_group_get', mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager.db, 'cgsnapshot_get', mock.Mock(return_value=fake_snap)) self.mock_object(self.share_manager.db, 'consistency_group_update', 
mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager.driver, 'create_consistency_group_from_cgsnapshot', mock.Mock(return_value=({'foo': 'bar'}, None))) self.share_manager.create_consistency_group(self.context, "fake_id") self.share_manager.db.consistency_group_update.\ assert_any_call(mock.ANY, 'fake_id', {'foo': 'bar'}) self.share_manager.db.consistency_group_update.\ assert_any_call(mock.ANY, 'fake_id', {'status': constants.STATUS_AVAILABLE, 'created_at': mock.ANY}) def test_create_consistency_group_from_cgsnapshot_with_share_update(self): fake_share = {'id': 'fake_share_id'} fake_export_locations = ['my_export_location'] fake_cg = {'id': 'fake_id', 'source_cgsnapshot_id': 'fake_snap_id', 'shares': [fake_share]} fake_snap = {'id': 'fake_snap_id', 'cgsnapshot_members': []} self.mock_object(self.share_manager.db, 'consistency_group_get', mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager.db, 'cgsnapshot_get', mock.Mock(return_value=fake_snap)) self.mock_object(self.share_manager.db, 'consistency_group_update') self.mock_object(self.share_manager.db, 'share_instance_update') self.mock_object(self.share_manager.db, 'share_export_locations_update') fake_share_update_list = [{'id': fake_share['id'], 'foo': 'bar', 'export_locations': fake_export_locations}] self.mock_object(self.share_manager.driver, 'create_consistency_group_from_cgsnapshot', mock.Mock( return_value=(None, fake_share_update_list))) self.share_manager.create_consistency_group(self.context, "fake_id") self.share_manager.db.share_instance_update.\ assert_any_call(mock.ANY, 'fake_share_id', {'foo': 'bar'}) self.share_manager.db.share_export_locations_update.\ assert_any_call(mock.ANY, 'fake_share_id', fake_export_locations) self.share_manager.db.consistency_group_update.\ assert_any_call(mock.ANY, 'fake_id', {'status': constants.STATUS_AVAILABLE, 'created_at': mock.ANY}) def test_create_consistency_group_from_cgsnapshot_with_error(self): fake_cg = {'id': 'fake_id', 'source_cgsnapshot_id': 'fake_snap_id', 'shares': []} fake_snap = {'id': 'fake_snap_id', 'cgsnapshot_members': []} self.mock_object(self.share_manager.db, 'consistency_group_get', mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager.db, 'cgsnapshot_get', mock.Mock(return_value=fake_snap)) self.mock_object(self.share_manager.db, 'share_instances_get_all_by_consistency_group_id', mock.Mock(return_value=[])) self.mock_object(self.share_manager.db, 'consistency_group_update', mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager.driver, 'create_consistency_group_from_cgsnapshot', mock.Mock(side_effect=exception.Error)) self.assertRaises(exception.Error, self.share_manager.create_consistency_group, self.context, "fake_id") self.share_manager.db.consistency_group_update.\ assert_called_once_with(mock.ANY, 'fake_id', {'status': constants.STATUS_ERROR}) def test_create_consistency_group_from_cgsnapshot_with_share_error(self): fake_share = {'id': 'fake_share_id'} fake_cg = {'id': 'fake_id', 'source_cgsnapshot_id': 'fake_snap_id', 'shares': [fake_share]} fake_snap = {'id': 'fake_snap_id', 'cgsnapshot_members': []} self.mock_object(self.share_manager.db, 'consistency_group_get', mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager.db, 'cgsnapshot_get', mock.Mock(return_value=fake_snap)) self.mock_object(self.share_manager.db, 'share_instances_get_all_by_consistency_group_id', mock.Mock(return_value=[fake_share])) self.mock_object(self.share_manager.db, 'consistency_group_update') 
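# NOTE: as exercised throughout these consistency-group tests, the driver's
# create_consistency_group_from_cgsnapshot call is stubbed to return a two-item
# tuple: an optional dict of updates for the consistency group itself and an
# optional list of per-share update dicts, where a share update may carry an
# 'export_locations' list that is persisted separately via
# share_export_locations_update. Shape of such a stubbed return value (the
# names and values here are illustrative only):
#
#     cg_update = {'foo': 'bar'}                      # or None
#     share_updates = [{'id': 'fake_share_id',
#                       'foo': 'bar',
#                       'export_locations': ['my_export_location']}]
#     driver_stub.return_value = (cg_update, share_updates)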
self.mock_object(self.share_manager.db, 'share_instance_update') self.mock_object(self.share_manager.driver, 'create_consistency_group_from_cgsnapshot', mock.Mock(side_effect=exception.Error)) self.assertRaises(exception.Error, self.share_manager.create_consistency_group, self.context, "fake_id") self.share_manager.db.share_instance_update.\ assert_any_call(mock.ANY, 'fake_share_id', {'status': constants.STATUS_ERROR}) self.share_manager.db.consistency_group_update.\ assert_called_once_with(mock.ANY, 'fake_id', {'status': constants.STATUS_ERROR}) def test_delete_consistency_group(self): fake_cg = {'id': 'fake_id'} self.mock_object(self.share_manager.db, 'consistency_group_get', mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager.db, 'consistency_group_update', mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager.db, 'consistency_group_destroy', mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager.driver, 'delete_consistency_group', mock.Mock(return_value=None)) self.share_manager.delete_consistency_group(self.context, "fake_id") self.share_manager.db.consistency_group_destroy.\ assert_called_once_with(mock.ANY, 'fake_id') def test_delete_consistency_group_with_update(self): fake_cg = {'id': 'fake_id'} self.mock_object(self.share_manager.db, 'consistency_group_get', mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager.db, 'consistency_group_update', mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager.db, 'consistency_group_destroy', mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager.driver, 'delete_consistency_group', mock.Mock(return_value={'foo': 'bar'})) self.share_manager.delete_consistency_group(self.context, "fake_id") self.share_manager.db.consistency_group_update.\ assert_called_once_with(mock.ANY, 'fake_id', {'foo': 'bar'}) self.share_manager.db.consistency_group_destroy.\ assert_called_once_with(mock.ANY, 'fake_id') def test_delete_consistency_group_with_error(self): fake_cg = {'id': 'fake_id'} self.mock_object(self.share_manager.db, 'consistency_group_get', mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager.db, 'consistency_group_update', mock.Mock(return_value=fake_cg)) self.mock_object(self.share_manager.driver, 'delete_consistency_group', mock.Mock(side_effect=exception.Error)) self.assertRaises(exception.Error, self.share_manager.delete_consistency_group, self.context, "fake_id") self.share_manager.db.consistency_group_update.\ assert_called_once_with(mock.ANY, 'fake_id', {'status': constants.STATUS_ERROR}) def test_create_cgsnapshot(self): fake_snap = {'id': 'fake_snap_id', 'consistency_group': {}, 'cgsnapshot_members': []} self.mock_object(self.share_manager.db, 'cgsnapshot_get', mock.Mock(return_value=fake_snap)) self.mock_object(self.share_manager.db, 'cgsnapshot_update', mock.Mock(return_value=fake_snap)) self.mock_object(self.share_manager.driver, 'create_cgsnapshot', mock.Mock(return_value=(None, None))) self.share_manager.create_cgsnapshot(self.context, fake_snap['id']) self.share_manager.db.cgsnapshot_update.\ assert_called_once_with(mock.ANY, fake_snap['id'], {'status': constants.STATUS_AVAILABLE, 'created_at': mock.ANY}) def test_create_cgsnapshot_with_update(self): fake_snap = {'id': 'fake_snap_id', 'consistency_group': {}, 'cgsnapshot_members': []} self.mock_object(self.share_manager.db, 'cgsnapshot_get', mock.Mock(return_value=fake_snap)) self.mock_object(self.share_manager.db, 'cgsnapshot_update', mock.Mock(return_value=fake_snap)) 
self.mock_object(self.share_manager.driver, 'create_cgsnapshot', mock.Mock(return_value=({'foo': 'bar'}, None))) self.share_manager.create_cgsnapshot(self.context, fake_snap['id']) self.share_manager.db.cgsnapshot_update.\ assert_any_call(mock.ANY, 'fake_snap_id', {'foo': 'bar'}) self.share_manager.db.cgsnapshot_update.assert_any_call( mock.ANY, fake_snap['id'], {'status': constants.STATUS_AVAILABLE, 'created_at': mock.ANY}) def test_create_cgsnapshot_with_member_update(self): fake_member = { 'id': 'fake_member_id', 'share_instance_id': 'blah', } fake_member_update = { 'id': 'fake_member_id', 'foo': 'bar' } fake_snap = {'id': 'fake_snap_id', 'consistency_group': {}, 'cgsnapshot_members': [fake_member]} self.mock_object(self.share_manager.db, 'cgsnapshot_get', mock.Mock(return_value=fake_snap)) self.mock_object(self.share_manager.db, 'cgsnapshot_update', mock.Mock(return_value=fake_snap)) self.mock_object(self.share_manager.db, 'cgsnapshot_member_update') self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(return_value={'id': 'blah'})) self.mock_object(self.share_manager.driver, 'create_cgsnapshot', mock.Mock(return_value=(None, [fake_member_update]))) self.share_manager.create_cgsnapshot(self.context, fake_snap['id']) self.share_manager.db.cgsnapshot_update.assert_any_call( mock.ANY, fake_snap['id'], {'cgsnapshot_members': [fake_member_update]}) self.share_manager.db.cgsnapshot_update.\ assert_any_call(mock.ANY, fake_snap['id'], {'status': constants.STATUS_AVAILABLE, 'created_at': mock.ANY}) self.assertTrue(self.share_manager.db.cgsnapshot_member_update.called) def test_create_cgsnapshot_with_error(self): fake_snap = {'id': 'fake_snap_id', 'consistency_group': {}, 'cgsnapshot_members': []} self.mock_object(self.share_manager.db, 'cgsnapshot_get', mock.Mock(return_value=fake_snap)) self.mock_object(self.share_manager.db, 'cgsnapshot_update', mock.Mock(return_value=fake_snap)) self.mock_object(self.share_manager.driver, 'create_cgsnapshot', mock.Mock(side_effect=exception.Error)) self.assertRaises(exception.Error, self.share_manager.create_cgsnapshot, self.context, fake_snap['id']) self.share_manager.db.cgsnapshot_update.\ assert_called_once_with(mock.ANY, fake_snap['id'], {'status': constants.STATUS_ERROR}) def test_migration_get_info(self): share_instance = {'share_server_id': 'fake_server_id'} share_instance_id = 'fake_id' share_server = 'fake_share_server' migration_info = 'fake_info' # mocks self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(return_value=share_instance)) self.mock_object(self.share_manager.db, 'share_server_get', mock.Mock(return_value=share_server)) self.mock_object(self.share_manager.driver, 'migration_get_info', mock.Mock(return_value=migration_info)) # run result = self.share_manager.migration_get_info( self.context, share_instance_id) # asserts self.assertEqual(migration_info, result) self.share_manager.db.share_instance_get.assert_called_once_with( self.context, share_instance_id, with_share_data=True) self.share_manager.driver.migration_get_info.assert_called_once_with( self.context, share_instance, share_server) def test_migration_get_driver_info(self): share_instance = {'share_server_id': 'fake_server_id'} share_instance_id = 'fake-id' share_server = 'fake-share-server' migration_info = 'fake_info' # mocks self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(return_value=share_instance)) self.mock_object(self.share_manager.db, 'share_server_get', mock.Mock(return_value=share_server)) 
self.mock_object(self.share_manager.driver, 'migration_get_driver_info', mock.Mock(return_value=migration_info)) result = self.share_manager.migration_get_driver_info( self.context, share_instance_id) # asserts self.assertEqual(migration_info, result) self.share_manager.db.share_instance_get.assert_called_once_with( self.context, share_instance_id, with_share_data=True) self.share_manager.driver.migration_get_driver_info.\ assert_called_once_with(self.context, share_instance, share_server) @ddt.data((True, 'fake_model_update'), exception.ManilaException()) def test_migration_start(self, exc): server = 'fake_share_server' instance = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_AVAILABLE, share_server_id='fake_server_id') share = db_utils.create_share(id='fake_id', instances=[instance]) host = 'fake_host' driver_migration_info = 'driver_fake_info' # mocks self.mock_object(self.share_manager.db, 'share_get', mock.Mock(return_value=share)) self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(return_value=instance)) self.mock_object(self.share_manager.db, 'share_server_get', mock.Mock(return_value=server)) self.mock_object(self.share_manager.db, 'share_update') self.mock_object(self.share_manager.db, 'share_instance_update') self.mock_object(rpcapi.ShareAPI, 'migration_get_driver_info', mock.Mock(return_value=driver_migration_info)) if isinstance(exc, exception.ManilaException): self.mock_object(self.share_manager.driver, 'migration_start', mock.Mock(side_effect=exc)) self.mock_object(self.share_manager, '_migration_start_generic', mock.Mock(side_effect=Exception('fake'))) self.mock_object(manager.LOG, 'exception') else: self.mock_object(self.share_manager.driver, 'migration_start', mock.Mock(return_value=exc)) # run if isinstance(exc, exception.ManilaException): self.assertRaises(exception.ShareMigrationFailed, self.share_manager.migration_start, self.context, 'fake_id', host, False, False) else: self.share_manager.migration_start( self.context, 'fake_id', host, False, False) # asserts self.share_manager.db.share_get.assert_called_once_with( self.context, share['id']) self.share_manager.db.share_instance_get.assert_called_once_with( self.context, instance['id'], with_share_data=True) self.share_manager.db.share_server_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), instance['share_server_id']) share_update_calls = [ mock.call( self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS}), mock.call( self.context, share['id'], {'task_state': ( constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS)}) ] share_instance_update_calls = [ mock.call(self.context, instance['id'], {'status': constants.STATUS_MIGRATING}) ] if isinstance(exc, exception.ManilaException): share_update_calls.append(mock.call( self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_ERROR})) share_instance_update_calls.append( mock.call(self.context, instance['id'], {'status': constants.STATUS_AVAILABLE})) self.share_manager._migration_start_generic.\ assert_called_once_with(self.context, share, instance, host, False) self.assertTrue(manager.LOG.exception.called) else: share_update_calls.append(mock.call( self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE})) share_instance_update_calls.append( mock.call(self.context, instance['id'], 'fake_model_update')) self.share_manager.db.share_update.assert_has_calls(share_update_calls) 
self.share_manager.db.share_instance_update.assert_has_calls( share_instance_update_calls) rpcapi.ShareAPI.migration_get_driver_info.assert_called_once_with( self.context, instance) self.share_manager.driver.migration_start.assert_called_once_with( self.context, instance, server, host, driver_migration_info, False) @ddt.data(None, Exception('fake')) def test__migration_start_generic(self, exc): instance = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_AVAILABLE, share_server_id='fake_server_id') new_instance = db_utils.create_share_instance( share_id='new_fake_id', status=constants.STATUS_AVAILABLE) share = db_utils.create_share(id='fake_id', instances=[instance]) server = 'share_server' src_migration_info = 'src_fake_info' dest_migration_info = 'dest_fake_info' # mocks self.mock_object(self.share_manager.db, 'share_server_get', mock.Mock(return_value=server)) self.mock_object(self.share_manager.db, 'share_instance_update', mock.Mock(return_value=server)) self.mock_object(migration_api.ShareMigrationHelper, 'change_to_read_only') if exc is None: self.mock_object(migration_api.ShareMigrationHelper, 'create_instance_and_wait', mock.Mock(return_value=new_instance)) self.mock_object(self.share_manager.driver, 'migration_get_info', mock.Mock(return_value=src_migration_info)) self.mock_object(rpcapi.ShareAPI, 'migration_get_info', mock.Mock(return_value=dest_migration_info)) self.mock_object(data_rpc.DataAPI, 'migration_start', mock.Mock(side_effect=Exception('fake'))) self.mock_object(migration_api.ShareMigrationHelper, 'cleanup_new_instance') else: self.mock_object(migration_api.ShareMigrationHelper, 'create_instance_and_wait', mock.Mock(side_effect=exc)) self.mock_object(migration_api.ShareMigrationHelper, 'cleanup_access_rules') # run self.assertRaises( exception.ShareMigrationFailed, self.share_manager._migration_start_generic, self.context, share, instance, 'fake_host', False) # asserts self.share_manager.db.share_server_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), instance['share_server_id']) migration_api.ShareMigrationHelper.change_to_read_only.\ assert_called_once_with(instance, server, True, self.share_manager.driver) migration_api.ShareMigrationHelper.create_instance_and_wait.\ assert_called_once_with(share, instance, 'fake_host') migration_api.ShareMigrationHelper.\ cleanup_access_rules.assert_called_once_with( instance, server, self.share_manager.driver) if exc is None: self.share_manager.db.share_instance_update.\ assert_called_once_with( self.context, new_instance['id'], {'status': constants.STATUS_MIGRATING_TO}) self.share_manager.driver.migration_get_info.\ assert_called_once_with(self.context, instance, server) rpcapi.ShareAPI.migration_get_info.assert_called_once_with( self.context, new_instance) data_rpc.DataAPI.migration_start.assert_called_once_with( self.context, share['id'], ['lost+found'], instance['id'], new_instance['id'], src_migration_info, dest_migration_info, False) migration_api.ShareMigrationHelper.\ cleanup_new_instance.assert_called_once_with(new_instance) @ddt.data('fake_model_update', Exception('fake')) def test_migration_complete_driver(self, exc): server = 'fake_server' model_update = 'fake_model_update' instance = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_AVAILABLE, share_server_id='fake_server_id') share = db_utils.create_share( id='fake_id', instances=[instance], task_state=constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE) # mocks 
self.mock_object(self.share_manager.db, 'share_get', mock.Mock(return_value=share)) self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(return_value=instance)) self.mock_object(self.share_manager.db, 'share_server_get', mock.Mock(return_value=server)) self.mock_object(self.share_manager.db, 'share_update') if isinstance(exc, Exception): self.mock_object(self.share_manager.driver, 'migration_complete', mock.Mock(side_effect=exc)) else: self.mock_object(self.share_manager.driver, 'migration_complete', mock.Mock(return_value=exc)) self.mock_object(self.share_manager.db, 'share_instance_update') self.mock_object(rpcapi.ShareAPI, 'migration_get_driver_info', mock.Mock(return_value='fake_info')) self.mock_object(manager.LOG, 'exception') # run if isinstance(exc, Exception): self.assertRaises( exception.ShareMigrationFailed, self.share_manager.migration_complete, self.context, 'fake_id', 'fake_ins_id', 'new_fake_ins_id') else: self.share_manager.migration_complete( self.context, 'fake_id', 'fake_ins_id', 'new_fake_ins_id') # asserts self.share_manager.db.share_get.assert_called_once_with( self.context, share['id']) self.share_manager.db.share_instance_get.assert_called_once_with( self.context, instance['id'], with_share_data=True) self.share_manager.db.share_server_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), 'fake_server_id') self.share_manager.driver.migration_complete.assert_called_once_with( self.context, instance, server, 'fake_info') rpcapi.ShareAPI.migration_get_driver_info.assert_called_once_with( self.context, instance) if isinstance(exc, Exception): self.share_manager.db.share_update.assert_called_once_with( self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_ERROR}) self.assertTrue(manager.LOG.exception.called) else: self.share_manager.db.share_update.assert_called_once_with( self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS}) self.share_manager.db.share_instance_update.\ assert_called_once_with(self.context, instance['id'], model_update) @ddt.data(None, Exception('fake')) def test_migration_complete_generic(self, exc): share = db_utils.create_share( id='fake_id', task_state=constants.TASK_STATE_DATA_COPYING_COMPLETED) # mocks self.mock_object(self.share_manager.db, 'share_get', mock.Mock(return_value=share)) self.mock_object(self.share_manager, '_migration_complete', mock.Mock(side_effect=exc)) self.mock_object(self.share_manager.db, 'share_update') self.mock_object(self.share_manager.db, 'share_instance_update') self.mock_object(manager.LOG, 'exception') # run if exc: self.assertRaises( exception.ShareMigrationFailed, self.share_manager.migration_complete, self.context, 'fake_id', 'fake_ins_id', 'new_fake_ins_id') else: self.share_manager.migration_complete( self.context, 'fake_id', 'fake_ins_id', 'new_fake_ins_id') # asserts self.share_manager.db.share_get.assert_called_once_with( self.context, share['id']) self.share_manager._migration_complete.assert_called_once_with( self.context, share, 'fake_ins_id', 'new_fake_ins_id') if exc: self.share_manager.db.share_update.assert_called_once_with( self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_ERROR}) self.share_manager.db.share_instance_update.\ assert_called_once_with( self.context, 'fake_ins_id', {'status': constants.STATUS_AVAILABLE}) self.assertTrue(manager.LOG.exception.called) @ddt.data(constants.TASK_STATE_DATA_COPYING_ERROR, constants.TASK_STATE_DATA_COPYING_CANCELLED, 
constants.TASK_STATE_DATA_COPYING_COMPLETED, 'other') def test__migration_complete_status(self, status): instance = db_utils.create_share_instance( share_id='fake_id', share_server_id='fake_server_id') new_instance = db_utils.create_share_instance(share_id='fake_id') share = db_utils.create_share(id='fake_id', task_state=status) server = 'fake_server' # mocks self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(side_effect=[instance, new_instance])) self.mock_object(self.share_manager.db, 'share_server_get', mock.Mock(return_value=server)) self.mock_object(migration_api.ShareMigrationHelper, 'cleanup_new_instance') self.mock_object(migration_api.ShareMigrationHelper, 'cleanup_access_rules') self.mock_object(self.share_manager.db, 'share_instance_update') self.mock_object(self.share_manager.db, 'share_update') if status == constants.TASK_STATE_DATA_COPYING_COMPLETED: self.mock_object(migration_api.ShareMigrationHelper, 'apply_new_access_rules', mock.Mock(side_effect=Exception('fake'))) self.mock_object(manager.LOG, 'exception') # run if status == constants.TASK_STATE_DATA_COPYING_CANCELLED: self.share_manager._migration_complete( self.context, share, instance['id'], new_instance['id']) else: self.assertRaises( exception.ShareMigrationFailed, self.share_manager._migration_complete, self.context, share, instance['id'], new_instance['id']) # asserts self.share_manager.db.share_instance_get.assert_has_calls([ mock.call(self.context, instance['id'], with_share_data=True), mock.call(self.context, new_instance['id'], with_share_data=True) ]) self.share_manager.db.share_server_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), 'fake_server_id') if status != 'other': migration_api.ShareMigrationHelper.cleanup_new_instance.\ assert_called_once_with(new_instance) migration_api.ShareMigrationHelper.cleanup_access_rules.\ assert_called_once_with(instance, server, self.share_manager.driver) if status == constants.TASK_STATE_MIGRATION_CANCELLED: self.share_manager.db.share_instance_update.\ assert_called_once_with(self.context, instance['id'], {'status': constants.STATUS_AVAILABLE}) self.share_manager.db.share_update.assert_called_once_with( self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_CANCELLED}) if status == constants.TASK_STATE_DATA_COPYING_COMPLETED: migration_api.ShareMigrationHelper.apply_new_access_rules.\ assert_called_once_with(new_instance) self.assertTrue(manager.LOG.exception.called) def test__migration_complete(self): instance = db_utils.create_share_instance( share_id='fake_id', share_server_id='fake_server_id') new_instance = db_utils.create_share_instance(share_id='fake_id') share = db_utils.create_share( id='fake_id', task_state=constants.TASK_STATE_DATA_COPYING_COMPLETED) server = 'fake_server' # mocks self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(side_effect=[instance, new_instance])) self.mock_object(self.share_manager.db, 'share_server_get', mock.Mock(return_value=server)) self.mock_object(self.share_manager.db, 'share_instance_update') self.mock_object(self.share_manager.db, 'share_update') self.mock_object(migration_api.ShareMigrationHelper, 'delete_instance_and_wait') self.mock_object(migration_api.ShareMigrationHelper, 'apply_new_access_rules') # run self.share_manager._migration_complete( self.context, share, instance['id'], new_instance['id']) # asserts self.share_manager.db.share_instance_get.assert_has_calls([ mock.call(self.context, instance['id'], with_share_data=True), 
mock.call(self.context, new_instance['id'], with_share_data=True) ]) self.share_manager.db.share_server_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), 'fake_server_id') self.share_manager.db.share_instance_update.assert_has_calls([ mock.call(self.context, new_instance['id'], {'status': constants.STATUS_AVAILABLE}), mock.call(self.context, instance['id'], {'status': constants.STATUS_INACTIVE}) ]) self.share_manager.db.share_update.assert_has_calls([ mock.call( self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_COMPLETING}), mock.call( self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS}), ]) migration_api.ShareMigrationHelper.apply_new_access_rules.\ assert_called_once_with(new_instance) migration_api.ShareMigrationHelper.delete_instance_and_wait.\ assert_called_once_with(instance) def test_migration_cancel(self): server = db_utils.create_share_server() share = db_utils.create_share( task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, share_server_id=server['id']) self.mock_object(db, 'share_get', mock.Mock(return_value=share)) self.mock_object(db, 'share_server_get', mock.Mock(return_value=server)) self.mock_object(rpcapi.ShareAPI, 'migration_get_driver_info', mock.Mock(return_value='migration_info')) self.mock_object(self.share_manager.driver, 'migration_cancel') self.share_manager.migration_cancel(self.context, share) rpcapi.ShareAPI.migration_get_driver_info.assert_called_once_with( self.context, share.instance) self.share_manager.driver.migration_cancel.assert_called_once_with( self.context, share.instance, server, 'migration_info') def test_migration_cancel_invalid(self): share = db_utils.create_share() self.mock_object(db, 'share_get', mock.Mock(return_value=share)) self.assertRaises( exception.InvalidShare, self.share_manager.migration_cancel, self.context, share) def test_migration_get_progress(self): server = db_utils.create_share_server() share = db_utils.create_share( task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, share_server_id=server['id']) expected = 'fake_progress' self.mock_object(db, 'share_get', mock.Mock(return_value=share)) self.mock_object(db, 'share_server_get', mock.Mock(return_value=server)) self.mock_object(rpcapi.ShareAPI, 'migration_get_driver_info', mock.Mock(return_value='migration_info')) self.mock_object(self.share_manager.driver, 'migration_get_progress', mock.Mock(return_value=expected)) result = self.share_manager.migration_get_progress(self.context, share) self.assertEqual(expected, result) rpcapi.ShareAPI.migration_get_driver_info.assert_called_once_with( self.context, share.instance) self.share_manager.driver.migration_get_progress.\ assert_called_once_with( self.context, share.instance, server, 'migration_info') def test_migration_get_progress_invalid(self): share = db_utils.create_share() self.mock_object(db, 'share_get', mock.Mock(return_value=share)) self.assertRaises( exception.InvalidShare, self.share_manager.migration_get_progress, self.context, share) def test_manage_snapshot_invalid_driver_mode(self): self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = True share = db_utils.create_share() snapshot = db_utils.create_snapshot(share_id=share['id']) driver_options = {'fake': 'fake'} self.assertRaises( exception.InvalidDriverMode, self.share_manager.manage_snapshot, self.context, snapshot['id'], driver_options) def test_manage_snapshot_invalid_snapshot(self): fake_share_server = 'fake_share_server' 
self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = False mock_get_share_server = self.mock_object( self.share_manager, '_get_share_server', mock.Mock(return_value=fake_share_server)) share = db_utils.create_share() snapshot = db_utils.create_snapshot(share_id=share['id']) driver_options = {'fake': 'fake'} mock_get = self.mock_object(self.share_manager.db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.assertRaises( exception.InvalidShareSnapshot, self.share_manager.manage_snapshot, self.context, snapshot['id'], driver_options) mock_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['id']) mock_get_share_server.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['share']) def test_manage_snapshot_driver_exception(self): CustomException = type('CustomException', (Exception,), {}) self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = False mock_manage = self.mock_object(self.share_manager.driver, 'manage_existing_snapshot', mock.Mock(side_effect=CustomException)) mock_get_share_server = self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) share = db_utils.create_share() snapshot = db_utils.create_snapshot(share_id=share['id']) driver_options = {} mock_get = self.mock_object(self.share_manager.db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.assertRaises( CustomException, self.share_manager.manage_snapshot, self.context, snapshot['id'], driver_options) mock_manage.assert_called_once_with(mock.ANY, driver_options) mock_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['id']) mock_get_share_server.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['share']) @ddt.data( {'size': 1}, {'size': 2, 'name': 'fake'}, {'size': 3}) def test_manage_snapshot_valid_snapshot(self, driver_data): mock_get_share_server = self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) self.mock_object(self.share_manager.db, 'share_snapshot_update') self.mock_object(self.share_manager, 'driver') self.mock_object(self.share_manager, '_update_quota_usages') self.share_manager.driver.driver_handles_share_servers = False mock_manage = self.mock_object( self.share_manager.driver, "manage_existing_snapshot", mock.Mock(return_value=driver_data)) size = driver_data['size'] share = db_utils.create_share(size=size) snapshot = db_utils.create_snapshot(share_id=share['id'], size=size) snapshot_id = snapshot['id'] driver_options = {} mock_get = self.mock_object(self.share_manager.db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.share_manager.manage_snapshot(self.context, snapshot_id, driver_options) mock_manage.assert_called_once_with(mock.ANY, driver_options) valid_snapshot_data = { 'status': constants.STATUS_AVAILABLE} valid_snapshot_data.update(driver_data) self.share_manager.db.share_snapshot_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot_id, valid_snapshot_data) self.share_manager._update_quota_usages.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['project_id'], {'snapshots': 1, 'snapshot_gigabytes': size}) mock_get_share_server.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['share']) mock_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot_id) def 
test_unmanage_snapshot_invalid_driver_mode(self): self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = True share = db_utils.create_share() snapshot = db_utils.create_snapshot(share_id=share['id']) self.mock_object(self.share_manager.db, 'share_snapshot_update') ret = self.share_manager.unmanage_snapshot(self.context, snapshot['id']) self.assertIsNone(ret) self.share_manager.db.share_snapshot_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['id'], {'status': constants.STATUS_UNMANAGE_ERROR}) def test_unmanage_snapshot_invalid_snapshot(self): self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = False mock_get_share_server = self.mock_object( self.share_manager, '_get_share_server', mock.Mock(return_value='fake_share_server')) self.mock_object(self.share_manager.db, 'share_snapshot_update') share = db_utils.create_share() snapshot = db_utils.create_snapshot(share_id=share['id']) mock_get = self.mock_object(self.share_manager.db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) ret = self.share_manager.unmanage_snapshot(self.context, snapshot['id']) self.assertIsNone(ret) self.share_manager.db.share_snapshot_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['id'], {'status': constants.STATUS_UNMANAGE_ERROR}) mock_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['id']) mock_get_share_server.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['share']) def test_unmanage_snapshot_invalid_share(self): self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = False mock_unmanage = mock.Mock( side_effect=exception.UnmanageInvalidShareSnapshot(reason="fake")) self.mock_object(self.share_manager.driver, "unmanage_snapshot", mock_unmanage) mock_get_share_server = self.mock_object( self.share_manager, '_get_share_server', mock.Mock(return_value=None)) self.mock_object(self.share_manager.db, 'share_snapshot_update') share = db_utils.create_share() snapshot = db_utils.create_snapshot(share_id=share['id']) mock_get = self.mock_object(self.share_manager.db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.share_manager.unmanage_snapshot(self.context, snapshot['id']) self.share_manager.db.share_snapshot_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['id'], {'status': constants.STATUS_UNMANAGE_ERROR}) self.share_manager.driver.unmanage_snapshot.assert_called_once_with( mock.ANY) mock_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['id']) mock_get_share_server.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['share']) @ddt.data(False, True) def test_unmanage_snapshot_valid_snapshot(self, quota_error): if quota_error: self.mock_object(quota.QUOTAS, 'reserve', mock.Mock( side_effect=exception.ManilaException(message='error'))) mock_log_warning = self.mock_object(manager.LOG, 'warning') self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = False self.mock_object(self.share_manager.driver, "unmanage_snapshot") mock_get_share_server = self.mock_object( self.share_manager, '_get_share_server', mock.Mock(return_value=None)) mock_snapshot_instance_destroy_call = self.mock_object( self.share_manager.db, 'share_snapshot_instance_delete') share = db_utils.create_share() snapshot = 
db_utils.create_snapshot(share_id=share['id']) mock_get = self.mock_object(self.share_manager.db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.share_manager.unmanage_snapshot(self.context, snapshot['id']) self.share_manager.driver.unmanage_snapshot.assert_called_once_with( mock.ANY) mock_snapshot_instance_destroy_call.assert_called_once_with( mock.ANY, snapshot['instance']['id']) mock_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['id']) mock_get_share_server.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['share']) if quota_error: self.assertTrue(mock_log_warning.called) def _setup_crud_replicated_snapshot_data(self): snapshot = fakes.fake_snapshot(create_instance=True) snapshot_instance = fakes.fake_snapshot_instance( base_snapshot=snapshot) snapshot_instances = [snapshot['instance'], snapshot_instance] replicas = [fake_replica(), fake_replica()] return snapshot, snapshot_instances, replicas def test_create_replicated_snapshot_driver_exception(self): snapshot, snapshot_instances, replicas = ( self._setup_crud_replicated_snapshot_data() ) self.mock_object( db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.mock_object(self.share_manager, '_get_share_server') self.mock_object(db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=snapshot_instances)) self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=replicas)) self.mock_object( self.share_manager.driver, 'create_replicated_snapshot', mock.Mock(side_effect=exception.ManilaException)) mock_db_update_call = self.mock_object( db, 'share_snapshot_instance_update') self.assertRaises(exception.ManilaException, self.share_manager.create_replicated_snapshot, self.context, snapshot['id'], share_id='fake_share') mock_db_update_call.assert_has_calls([ mock.call( self.context, snapshot['instance']['id'], {'status': constants.STATUS_ERROR}), mock.call( self.context, snapshot_instances[1]['id'], {'status': constants.STATUS_ERROR}), ]) @ddt.data(None, []) def test_create_replicated_snapshot_driver_updates_nothing(self, retval): snapshot, snapshot_instances, replicas = ( self._setup_crud_replicated_snapshot_data() ) self.mock_object( db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.mock_object(self.share_manager, '_get_share_server') self.mock_object(db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=snapshot_instances)) self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=replicas)) self.mock_object( self.share_manager.driver, 'create_replicated_snapshot', mock.Mock(return_value=retval)) mock_db_update_call = self.mock_object( db, 'share_snapshot_instance_update') return_value = self.share_manager.create_replicated_snapshot( self.context, snapshot['id'], share_id='fake_share') self.assertIsNone(return_value) self.assertFalse(mock_db_update_call.called) def test_create_replicated_snapshot_driver_updates_snapshot(self): snapshot, snapshot_instances, replicas = ( self._setup_crud_replicated_snapshot_data() ) snapshot_dict = { 'status': constants.STATUS_AVAILABLE, 'provider_location': 'spinners_end', 'progress': '100%', 'id': snapshot['instance']['id'], } self.mock_object( db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.mock_object(self.share_manager, '_get_share_server') self.mock_object(db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=snapshot_instances)) self.mock_object(db, 'share_replicas_get_all_by_share', 
                         mock.Mock(return_value=replicas))
        self.mock_object(
            self.share_manager.driver, 'create_replicated_snapshot',
            mock.Mock(return_value=[snapshot_dict]))
        mock_db_update_call = self.mock_object(
            db, 'share_snapshot_instance_update')

        return_value = self.share_manager.create_replicated_snapshot(
            self.context, snapshot['id'], share_id='fake_share')

        self.assertIsNone(return_value)
        mock_db_update_call.assert_called_once_with(
            self.context, snapshot['instance']['id'], snapshot_dict)

    def test_delete_replicated_snapshot_driver_exception(self):
        snapshot, snapshot_instances, replicas = (
            self._setup_crud_replicated_snapshot_data()
        )
        self.mock_object(
            db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
        self.mock_object(self.share_manager, '_get_share_server')
        self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
                         mock.Mock(return_value=snapshot_instances))
        self.mock_object(db, 'share_replicas_get_all_by_share',
                         mock.Mock(return_value=replicas))
        self.mock_object(
            self.share_manager.driver, 'delete_replicated_snapshot',
            mock.Mock(side_effect=exception.ManilaException))
        mock_db_update_call = self.mock_object(
            db, 'share_snapshot_instance_update')
        mock_db_delete_call = self.mock_object(
            db, 'share_snapshot_instance_delete')

        self.assertRaises(exception.ManilaException,
                          self.share_manager.delete_replicated_snapshot,
                          self.context, snapshot['id'],
                          share_id='fake_share')
        mock_db_update_call.assert_has_calls([
            mock.call(
                self.context, snapshot['instance']['id'],
                {'status': constants.STATUS_ERROR_DELETING}),
            mock.call(
                self.context, snapshot_instances[1]['id'],
                {'status': constants.STATUS_ERROR_DELETING}),
        ])
        self.assertFalse(mock_db_delete_call.called)

    def test_delete_replicated_snapshot_driver_exception_ignored_with_force(
            self):
        snapshot, snapshot_instances, replicas = (
            self._setup_crud_replicated_snapshot_data()
        )
        self.mock_object(
            db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
        self.mock_object(self.share_manager, '_get_share_server')
        self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
                         mock.Mock(return_value=snapshot_instances))
        self.mock_object(db, 'share_replicas_get_all_by_share',
                         mock.Mock(return_value=replicas))
        self.mock_object(
            self.share_manager.driver, 'delete_replicated_snapshot',
            mock.Mock(side_effect=exception.ManilaException))
        mock_db_update_call = self.mock_object(
            db, 'share_snapshot_instance_update')
        mock_db_delete_call = self.mock_object(
            db, 'share_snapshot_instance_delete')

        retval = self.share_manager.delete_replicated_snapshot(
            self.context, snapshot['id'], share_id='fake_share', force=True)

        self.assertIsNone(retval)
        mock_db_delete_call.assert_has_calls([
            mock.call(
                self.context, snapshot['instance']['id']),
            mock.call(
                self.context, snapshot_instances[1]['id']),
        ])
        self.assertFalse(mock_db_update_call.called)

    @ddt.data(None, [])
    def test_delete_replicated_snapshot_driver_updates_nothing(self, retval):
        snapshot, snapshot_instances, replicas = (
            self._setup_crud_replicated_snapshot_data()
        )
        self.mock_object(
            db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
        self.mock_object(self.share_manager, '_get_share_server')
        self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
                         mock.Mock(return_value=snapshot_instances))
        self.mock_object(db, 'share_replicas_get_all_by_share',
                         mock.Mock(return_value=replicas))
        self.mock_object(
            self.share_manager.driver, 'delete_replicated_snapshot',
            mock.Mock(return_value=retval))
        mock_db_update_call = self.mock_object(
            db, 'share_snapshot_instance_update')
        mock_db_delete_call = self.mock_object(
            db, 'share_snapshot_instance_delete')

        return_value = self.share_manager.delete_replicated_snapshot(
            self.context, snapshot['id'], share_id='fake_share')

        self.assertIsNone(return_value)
        self.assertFalse(mock_db_delete_call.called)
        self.assertFalse(mock_db_update_call.called)

    def test_delete_replicated_snapshot_driver_deletes_snapshots(self):
        snapshot, snapshot_instances, replicas = (
            self._setup_crud_replicated_snapshot_data()
        )
        retval = [{
            'status': constants.STATUS_DELETED,
            'id': snapshot['instance']['id'],
        }]
        self.mock_object(
            db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
        self.mock_object(self.share_manager, '_get_share_server')
        self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
                         mock.Mock(return_value=snapshot_instances))
        self.mock_object(db, 'share_replicas_get_all_by_share',
                         mock.Mock(return_value=replicas))
        self.mock_object(
            self.share_manager.driver, 'delete_replicated_snapshot',
            mock.Mock(return_value=retval))
        mock_db_update_call = self.mock_object(
            db, 'share_snapshot_instance_update')
        mock_db_delete_call = self.mock_object(
            db, 'share_snapshot_instance_delete')

        return_value = self.share_manager.delete_replicated_snapshot(
            self.context, snapshot['id'], share_id='fake_share')

        self.assertIsNone(return_value)
        mock_db_delete_call.assert_called_once_with(
            self.context, snapshot['instance']['id'])
        self.assertFalse(mock_db_update_call.called)

    @ddt.data(True, False)
    def test_delete_replicated_snapshot_drv_del_and_updates_snapshots(
            self, force):
        snapshot, snapshot_instances, replicas = (
            self._setup_crud_replicated_snapshot_data()
        )
        updated_instance_details = {
            'status': constants.STATUS_ERROR,
            'id': snapshot_instances[1]['id'],
            'provider_location': 'azkaban',
        }
        retval = [
            {
                'status': constants.STATUS_DELETED,
                'id': snapshot['instance']['id'],
            },
        ]
        retval.append(updated_instance_details)
        self.mock_object(
            db, 'share_snapshot_get', mock.Mock(return_value=snapshot))
        self.mock_object(self.share_manager, '_get_share_server')
        self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
                         mock.Mock(return_value=snapshot_instances))
        self.mock_object(db, 'share_replicas_get_all_by_share',
                         mock.Mock(return_value=replicas))
        self.mock_object(
            self.share_manager.driver, 'delete_replicated_snapshot',
            mock.Mock(return_value=retval))
        mock_db_update_call = self.mock_object(
            db, 'share_snapshot_instance_update')
        mock_db_delete_call = self.mock_object(
            db, 'share_snapshot_instance_delete')

        return_value = self.share_manager.delete_replicated_snapshot(
            self.context, snapshot['id'], share_id='fake_share', force=force)

        self.assertIsNone(return_value)
        if force:
            self.assertEqual(2, mock_db_delete_call.call_count)
            self.assertFalse(mock_db_update_call.called)
        else:
            mock_db_delete_call.assert_called_once_with(
                self.context, snapshot['instance']['id'])
            mock_db_update_call.assert_called_once_with(
                self.context, snapshot_instances[1]['id'],
                updated_instance_details)

    def test_periodic_share_replica_snapshot_update(self):
        mock_debug_log = self.mock_object(manager.LOG, 'debug')
        replicas = 3 * [
            fake_replica(host='malfoy@manor#_pool0',
                         replica_state=constants.REPLICA_STATE_IN_SYNC)
        ]
        replicas.append(fake_replica(replica_state=constants.STATUS_ACTIVE))
        snapshot = fakes.fake_snapshot(create_instance=True,
                                       status=constants.STATUS_DELETING)
        snapshot_instances = 3 * [
            fakes.fake_snapshot_instance(base_snapshot=snapshot)
        ]
        self.mock_object(
            db, 'share_replicas_get_all', mock.Mock(return_value=replicas))
        self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
mock.Mock(return_value=snapshot_instances)) mock_snapshot_update_call = self.mock_object( self.share_manager, '_update_replica_snapshot') retval = self.share_manager.periodic_share_replica_snapshot_update( self.context) self.assertIsNone(retval) self.assertEqual(1, mock_debug_log.call_count) self.assertEqual(0, mock_snapshot_update_call.call_count) @ddt.data(True, False) def test_periodic_share_replica_snapshot_update_nothing_to_update( self, has_instances): mock_debug_log = self.mock_object(manager.LOG, 'debug') replicas = 3 * [ fake_replica(host='malfoy@manor#_pool0', replica_state=constants.REPLICA_STATE_IN_SYNC) ] replicas.append(fake_replica(replica_state=constants.STATUS_ACTIVE)) snapshot = fakes.fake_snapshot(create_instance=True, status=constants.STATUS_DELETING) snapshot_instances = 3 * [ fakes.fake_snapshot_instance(base_snapshot=snapshot) ] self.mock_object(db, 'share_replicas_get_all', mock.Mock(side_effect=[[], replicas])) self.mock_object(db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(side_effect=[snapshot_instances, []])) mock_snapshot_update_call = self.mock_object( self.share_manager, '_update_replica_snapshot') retval = self.share_manager.periodic_share_replica_snapshot_update( self.context) self.assertIsNone(retval) self.assertEqual(1, mock_debug_log.call_count) self.assertEqual(0, mock_snapshot_update_call.call_count) def test__update_replica_snapshot_replica_deleted_from_database(self): replica_not_found = exception.ShareReplicaNotFound(replica_id='xyzzy') self.mock_object(db, 'share_replica_get', mock.Mock( side_effect=replica_not_found)) mock_db_delete_call = self.mock_object( db, 'share_snapshot_instance_delete') mock_db_update_call = self.mock_object( db, 'share_snapshot_instance_update') mock_driver_update_call = self.mock_object( self.share_manager.driver, 'update_replicated_snapshot') snaphot_instance = fakes.fake_snapshot_instance() retval = self.share_manager._update_replica_snapshot( self.context, snaphot_instance) self.assertIsNone(retval) mock_db_delete_call.assert_called_once_with( self.context, snaphot_instance['id']) self.assertFalse(mock_driver_update_call.called) self.assertFalse(mock_db_update_call.called) def test__update_replica_snapshot_both_deleted_from_database(self): replica_not_found = exception.ShareReplicaNotFound(replica_id='xyzzy') instance_not_found = exception.ShareSnapshotInstanceNotFound( instance_id='spoon!') self.mock_object(db, 'share_replica_get', mock.Mock( side_effect=replica_not_found)) mock_db_delete_call = self.mock_object( db, 'share_snapshot_instance_delete', mock.Mock( side_effect=instance_not_found)) mock_db_update_call = self.mock_object( db, 'share_snapshot_instance_update') mock_driver_update_call = self.mock_object( self.share_manager.driver, 'update_replicated_snapshot') snapshot_instance = fakes.fake_snapshot_instance() retval = self.share_manager._update_replica_snapshot( self.context, snapshot_instance) self.assertIsNone(retval) mock_db_delete_call.assert_called_once_with( self.context, snapshot_instance['id']) self.assertFalse(mock_driver_update_call.called) self.assertFalse(mock_db_update_call.called) def test__update_replica_snapshot_driver_raises_Not_Found_exception(self): mock_debug_log = self.mock_object(manager.LOG, 'debug') replica = fake_replica() snapshot_instance = fakes.fake_snapshot_instance( status=constants.STATUS_DELETING) self.mock_object( db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_snapshot_instance_get', 
mock.Mock(return_value=snapshot_instance)) self.mock_object(db, 'share_snapshot_instance_get', mock.Mock(return_value=snapshot_instance)) self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica])) self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) self.mock_object( self.share_manager.driver, 'update_replicated_snapshot', mock.Mock( side_effect=exception.SnapshotResourceNotFound(name='abc'))) mock_db_delete_call = self.mock_object( db, 'share_snapshot_instance_delete') mock_db_update_call = self.mock_object( db, 'share_snapshot_instance_update') retval = self.share_manager._update_replica_snapshot( self.context, snapshot_instance, replica_snapshots=None) self.assertIsNone(retval) self.assertEqual(1, mock_debug_log.call_count) mock_db_delete_call.assert_called_once_with( self.context, snapshot_instance['id']) self.assertFalse(mock_db_update_call.called) @ddt.data(exception.NotFound, exception.ManilaException) def test__update_replica_snapshot_driver_raises_other_exception(self, exc): mock_debug_log = self.mock_object(manager.LOG, 'debug') mock_info_log = self.mock_object(manager.LOG, 'info') mock_exception_log = self.mock_object(manager.LOG, 'exception') replica = fake_replica() snapshot_instance = fakes.fake_snapshot_instance( status=constants.STATUS_CREATING) self.mock_object( db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_snapshot_instance_get', mock.Mock(return_value=snapshot_instance)) self.mock_object(db, 'share_snapshot_instance_get', mock.Mock(return_value=snapshot_instance)) self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica])) self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) self.mock_object(self.share_manager.driver, 'update_replicated_snapshot', mock.Mock(side_effect=exc)) mock_db_delete_call = self.mock_object( db, 'share_snapshot_instance_delete') mock_db_update_call = self.mock_object( db, 'share_snapshot_instance_update') retval = self.share_manager._update_replica_snapshot( self.context, snapshot_instance) self.assertIsNone(retval) self.assertEqual(1, mock_exception_log.call_count) self.assertEqual(1, mock_debug_log.call_count) self.assertFalse(mock_info_log.called) mock_db_update_call.assert_called_once_with( self.context, snapshot_instance['id'], {'status': 'error'}) self.assertFalse(mock_db_delete_call.called) @ddt.data(True, False) def test__update_replica_snapshot_driver_updates_replica(self, update): replica = fake_replica() snapshot_instance = fakes.fake_snapshot_instance() driver_update = {} if update: driver_update = { 'id': snapshot_instance['id'], 'provider_location': 'knockturn_alley', 'status': constants.STATUS_AVAILABLE, } mock_debug_log = self.mock_object(manager.LOG, 'debug') mock_info_log = self.mock_object(manager.LOG, 'info') self.mock_object( db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_snapshot_instance_get', mock.Mock(return_value=snapshot_instance)) self.mock_object(db, 'share_snapshot_instance_get', mock.Mock(return_value=snapshot_instance)) self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica])) self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) self.mock_object(self.share_manager.driver, 'update_replicated_snapshot', mock.Mock(return_value=driver_update)) mock_db_delete_call = self.mock_object( db, 'share_snapshot_instance_delete') mock_db_update_call = 
self.mock_object(
            db, 'share_snapshot_instance_update')

        retval = self.share_manager._update_replica_snapshot(
            self.context, snapshot_instance, replica_snapshots=None)

        driver_update['progress'] = '100%'
        self.assertIsNone(retval)
        self.assertEqual(1, mock_debug_log.call_count)
        self.assertFalse(mock_info_log.called)
        if update:
            mock_db_update_call.assert_called_once_with(
                self.context, snapshot_instance['id'], driver_update)
        else:
            self.assertFalse(mock_db_update_call.called)
        self.assertFalse(mock_db_delete_call.called)


@ddt.ddt
class HookWrapperTestCase(test.TestCase):

    def setUp(self):
        super(HookWrapperTestCase, self).setUp()
        self.configuration = mock.Mock()
        self.configuration.safe_get.return_value = True

    @manager.add_hooks
    def _fake_wrapped_method(self, some_arg, some_kwarg):
        return "foo"

    def test_hooks_enabled(self):
        self.hooks = [mock.Mock(return_value=i) for i in range(2)]

        result = self._fake_wrapped_method(
            "some_arg", some_kwarg="some_kwarg_value")

        self.assertEqual("foo", result)
        for i, mock_hook in enumerate(self.hooks):
            mock_hook.execute_pre_hook.assert_called_once_with(
                "some_arg",
                func_name="_fake_wrapped_method",
                some_kwarg="some_kwarg_value")
            mock_hook.execute_post_hook.assert_called_once_with(
                "some_arg",
                func_name="_fake_wrapped_method",
                driver_action_results="foo",
                pre_hook_data=self.hooks[i].execute_pre_hook.return_value,
                some_kwarg="some_kwarg_value")

    def test_hooks_disabled(self):
        self.hooks = []

        result = self._fake_wrapped_method(
            "some_arg", some_kwarg="some_kwarg_value")

        self.assertEqual("foo", result)
        for mock_hook in self.hooks:
            self.assertFalse(mock_hook.execute_pre_hook.called)
            self.assertFalse(mock_hook.execute_post_hook.called)
manila-2.0.0/manila/tests/share/test_access.py0000664000567000056710000001573312701407107022502 0ustar jenkinsjenkins00000000000000# Copyright 2016 Hitachi Data Systems inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock

from manila.common import constants
from manila import context
from manila import db
from manila import exception
from manila.share import access
from manila import test
from manila.tests import db_utils


class ShareInstanceAccessTestCase(test.TestCase):

    def setUp(self):
        super(ShareInstanceAccessTestCase, self).setUp()
        self.driver = self.mock_class("manila.share.driver.ShareDriver",
                                      mock.Mock())
        self.share_access_helper = access.ShareInstanceAccess(db, self.driver)
        self.context = context.get_admin_context()
        self.share = db_utils.create_share()
        self.share_instance = db_utils.create_share_instance(
            share_id=self.share['id'],
            access_rules_status=constants.STATUS_ERROR)

    def test_update_access_rules(self):
        original_rules = []
        self.mock_object(db, "share_instance_get", mock.Mock(
            return_value=self.share_instance))
        self.mock_object(db, "share_access_get_all_for_share",
                         mock.Mock(return_value=original_rules))
        self.mock_object(db, "share_instance_update_access_status",
                         mock.Mock())
        self.mock_object(self.driver, "update_access", mock.Mock())

        self.share_access_helper.update_access_rules(
            self.context, self.share_instance['id'])

        self.driver.update_access.assert_called_with(
            self.context, self.share_instance, original_rules,
            add_rules=[], delete_rules=[], share_server=None)
        db.share_instance_update_access_status.assert_called_with(
            self.context, self.share_instance['id'], constants.STATUS_ACTIVE)

    def test_update_access_rules_fallback(self):
        add_rules = [db_utils.create_access(share_id=self.share['id'])]
        delete_rules = [db_utils.create_access(share_id=self.share['id'])]
        original_rules = [db_utils.create_access(share_id=self.share['id'])]

        self.mock_object(db, "share_instance_get", mock.Mock(
            return_value=self.share_instance))
        self.mock_object(db, "share_access_get_all_for_share",
                         mock.Mock(return_value=original_rules))
        self.mock_object(db, "share_access_get_all_for_instance",
                         mock.Mock(return_value=original_rules))
        self.mock_object(db, "share_instance_update_access_status",
                         mock.Mock())
        self.mock_object(self.driver, "update_access",
                         mock.Mock(side_effect=NotImplementedError))
        self.mock_object(self.driver, "allow_access", mock.Mock())
        self.mock_object(self.driver, "deny_access", mock.Mock())

        self.share_access_helper.update_access_rules(
            self.context, self.share_instance['id'], add_rules, delete_rules)

        self.driver.update_access.assert_called_with(
            self.context, self.share_instance, original_rules,
            add_rules=add_rules, delete_rules=[], share_server=None)
        self.driver.allow_access.assert_called_with(
            self.context, self.share_instance, add_rules[0],
            share_server=None)
        self.driver.deny_access.assert_called_with(
            self.context, self.share_instance, delete_rules[0],
            share_server=None)
        db.share_instance_update_access_status.assert_called_with(
            self.context, self.share_instance['id'], constants.STATUS_ACTIVE)

    def test_update_access_rules_exception(self):
        original_rules = []
        add_rules = [db_utils.create_access(share_id=self.share['id'])]
        delete_rules = 'all'

        self.mock_object(db, "share_instance_get", mock.Mock(
            return_value=self.share_instance))
        self.mock_object(db, "share_access_get_all_for_instance",
                         mock.Mock(return_value=original_rules))
        self.mock_object(db, "share_instance_update_access_status",
                         mock.Mock())
        self.mock_object(self.driver, "update_access",
                         mock.Mock(side_effect=exception.ManilaException))

        self.assertRaises(exception.ManilaException,
                          self.share_access_helper.update_access_rules,
                          self.context, self.share_instance['id'], add_rules,
                          delete_rules)

        self.driver.update_access.assert_called_with(
self.context, self.share_instance, [], add_rules=add_rules, delete_rules=original_rules, share_server=None) db.share_instance_update_access_status.assert_called_with( self.context, self.share_instance['id'], constants.STATUS_ERROR) def test_update_access_rules_recursive_call(self): share_instance = db_utils.create_share_instance( access_rules_status=constants.STATUS_ACTIVE, share_id=self.share['id']) add_rules = [db_utils.create_access( share_id=self.share['id'])] original_rules = [] self.mock_object(db, "share_instance_get", mock.Mock( return_value=share_instance)) self.mock_object(db, "share_access_get_all_for_instance", mock.Mock(return_value=original_rules)) mock_update_access = self.mock_object(self.driver, "update_access") self.mock_object(self.share_access_helper, '_check_needs_refresh', mock.Mock(side_effect=[True, False])) self.share_access_helper.update_access_rules(self.context, share_instance['id'], add_rules=add_rules) mock_update_access.assert_has_calls([ mock.call(self.context, share_instance, original_rules, add_rules=add_rules, delete_rules=[], share_server=None), mock.call(self.context, share_instance, original_rules, add_rules=[], delete_rules=[], share_server=None) ]) manila-2.0.0/manila/tests/share/test_rpcapi.py0000664000567000056710000003341412701407112022507 0ustar jenkinsjenkins00000000000000# Copyright 2015 Alex Meade # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit Tests for manila.share.rpcapi. 
""" import copy from oslo_config import cfg from oslo_serialization import jsonutils from manila.common import constants from manila import context from manila.share import rpcapi as share_rpcapi from manila import test from manila.tests import db_utils CONF = cfg.CONF class ShareRpcAPITestCase(test.TestCase): def setUp(self): super(ShareRpcAPITestCase, self).setUp() self.context = context.get_admin_context() share = db_utils.create_share( availability_zone=CONF.storage_availability_zone, status=constants.STATUS_AVAILABLE ) access = db_utils.create_access(share_id=share['id']) snapshot = db_utils.create_snapshot(share_id=share['id']) share_replica = db_utils.create_share_replica( id='fake_replica', share_id='fake_share_id', host='fake_host', ) share_server = db_utils.create_share_server() cg = {'id': 'fake_cg_id', 'host': 'fake_host'} cgsnapshot = {'id': 'fake_cg_id'} host = {'host': 'fake_host', 'capabilities': 1} self.fake_share = jsonutils.to_primitive(share) # mock out the getattr on the share db model object since jsonutils # doesn't know about those extra attributes to pull in self.fake_share['instance'] = jsonutils.to_primitive(share.instance) self.fake_share_replica = jsonutils.to_primitive(share_replica) self.fake_access = jsonutils.to_primitive(access) self.fake_snapshot = jsonutils.to_primitive(snapshot) self.fake_share_server = jsonutils.to_primitive(share_server) self.fake_cg = jsonutils.to_primitive(cg) self.fake_cgsnapshot = jsonutils.to_primitive(cgsnapshot) self.fake_host = jsonutils.to_primitive(host) self.ctxt = context.RequestContext('fake_user', 'fake_project') self.rpcapi = share_rpcapi.ShareAPI() def test_serialized_share_has_id(self): self.assertTrue('id' in self.fake_share) def _test_share_api(self, method, rpc_method, **kwargs): expected_retval = 'foo' if method == 'call' else None target = { "version": kwargs.pop('version', self.rpcapi.BASE_RPC_API_VERSION) } expected_msg = copy.deepcopy(kwargs) if 'share' in expected_msg and method != 'get_migration_info': share = expected_msg['share'] del expected_msg['share'] expected_msg['share_id'] = share['id'] if 'share_instance' in expected_msg: share_instance = expected_msg.pop('share_instance', None) expected_msg['share_instance_id'] = share_instance['id'] if 'cg' in expected_msg: cg = expected_msg['cg'] del expected_msg['cg'] expected_msg['cg_id'] = cg['id'] if 'cgsnapshot' in expected_msg: snap = expected_msg['cgsnapshot'] del expected_msg['cgsnapshot'] expected_msg['cgsnapshot_id'] = snap['id'] if 'access' in expected_msg: access = expected_msg['access'] del expected_msg['access'] expected_msg['access_rules'] = [access['id']] if 'host' in expected_msg: del expected_msg['host'] if 'snapshot' in expected_msg: snapshot = expected_msg['snapshot'] del expected_msg['snapshot'] expected_msg['snapshot_id'] = snapshot['id'] if 'dest_host' in expected_msg: del expected_msg['dest_host'] expected_msg['host'] = self.fake_host if 'share_replica' in expected_msg: share_replica = expected_msg.pop('share_replica', None) expected_msg['share_replica_id'] = share_replica['id'] expected_msg['share_id'] = share_replica['share_id'] if 'replicated_snapshot' in expected_msg: snapshot = expected_msg.pop('replicated_snapshot', None) expected_msg['snapshot_id'] = snapshot['id'] expected_msg['share_id'] = snapshot['share_id'] if 'host' in kwargs: host = kwargs['host'] elif 'cg' in kwargs: host = kwargs['cg']['host'] elif 'share_instance' in kwargs: host = kwargs['share_instance']['host'] elif 'share_server' in kwargs: host = 
kwargs['share_server']['host'] elif 'share_replica' in kwargs: host = kwargs['share_replica']['host'] elif 'replicated_snapshot' in kwargs: host = kwargs['share']['instance']['host'] else: host = kwargs['share']['host'] target['server'] = host target['topic'] = '%s.%s' % (CONF.share_topic, host) self.fake_args = None self.fake_kwargs = None def _fake_prepare_method(*args, **kwds): for kwd in kwds: self.assertEqual(target[kwd], kwds[kwd]) return self.rpcapi.client def _fake_rpc_method(*args, **kwargs): self.fake_args = args self.fake_kwargs = kwargs if expected_retval: return expected_retval self.mock_object(self.rpcapi.client, "prepare", _fake_prepare_method) self.mock_object(self.rpcapi.client, rpc_method, _fake_rpc_method) retval = getattr(self.rpcapi, method)(self.ctxt, **kwargs) self.assertEqual(expected_retval, retval) expected_args = [self.ctxt, method] for arg, expected_arg in zip(self.fake_args, expected_args): self.assertEqual(expected_arg, arg) for kwarg, value in self.fake_kwargs.items(): self.assertEqual(expected_msg[kwarg], value) def test_create_share_instance(self): self._test_share_api('create_share_instance', rpc_method='cast', version='1.4', share_instance=self.fake_share, host='fake_host1', snapshot_id='fake_snapshot_id', filter_properties=None, request_spec=None) def test_delete_share_instance(self): self._test_share_api('delete_share_instance', rpc_method='cast', version='1.4', share_instance=self.fake_share, force=False) def test_allow_access(self): self._test_share_api('allow_access', rpc_method='cast', version='1.7', share_instance=self.fake_share, access=self.fake_access) def test_deny_access(self): self._test_share_api('deny_access', rpc_method='cast', version='1.7', share_instance=self.fake_share, access=self.fake_access) def test_create_snapshot(self): self._test_share_api('create_snapshot', rpc_method='cast', share=self.fake_share, snapshot=self.fake_snapshot) def test_delete_snapshot(self): self._test_share_api('delete_snapshot', rpc_method='cast', snapshot=self.fake_snapshot, host='fake_host') def test_delete_share_server(self): self._test_share_api('delete_share_server', rpc_method='cast', share_server=self.fake_share_server) def test_extend_share(self): self._test_share_api('extend_share', rpc_method='cast', version='1.2', share=self.fake_share, new_size=123, reservations={'fake': 'fake'}) def test_shrink_share(self): self._test_share_api('shrink_share', rpc_method='cast', version='1.3', share=self.fake_share, new_size=123) def test_create_consistency_group(self): self._test_share_api('create_consistency_group', version='1.5', rpc_method='cast', cg=self.fake_cg, host='fake_host1') def test_delete_consistency_group(self): self._test_share_api('delete_consistency_group', version='1.5', rpc_method='cast', cg=self.fake_cg) def test_create_cgsnapshot(self): self._test_share_api('create_cgsnapshot', version='1.5', rpc_method='cast', cgsnapshot=self.fake_cgsnapshot, host='fake_host1') def test_delete_cgsnapshot(self): self._test_share_api('delete_cgsnapshot', version='1.5', rpc_method='cast', cgsnapshot=self.fake_cgsnapshot, host='fake_host1') def test_migration_start(self): fake_dest_host = self.Desthost() self._test_share_api('migration_start', rpc_method='cast', version='1.6', share=self.fake_share, dest_host=fake_dest_host, force_host_copy=True, notify=True) def test_migration_get_info(self): self._test_share_api('migration_get_info', rpc_method='call', version='1.6', share_instance=self.fake_share) def test_migration_get_driver_info(self): 
self._test_share_api('migration_get_driver_info', rpc_method='call', version='1.6', share_instance=self.fake_share) def test_migration_complete(self): self._test_share_api('migration_complete', rpc_method='cast', version='1.10', share=self.fake_share, share_instance_id='fake_ins_id', new_share_instance_id='new_fake_ins_id') def test_migration_cancel(self): self._test_share_api('migration_cancel', rpc_method='call', version='1.10', share=self.fake_share) def test_migration_get_progress(self): self._test_share_api('migration_get_progress', rpc_method='call', version='1.10', share=self.fake_share) def test_delete_share_replica(self): self._test_share_api('delete_share_replica', rpc_method='cast', version='1.8', share_replica=self.fake_share_replica, force=False) def test_promote_share_replica(self): self._test_share_api('promote_share_replica', rpc_method='cast', version='1.8', share_replica=self.fake_share_replica) def test_update_share_replica(self): self._test_share_api('update_share_replica', rpc_method='cast', version='1.8', share_replica=self.fake_share_replica) def test_manage_snapshot(self): self._test_share_api('manage_snapshot', rpc_method='cast', version='1.9', snapshot=self.fake_snapshot, host='fake_host', driver_options={'volume_snapshot_id': 'fake'}) def test_unmanage_snapshot(self): self._test_share_api('unmanage_snapshot', rpc_method='cast', version='1.9', snapshot=self.fake_snapshot, host='fake_host') def test_create_replicated_snapshot(self): self._test_share_api('create_replicated_snapshot', rpc_method='cast', version='1.11', replicated_snapshot=self.fake_snapshot, share=self.fake_share) def test_delete_replicated_snapshot(self): self._test_share_api('delete_replicated_snapshot', rpc_method='cast', version='1.11', replicated_snapshot=self.fake_snapshot, share_id=self.fake_snapshot['share_id'], force=False, host='fake_host') class Desthost(object): host = 'fake_host' capabilities = 1 manila-2.0.0/manila/tests/share/test_share_utils.py0000664000567000056710000001172212701407107023555 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # Copyright (c) 2015 Rushil Chugh # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests For miscellaneous util methods used with share.""" from manila.share import utils as share_utils from manila import test class ShareUtilsTestCase(test.TestCase): def test_extract_host_without_pool(self): host = 'Host@Backend' self.assertEqual( 'Host@Backend', share_utils.extract_host(host)) def test_extract_host_only_return_host(self): host = 'Host@Backend' self.assertEqual( 'Host', share_utils.extract_host(host, 'host')) def test_extract_host_only_return_pool(self): host = 'Host@Backend' self.assertIsNone( share_utils.extract_host(host, 'pool')) def test_extract_host_only_return_backend(self): host = 'Host@Backend' self.assertEqual( 'Host@Backend', share_utils.extract_host(host, 'backend')) def test_extract_host_missing_backend_and_pool(self): host = 'Host' # Default level is 'backend' self.assertEqual( 'Host', share_utils.extract_host(host)) def test_extract_host_only_return_backend_name(self): host = 'Host@Backend#Pool' self.assertEqual( 'Backend', share_utils.extract_host(host, 'backend_name')) def test_extract_host_only_return_backend_name_index_error(self): host = 'Host#Pool' self.assertRaises(IndexError, share_utils.extract_host, host, 'backend_name') def test_extract_host_missing_backend(self): host = 'Host#Pool' self.assertEqual( 'Host', share_utils.extract_host(host)) self.assertEqual( 'Host', share_utils.extract_host(host, 'host')) def test_extract_host_missing_backend_only_return_backend(self): host = 'Host#Pool' self.assertEqual( 'Host', share_utils.extract_host(host, 'backend')) def test_extract_host_missing_backend_only_return_pool(self): host = 'Host#Pool' self.assertEqual( 'Pool', share_utils.extract_host(host, 'pool')) self.assertEqual( 'Pool', share_utils.extract_host(host, 'pool', True)) def test_extract_host_missing_pool(self): host = 'Host@Backend' self.assertIsNone( share_utils.extract_host(host, 'pool')) def test_extract_host_missing_pool_use_default_pool(self): host = 'Host@Backend' self.assertEqual( '_pool0', share_utils.extract_host(host, 'pool', True)) def test_extract_host_with_default_pool(self): host = 'Host' # Default_pool_name doesn't work for level other than 'pool' self.assertEqual( 'Host', share_utils.extract_host(host, 'host', True)) self.assertEqual( 'Host', share_utils.extract_host(host, 'host', False)) self.assertEqual( 'Host', share_utils.extract_host(host, 'backend', True)) self.assertEqual( 'Host', share_utils.extract_host(host, 'backend', False)) def test_extract_host_with_pool(self): host = 'Host@Backend#Pool' self.assertEqual( 'Host@Backend', share_utils.extract_host(host)) self.assertEqual( 'Host', share_utils.extract_host(host, 'host')) self.assertEqual( 'Host@Backend', share_utils.extract_host(host, 'backend'),) self.assertEqual( 'Pool', share_utils.extract_host(host, 'pool')) self.assertEqual( 'Pool', share_utils.extract_host(host, 'pool', True)) def test_append_host_with_host_and_pool(self): host = 'Host' pool = 'Pool' expected = 'Host#Pool' self.assertEqual(expected, share_utils.append_host(host, pool)) def test_append_host_with_host(self): host = 'Host' pool = None expected = 'Host' self.assertEqual(expected, share_utils.append_host(host, pool)) def test_append_host_with_pool(self): host = None pool = 'pool' expected = None self.assertEqual(expected, share_utils.append_host(host, pool)) def test_append_host_with_no_values(self): host = None pool = None expected = None self.assertEqual(expected, share_utils.append_host(host, pool)) 
manila-2.0.0/manila/tests/share/test_drivers_private_data.py0000664000567000056710000001330412701407107025432 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt import mock from oslo_utils import uuidutils from manila.share import drivers_private_data as pd from manila import test @ddt.ddt class DriverPrivateDataTestCase(test.TestCase): """Tests DriverPrivateData.""" def setUp(self): super(DriverPrivateDataTestCase, self).setUp() self.fake_storage = mock.Mock() self.entity_id = uuidutils.generate_uuid() def test_default_storage_driver(self): private_data = pd.DriverPrivateData( storage=None, context="fake", backend_host="fake") self.assertIsInstance(private_data._storage, pd.SqlStorageDriver) def test_custom_storage_driver(self): private_data = pd.DriverPrivateData(storage=self.fake_storage) self.assertEqual(self.fake_storage, private_data._storage) def test_invalid_parameters(self): self.assertRaises(ValueError, pd.DriverPrivateData) @ddt.data({'context': 'fake'}, {'backend_host': 'fake'}) def test_invalid_single_parameter(self, test_args): self.assertRaises(ValueError, pd.DriverPrivateData, **test_args) @ddt.data("111", ["fake"], None) def test_validate_entity_id_invalid(self, entity_id): data = pd.DriverPrivateData(storage="fake") self.assertRaises(ValueError, data._validate_entity_id, entity_id) def test_validate_entity_id_valid(self): actual_result = ( pd.DriverPrivateData._validate_entity_id(self.entity_id) ) self.assertIsNone(actual_result) def test_update(self): data = pd.DriverPrivateData(storage=self.fake_storage) details = {"foo": "bar"} self.mock_object(self.fake_storage, 'update', mock.Mock(return_value=True)) actual_result = data.update( self.entity_id, details, delete_existing=True ) self.assertTrue(actual_result) self.fake_storage.update.assert_called_once_with( self.entity_id, details, True ) def test_update_invalid(self): data = pd.DriverPrivateData(storage=self.fake_storage) details = ["invalid"] self.mock_object(self.fake_storage, 'update', mock.Mock(return_value=True)) self.assertRaises( ValueError, data.update, self.entity_id, details) self.assertFalse(self.fake_storage.update.called) def test_get(self): data = pd.DriverPrivateData(storage=self.fake_storage) key = "fake_key" value = "fake_value" default_value = "def" self.mock_object(self.fake_storage, 'get', mock.Mock(return_value=value)) actual_result = data.get(self.entity_id, key, default_value) self.assertEqual(value, actual_result) self.fake_storage.get.assert_called_once_with( self.entity_id, key, default_value ) def test_delete(self): data = pd.DriverPrivateData(storage=self.fake_storage) key = "fake_key" self.mock_object(self.fake_storage, 'get', mock.Mock(return_value=True)) actual_result = data.delete(self.entity_id, key) self.assertTrue(actual_result) self.fake_storage.delete.assert_called_once_with( self.entity_id, key ) fake_storage_data = { "entity_id": "fake_id", "details": {"foo": "bar"}, "context": "fake_context", 
"backend_host": "fake_host", "default": "def", "delete_existing": True, "key": "fake_key", } def create_arg_list(key_names): return [fake_storage_data[key] for key in key_names] def create_arg_dict(key_names): return {key: fake_storage_data[key] for key in key_names} @ddt.ddt class SqlStorageDriverTestCase(test.TestCase): @ddt.data( { "method_name": 'update', "method_kwargs": create_arg_dict( ["entity_id", "details", "delete_existing"]), "valid_args": create_arg_list( ["context", "backend_host", "entity_id", "details", "delete_existing"] ) }, { "method_name": 'get', "method_kwargs": create_arg_dict(["entity_id", "key", "default"]), "valid_args": create_arg_list( ["context", "backend_host", "entity_id", "key", "default"]), }, { "method_name": 'delete', "method_kwargs": create_arg_dict(["entity_id", "key"]), "valid_args": create_arg_list( ["context", "backend_host", "entity_id", "key"]), }) @ddt.unpack def test_methods(self, method_kwargs, method_name, valid_args): method = method_name db_method = 'driver_private_data_' + method_name with mock.patch('manila.db.api.' + db_method) as db_method: storage_driver = pd.SqlStorageDriver( context=fake_storage_data['context'], backend_host=fake_storage_data['backend_host']) method = getattr(storage_driver, method) method(**method_kwargs) db_method.assert_called_once_with(*valid_args) manila-2.0.0/manila/tests/fake_notifier.py0000664000567000056710000000470312701407107021700 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections import functools import oslo_messaging as messaging from oslo_serialization import jsonutils from manila import rpc NOTIFICATIONS = [] def reset(): del NOTIFICATIONS[:] FakeMessage = collections.namedtuple( 'Message', ['publisher_id', 'priority', 'event_type', 'payload'], ) class FakeNotifier(object): def __init__(self, transport, publisher_id, serializer=None): self.transport = transport self.publisher_id = publisher_id for priority in ['debug', 'info', 'warn', 'error', 'critical']: setattr(self, priority, functools.partial(self._notify, priority.upper())) self._serializer = serializer or messaging.serializer.NoOpSerializer() def prepare(self, publisher_id=None): if publisher_id is None: publisher_id = self.publisher_id return self.__class__(self.transport, publisher_id, self._serializer) def _notify(self, priority, ctxt, event_type, payload): payload = self._serializer.serialize_entity(ctxt, payload) # NOTE(sileht): simulate the kombu serializer # this permit to raise an exception if something have not # been serialized correctly jsonutils.to_primitive(payload) msg = dict(publisher_id=self.publisher_id, priority=priority, event_type=event_type, payload=payload) NOTIFICATIONS.append(msg) def stub_notifier(testcase): testcase.mock_object(messaging, 'Notifier', FakeNotifier) if rpc.NOTIFIER: serializer = getattr(rpc.NOTIFIER, '_serializer', None) testcase.mock_object(rpc, 'NOTIFIER', FakeNotifier(rpc.NOTIFIER.transport, rpc.NOTIFIER.publisher_id, serializer=serializer)) manila-2.0.0/manila/tests/volume/0000775000567000056710000000000012701407265020031 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/volume/__init__.py0000664000567000056710000000000012701407107022123 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/volume/test_cinder.py0000664000567000056710000002251412701407107022705 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
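# Hedged sketch of the wiring used by the test case below: the cinderclient
# factory is replaced with a local fake so that no real Cinder endpoint is
# contacted, roughly:
#
#     self.mock_object(cinder, 'cinderclient',
#                      mock.Mock(return_value=FakeCinderClient()))
#
# after which every call made through manila.volume.cinder.API() is served
# by FakeCinderClient.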
from cinderclient import exceptions as cinder_exception import ddt import mock from manila import context from manila import exception from manila import test from manila.volume import cinder class FakeCinderClient(object): class Volumes(object): def get(self, volume_id): return {'id': volume_id} def list(self, detailed, search_opts={}): return [{'id': 'id1'}, {'id': 'id2'}] def create(self, *args, **kwargs): return {'id': 'created_id'} def __getattr__(self, item): return None def __init__(self): self.volumes = self.Volumes() self.volume_snapshots = self.volumes @ddt.ddt class CinderApiTestCase(test.TestCase): def setUp(self): super(CinderApiTestCase, self).setUp() self.api = cinder.API() self.cinderclient = FakeCinderClient() self.ctx = context.get_admin_context() self.mock_object(cinder, 'cinderclient', mock.Mock(return_value=self.cinderclient)) self.mock_object(cinder, '_untranslate_volume_summary_view', lambda ctx, vol: vol) self.mock_object(cinder, '_untranslate_snapshot_summary_view', lambda ctx, snap: snap) def test_get(self): volume_id = 'volume_id1' result = self.api.get(self.ctx, volume_id) self.assertEqual(volume_id, result['id']) @ddt.data( {'cinder_e': cinder_exception.NotFound(404), 'manila_e': exception.VolumeNotFound}, {'cinder_e': cinder_exception.BadRequest(400), 'manila_e': exception.InvalidInput}, ) @ddt.unpack def test_get_failed(self, cinder_e, manila_e): cinder.cinderclient.side_effect = cinder_e volume_id = 'volume_id' self.assertRaises(manila_e, self.api.get, self.ctx, volume_id) def test_create(self): result = self.api.create(self.ctx, 1, '', '') self.assertEqual('created_id', result['id']) def test_create_failed(self): cinder.cinderclient.side_effect = cinder_exception.BadRequest(400) self.assertRaises(exception.InvalidInput, self.api.create, self.ctx, 1, '', '') def test_create_not_found_error(self): cinder.cinderclient.side_effect = cinder_exception.NotFound(404) self.assertRaises(exception.NotFound, self.api.create, self.ctx, 1, '', '') def test_create_failed_exception(self): cinder.cinderclient.side_effect = Exception("error msg") self.assertRaises(exception.ManilaException, self.api.create, self.ctx, 1, '', '') def test_get_all(self): cinder._untranslate_volume_summary_view.return_value = ['id1', 'id2'] self.assertEqual([{'id': 'id1'}, {'id': 'id2'}], self.api.get_all(self.ctx)) def test_check_attach_volume_status_error(self): volume = {'status': 'error'} self.assertRaises(exception.InvalidVolume, self.api.check_attach, self.ctx, volume) def test_check_attach_volume_already_attached(self): volume = {'status': 'available'} volume['attach_status'] = "attached" self.assertRaises(exception.InvalidVolume, self.api.check_attach, self.ctx, volume) def test_check_attach_availability_zone_differs(self): volume = {'status': 'available'} volume['attach_status'] = "detached" instance = {'availability_zone': 'zone1'} volume['availability_zone'] = 'zone2' cinder.CONF.set_override('cross_az_attach', False, 'cinder') self.assertRaises(exception.InvalidVolume, self.api.check_attach, self.ctx, volume, instance) volume['availability_zone'] = 'zone1' self.assertIsNone(self.api.check_attach(self.ctx, volume, instance)) cinder.CONF.reset() def test_check_attach(self): volume = {'status': 'available'} volume['attach_status'] = "detached" volume['availability_zone'] = 'zone1' instance = {'availability_zone': 'zone1'} cinder.CONF.set_override('cross_az_attach', False, 'cinder') self.assertIsNone(self.api.check_attach(self.ctx, volume, instance)) cinder.CONF.reset() def 
test_check_detach(self): volume = {'status': 'available'} self.assertRaises(exception.InvalidVolume, self.api.check_detach, self.ctx, volume) volume['status'] = 'non-available' self.assertIsNone(self.api.check_detach(self.ctx, volume)) def test_update(self): fake_volume = {'fake': 'fake'} self.mock_object(self.cinderclient.volumes, 'get', mock.Mock(return_value=fake_volume)) self.mock_object(self.cinderclient.volumes, 'update') fake_volume_id = 'fake_volume' fake_data = {'test': 'test'} self.api.update(self.ctx, fake_volume_id, fake_data) self.cinderclient.volumes.get.assert_called_once_with(fake_volume_id) self.cinderclient.volumes.update.assert_called_once_with(fake_volume, **fake_data) def test_reserve_volume(self): self.mock_object(self.cinderclient.volumes, 'reserve') self.api.reserve_volume(self.ctx, 'id1') self.cinderclient.volumes.reserve.assert_called_once_with('id1') def test_unreserve_volume(self): self.mock_object(self.cinderclient.volumes, 'unreserve') self.api.unreserve_volume(self.ctx, 'id1') self.cinderclient.volumes.unreserve.assert_called_once_with('id1') def test_begin_detaching(self): self.mock_object(self.cinderclient.volumes, 'begin_detaching') self.api.begin_detaching(self.ctx, 'id1') self.cinderclient.volumes.begin_detaching.assert_called_once_with( 'id1') def test_roll_detaching(self): self.mock_object(self.cinderclient.volumes, 'roll_detaching') self.api.roll_detaching(self.ctx, 'id1') self.cinderclient.volumes.roll_detaching.assert_called_once_with('id1') def test_attach(self): self.mock_object(self.cinderclient.volumes, 'attach') self.api.attach(self.ctx, 'id1', 'uuid', 'point') self.cinderclient.volumes.attach.assert_called_once_with('id1', 'uuid', 'point') def test_detach(self): self.mock_object(self.cinderclient.volumes, 'detach') self.api.detach(self.ctx, 'id1') self.cinderclient.volumes.detach.assert_called_once_with('id1') def test_initialize_connection(self): self.mock_object(self.cinderclient.volumes, 'initialize_connection') self.api.initialize_connection(self.ctx, 'id1', 'connector') self.cinderclient.volumes.initialize_connection.\ assert_called_once_with('id1', 'connector') def test_terminate_connection(self): self.mock_object(self.cinderclient.volumes, 'terminate_connection') self.api.terminate_connection(self.ctx, 'id1', 'connector') self.cinderclient.volumes.terminate_connection.\ assert_called_once_with('id1', 'connector') def test_delete(self): self.mock_object(self.cinderclient.volumes, 'delete') self.api.delete(self.ctx, 'id1') self.cinderclient.volumes.delete.assert_called_once_with('id1') def test_get_snapshot(self): snapshot_id = 'snapshot_id1' result = self.api.get_snapshot(self.ctx, snapshot_id) self.assertEqual(snapshot_id, result['id']) def test_get_snapshot_failed(self): cinder.cinderclient.side_effect = cinder_exception.NotFound(404) snapshot_id = 'snapshot_id' self.assertRaises(exception.VolumeSnapshotNotFound, self.api.get_snapshot, self.ctx, snapshot_id) def test_get_all_snapshots(self): cinder._untranslate_snapshot_summary_view.return_value = ['id1', 'id2'] self.assertEqual([{'id': 'id1'}, {'id': 'id2'}], self.api.get_all_snapshots(self.ctx)) def test_create_snapshot(self): result = self.api.create_snapshot(self.ctx, {'id': 'id1'}, '', '') self.assertEqual('created_id', result['id']) def test_create_force(self): result = self.api.create_snapshot_force(self.ctx, {'id': 'id1'}, '', '') self.assertEqual('created_id', result['id']) def test_delete_snapshot(self): self.mock_object(self.cinderclient.volume_snapshots, 'delete') 
self.api.delete_snapshot(self.ctx, 'id1') self.cinderclient.volume_snapshots.delete.assert_called_once_with( 'id1') manila-2.0.0/manila/tests/utils.py0000664000567000056710000000600412701407107020227 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack LLC # Copyright 2015 Mirantic, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures import os from oslo_config import cfg import six from manila import context from manila import utils CONF = cfg.CONF def get_test_admin_context(): return context.get_admin_context() def is_manila_installed(): if os.path.exists('../../manila.manila.egg-info'): return True else: return False def set_timeout(timeout): """Timeout decorator for unit test methods. Use this decorator for tests that are expected to pass in very specific amount of time, not common for all other tests. It can have either big or small value. """ def _decorator(f): @six.wraps(f) def _wrapper(self, *args, **kwargs): self.useFixture(fixtures.Timeout(timeout, gentle=True)) return f(self, *args, **kwargs) return _wrapper return _decorator class create_temp_config_with_opts(object): """Creates temporary config file with provided opts and values. usage: data = {'FOO_GROUP': {'foo_opt': 'foo_value'}} assert CONF.FOO_GROUP.foo_opt != 'foo_value' with create_temp_config_with_opts(data): assert CONF.FOO_GROUP.foo_opt == 'foo_value' assert CONF.FOO_GROUP.foo_opt != 'foo_value' :param data: dict -- expected dict with two layers, first is name of config group and second is opts with values. Example: {'DEFAULT': {'foo_opt': 'foo_v'}, 'BAR_GROUP': {'bar_opt': 'bar_v'}} """ def __init__(self, data): self.data = data def __enter__(self): config_filename = 'fake_config' with utils.tempdir() as tmpdir: tmpfilename = os.path.join(tmpdir, '%s.conf' % config_filename) with open(tmpfilename, "w") as configfile: for group, opts in self.data.items(): configfile.write("""[%s]\n""" % group) for opt, value in opts.items(): configfile.write( """%(k)s = %(v)s\n""" % {'k': opt, 'v': value}) configfile.write("""\n""") # Add config file with updated opts CONF.default_config_files = [configfile.name] # Reload config instance to use redefined opts CONF.reload_config_files() return CONF def __exit__(self, exc_type, exc_value, exc_traceback): return False # do not suppress errors manila-2.0.0/manila/tests/__init__.py0000664000567000056710000000163512701407107020633 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`manila.tests` -- Manila Unittests ===================================================== .. automodule:: manila.tests :platform: Unix """ import eventlet eventlet.monkey_patch() manila-2.0.0/manila/tests/test_api.py0000664000567000056710000000511612701407107020702 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the API endpoint.""" import six from six.moves import http_client import webob class FakeHttplibSocket(object): """A fake socket implementation for http_client.HTTPResponse, trivial.""" def __init__(self, response_string): self.response_string = response_string self._buffer = six.StringIO(response_string) def makefile(self, _mode, _other): """Returns the socket's internal buffer.""" return self._buffer class FakeHttplibConnection(object): """A fake http_client.HTTPConnection for boto. requests made via this connection actually get translated and routed into our WSGI app, we then wait for the response and turn it back into the http_client.HTTPResponse that boto expects. """ def __init__(self, app, host, is_secure=False): self.app = app self.host = host def request(self, method, path, data, headers): req = webob.Request.blank(path) req.method = method req.body = data req.headers = headers req.headers['Accept'] = 'text/html' req.host = self.host # Call the WSGI app, get the HTTP response resp = str(req.get_response(self.app)) # For some reason, the response doesn't have "HTTP/1.0 " prepended; I # guess that's a function the web server usually provides. resp = "HTTP/1.0 %s" % resp self.sock = FakeHttplibSocket(resp) self.http_response = http_client.HTTPResponse(self.sock) # NOTE(vish): boto is accessing private variables for some reason self._HTTPConnection__response = self.http_response self.http_response.begin() def getresponse(self): return self.http_response def getresponsebody(self): return self.sock.response_string def close(self): """Required for compatibility with boto/tornado.""" pass manila-2.0.0/manila/tests/fake_service_instance.py0000664000567000056710000000302212701407107023376 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
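# Hedged usage sketch: share driver unit tests can substitute this class for
# the real service instance manager so that no Nova instance is booted, for
# example:
#
#     from manila.tests import fake_service_instance
#
#     self.mock_object(service_instance, 'ServiceInstanceManager',
#                      fake_service_instance.FakeServiceInstanceManager)
#
# The 'service_instance' module reference above is an assumption for
# illustration only; only FakeServiceInstanceManager is defined in this file.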
import mock from manila.tests import fake_compute class FakeServiceInstanceManager(object): def __init__(self, *args, **kwargs): self.db = mock.Mock() self._helpers = { 'CIFS': mock.Mock(), 'NFS': mock.Mock(), } self.share_networks_locks = {} self.share_networks_servers = {} self.fake_server = fake_compute.FakeServer() self.service_instance_name_template = 'manila_fake_service_instance-%s' def get_service_instance(self, context, share_network_id, create=True): return self.fake_server def _create_service_instance(self, context, instance_name, share_network_id, old_server_ip): return self.fake_server def _delete_server(self, context, server): pass def _get_service_instance_name(self, share_network_id): return self.service_instance_name_template % share_network_id manila-2.0.0/manila/tests/test_hacking.py0000664000567000056710000002354312701407112021535 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys import textwrap import mock import pep8 import six import testtools from manila.hacking import checks from manila import test class HackingTestCase(test.TestCase): """Hacking test cases This class tests the hacking checks in manila.hacking.checks by passing strings to the check methods like the pep8/flake8 parser would. The parser loops over each line in the file and then passes the parameters to the check method. The parameter names in the check method dictate what type of object is passed to the check method. The parameter types are:: logical_line: A processed line with the following modifications: - Multi-line statements converted to a single line. - Stripped left and right. - Contents of strings replaced with "xxx" of same length. - Comments removed. physical_line: Raw line of text from the input file. lines: a list of the raw lines from the input file tokens: the tokens that contribute to this logical line line_number: line number in the input file total_lines: number of lines in the input file blank_lines: blank lines before this one indent_char: indentation character in this file (" " or "\t") indent_level: indentation (with tabs expanded to multiples of 8) previous_indent_level: indentation on previous line previous_logical: previous logical line filename: Path of the file being run through pep8 When running a test on a check method the return will be False/None if there is no violation in the sample input. If there is an error a tuple is returned with a position in the line, and a message. So to check the result just assertTrue if the check is expected to fail and assertFalse if it should pass. 
""" def test_no_translate_debug_logs(self): self.assertEqual(1, len(list(checks.no_translate_debug_logs( "LOG.debug(_('foo'))", "manila/scheduler/foo.py")))) self.assertEqual(0, len(list(checks.no_translate_debug_logs( "LOG.debug('foo')", "manila/scheduler/foo.py")))) self.assertEqual(0, len(list(checks.no_translate_debug_logs( "LOG.info(_('foo'))", "manila/scheduler/foo.py")))) def test_check_explicit_underscore_import(self): self.assertEqual(1, len(list(checks.check_explicit_underscore_import( "LOG.info(_('My info message'))", "cinder/tests/other_files.py")))) self.assertEqual(1, len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "cinder/tests/other_files.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "from cinder.i18n import _", "cinder/tests/other_files.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "LOG.info(_('My info message'))", "cinder/tests/other_files.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "cinder/tests/other_files.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "from cinder.i18n import _LE, _, _LW", "cinder/tests/other_files2.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "cinder/tests/other_files2.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "_ = translations.ugettext", "cinder/tests/other_files3.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "cinder/tests/other_files3.py")))) # Complete code coverage by falling through all checks self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "LOG.info('My info message')", "cinder.tests.unit/other_files4.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "from cinder.i18n import _LW", "cinder.tests.unit/other_files5.py")))) self.assertEqual(1, len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "cinder.tests.unit/other_files5.py")))) # We are patching pep8 so that only the check under test is actually # installed. @mock.patch('pep8._checks', {'physical_line': {}, 'logical_line': {}, 'tree': {}}) def _run_check(self, code, checker, filename=None): pep8.register_check(checker) lines = textwrap.dedent(code).strip().splitlines(True) checker = pep8.Checker(filename=filename, lines=lines) checker.check_all() checker.report._deferred_print.sort() return checker.report._deferred_print def _assert_has_errors(self, code, checker, expected_errors=None, filename=None): actual_errors = [e[:3] for e in self._run_check(code, checker, filename)] self.assertEqual(expected_errors or [], actual_errors) @testtools.skipIf(six.PY3, "It is PY2-specific. 
Skip it for PY3.") def test_str_exception(self): checker = checks.CheckForStrExc code = """ def f(a, b): try: p = str(a) + str(b) except ValueError as e: p = str(e) return p """ errors = [(5, 16, 'M325')] self._assert_has_errors(code, checker, expected_errors=errors) code = """ def f(a, b): try: p = str(a) + str(b) except ValueError as e: p = unicode(e) return p """ errors = [] self._assert_has_errors(code, checker, expected_errors=errors) code = """ def f(a, b): try: p = str(a) + str(b) except ValueError as e: try: p = unicode(a) + unicode(b) except ValueError as ve: p = str(e) + str(ve) p = unicode(e) return p """ errors = [(8, 20, 'M325'), (8, 29, 'M325')] self._assert_has_errors(code, checker, expected_errors=errors) def test_trans_add(self): checker = checks.CheckForTransAdd code = """ def fake_tran(msg): return msg _ = fake_tran _LI = _ _LW = _ _LE = _ _LC = _ def f(a, b): msg = _('test') + 'add me' msg = _LI('test') + 'add me' msg = _LW('test') + 'add me' msg = _LE('test') + 'add me' msg = _LC('test') + 'add me' msg = 'add to me' + _('test') return msg """ # Python 3.4.0 introduced a change to the column calculation during AST # parsing. This was reversed in Python 3.4.3, hence the version-based # expected value calculation. See #1499743 for more background. if sys.version_info < (3, 4, 0) or sys.version_info >= (3, 4, 3): errors = [(13, 10, 'M326'), (14, 10, 'M326'), (15, 10, 'M326'), (16, 10, 'M326'), (17, 10, 'M326'), (18, 24, 'M326')] else: errors = [(13, 11, 'M326'), (14, 13, 'M326'), (15, 13, 'M326'), (16, 13, 'M326'), (17, 13, 'M326'), (18, 25, 'M326')] self._assert_has_errors(code, checker, expected_errors=errors) code = """ def f(a, b): msg = 'test' + 'add me' return msg """ errors = [] self._assert_has_errors(code, checker, expected_errors=errors) def test_dict_constructor_with_list_copy(self): self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict([(i, connect_info[i])")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " attrs = dict([(k, _from_json(v))")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " type_names = dict((value, key) for key, value in")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict((value, key) for key, value in")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( "foo(param=dict((k, v) for k, v in bar.items()))")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict([[i,i] for i in range(3)])")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dd = dict([i,i] for i in range(3))")))) self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( " create_kwargs = dict(snapshot=snapshot,")))) self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( " self._render_dict(xml, data_el, data.__dict__)")))) manila-2.0.0/manila/tests/cmd/0000775000567000056710000000000012701407265017265 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/cmd/test_manage.py0000664000567000056710000003535312701407107022132 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import code import readline import sys import ddt import mock from oslo_config import cfg import six from manila.cmd import manage as manila_manage from manila import context from manila import db from manila.db import migration from manila import test from manila import version CONF = cfg.CONF @ddt.ddt class ManilaCmdManageTestCase(test.TestCase): def setUp(self): super(ManilaCmdManageTestCase, self).setUp() sys.argv = ['manila-share'] CONF(sys.argv[1:], project='manila', version=version.version_string()) self.shell_commands = manila_manage.ShellCommands() self.host_commands = manila_manage.HostCommands() self.db_commands = manila_manage.DbCommands() self.version_commands = manila_manage.VersionCommands() self.config_commands = manila_manage.ConfigCommands() self.get_log_cmds = manila_manage.GetLogCommands() self.service_cmds = manila_manage.ServiceCommands() def test_param2id_is_uuid_like(self): obj_id = '12345678123456781234567812345678' self.assertEqual(obj_id, manila_manage.param2id(obj_id)) def test_param2id_not_uuid_like_with_dash(self): obj_id = '112-112-112' self.assertIsNone(manila_manage.param2id(obj_id)) def test_param2id_not_uuid_like_without_dash(self): obj_id = '123' self.assertEqual(123, manila_manage.param2id(obj_id)) def test_param2id_not_uuid_like_value_error(self): obj_id = 'invalidvalue' self.assertRaises(ValueError, manila_manage.param2id, obj_id) @mock.patch.object(manila_manage.ShellCommands, 'run', mock.Mock()) def test_shell_commands_bpython(self): self.shell_commands.bpython() manila_manage.ShellCommands.run.assert_called_once_with('bpython') @mock.patch.object(manila_manage.ShellCommands, 'run', mock.Mock()) def test_shell_commands_ipython(self): self.shell_commands.ipython() manila_manage.ShellCommands.run.assert_called_once_with('ipython') @mock.patch.object(manila_manage.ShellCommands, 'run', mock.Mock()) def test_shell_commands_python(self): self.shell_commands.python() manila_manage.ShellCommands.run.assert_called_once_with('python') @ddt.data({}, {'shell': 'bpython'}) def test_run_bpython(self, kwargs): try: import bpython except ImportError as e: self.skipTest(six.text_type(e)) self.mock_object(bpython, 'embed') self.shell_commands.run(**kwargs) bpython.embed.assert_called_once_with() def test_run_bpython_import_error(self): try: import bpython import IPython except ImportError as e: self.skipTest(six.text_type(e)) self.mock_object(bpython, 'embed', mock.Mock(side_effect=ImportError())) self.mock_object(IPython, 'embed') self.shell_commands.run(shell='bpython') IPython.embed.assert_called_once_with() def test_run(self): try: import bpython except ImportError as e: self.skipTest(six.text_type(e)) self.mock_object(bpython, 'embed') self.shell_commands.run() bpython.embed.assert_called_once_with() def test_run_ipython(self): try: import IPython except ImportError as e: self.skipTest(six.text_type(e)) self.mock_object(IPython, 'embed') self.shell_commands.run(shell='ipython') IPython.embed.assert_called_once_with() def test_run_ipython_import_error(self): try: import IPython if not hasattr(IPython, 'Shell'): setattr(IPython, 'Shell', mock.Mock()) 
setattr(IPython.Shell, 'IPShell', mock.Mock(side_effect=ImportError())) except ImportError as e: self.skipTest(six.text_type(e)) self.mock_object(IPython, 'embed', mock.Mock(side_effect=ImportError())) self.mock_object(readline, 'parse_and_bind') self.mock_object(code, 'interact') shell = IPython.embed.return_value self.shell_commands.run(shell='ipython') IPython.Shell.IPShell.assert_called_once_with(argv=[]) self.assertFalse(shell.mainloop.called) self.assertTrue(readline.parse_and_bind.called) code.interact.assert_called_once_with() def test_run_python(self): self.mock_object(readline, 'parse_and_bind') self.mock_object(code, 'interact') self.shell_commands.run(shell='python') readline.parse_and_bind.assert_called_once_with("tab:complete") code.interact.assert_called_once_with() def test_run_python_import_error(self): self.mock_object(readline, 'parse_and_bind') self.mock_object(code, 'interact') self.shell_commands.run(shell='python') readline.parse_and_bind.assert_called_once_with("tab:complete") code.interact.assert_called_once_with() @mock.patch('six.moves.builtins.print') def test_list(self, print_mock): serv_1 = { 'host': 'fake_host1', 'availability_zone': {'name': 'avail_zone1'}, } serv_2 = { 'host': 'fake_host2', 'availability_zone': {'name': 'avail_zone2'}, } self.mock_object(db, 'service_get_all', mock.Mock(return_value=[serv_1, serv_2])) self.mock_object(context, 'get_admin_context', mock.Mock(return_value='admin_ctxt')) self.host_commands.list(zone='avail_zone1') context.get_admin_context.assert_called_once_with() db.service_get_all.assert_called_once_with('admin_ctxt') print_mock.assert_has_calls([ mock.call(u'host \tzone '), mock.call('fake_host1 \tavail_zone1 ')]) @mock.patch('six.moves.builtins.print') def test_list_zone_is_none(self, print_mock): serv_1 = { 'host': 'fake_host1', 'availability_zone': {'name': 'avail_zone1'}, } serv_2 = { 'host': 'fake_host2', 'availability_zone': {'name': 'avail_zone2'}, } self.mock_object(db, 'service_get_all', mock.Mock(return_value=[serv_1, serv_2])) self.mock_object(context, 'get_admin_context', mock.Mock(return_value='admin_ctxt')) self.host_commands.list() context.get_admin_context.assert_called_once_with() db.service_get_all.assert_called_once_with('admin_ctxt') print_mock.assert_has_calls([ mock.call(u'host \tzone '), mock.call('fake_host1 \tavail_zone1 '), mock.call('fake_host2 \tavail_zone2 ')]) def test_sync(self): self.mock_object(migration, 'upgrade') self.db_commands.sync(version='123') migration.upgrade.assert_called_once_with('123') def test_version(self): self.mock_object(migration, 'version') self.db_commands.version() migration.version.assert_called_once_with() def test_downgrade(self): self.mock_object(migration, 'downgrade') self.db_commands.downgrade(version='123') migration.downgrade.assert_called_once_with('123') def test_revision(self): self.mock_object(migration, 'revision') self.db_commands.revision('message', True) migration.revision.assert_called_once_with('message', True) def test_stamp(self): self.mock_object(migration, 'stamp') self.db_commands.stamp(version='123') migration.stamp.assert_called_once_with('123') def test_version_commands_list(self): self.mock_object(version, 'version_string', mock.Mock(return_value='123')) with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: self.version_commands.list() version.version_string.assert_called_once_with() self.assertEqual('123\n', fake_out.getvalue()) def test_version_commands_call(self): self.mock_object(version, 'version_string', 
mock.Mock(return_value='123')) with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: self.version_commands() version.version_string.assert_called_once_with() self.assertEqual('123\n', fake_out.getvalue()) def test_get_log_commands_no_errors(self): with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: CONF.set_override('log_dir', None) expected_out = 'No errors in logfiles!\n' self.get_log_cmds.errors() self.assertEqual(expected_out, fake_out.getvalue()) @mock.patch('six.moves.builtins.open') @mock.patch('os.listdir') def test_get_log_commands_errors(self, listdir, open): CONF.set_override('log_dir', 'fake-dir') listdir.return_value = ['fake-error.log'] with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: open.return_value = six.StringIO( '[ ERROR ] fake-error-message') expected_out = ('fake-dir/fake-error.log:-\n' 'Line 1 : [ ERROR ] fake-error-message\n') self.get_log_cmds.errors() self.assertEqual(expected_out, fake_out.getvalue()) open.assert_called_once_with('fake-dir/fake-error.log', 'r') listdir.assert_called_once_with(CONF.log_dir) @mock.patch('six.moves.builtins.open') @mock.patch('os.path.exists') def test_get_log_commands_syslog_no_log_file(self, path_exists, open): path_exists.return_value = False exit = self.assertRaises(SystemExit, self.get_log_cmds.syslog) self.assertEqual(1, exit.code) path_exists.assert_any_call('/var/log/syslog') path_exists.assert_any_call('/var/log/messages') @mock.patch('manila.utils.service_is_up') @mock.patch('manila.db.service_get_all') @mock.patch('manila.context.get_admin_context') def test_service_commands_list(self, get_admin_context, service_get_all, service_is_up): ctxt = context.RequestContext('fake-user', 'fake-project') get_admin_context.return_value = ctxt service = {'binary': 'manila-binary', 'host': 'fake-host.fake-domain', 'availability_zone': {'name': 'fake-zone'}, 'updated_at': '2014-06-30 11:22:33', 'disabled': False} service_get_all.return_value = [service] service_is_up.return_value = True with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: format = "%-16s %-36s %-16s %-10s %-5s %-10s" print_format = format % ('Binary', 'Host', 'Zone', 'Status', 'State', 'Updated At') service_format = format % (service['binary'], service['host'].partition('.')[0], service['availability_zone']['name'], 'enabled', ':-)', service['updated_at']) expected_out = print_format + '\n' + service_format + '\n' self.service_cmds.list() self.assertEqual(expected_out, fake_out.getvalue()) get_admin_context.assert_called_with() service_get_all.assert_called_with(ctxt) service_is_up.assert_called_with(service) def test_methods_of(self): obj = type('Fake', (object,), {name: lambda: 'fake_' for name in ('_a', 'b', 'c')}) expected = [('b', obj.b), ('c', obj.c)] self.assertEqual(expected, manila_manage.methods_of(obj)) @mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt') def test_main_argv_lt_2(self, register_cli_opt): script_name = 'manila-manage' sys.argv = [script_name] CONF(sys.argv[1:], project='manila', version=version.version_string()) exit = self.assertRaises(SystemExit, manila_manage.main) self.assertTrue(register_cli_opt.called) self.assertEqual(2, exit.code) @mock.patch('oslo_config.cfg.ConfigOpts.__call__') @mock.patch('oslo_log.log.register_options') @mock.patch('oslo_log.log.setup') @mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt') def test_main_sudo_failed(self, register_cli_opt, log_setup, register_log_opts, config_opts_call): script_name = 'manila-manage' sys.argv = [script_name, 'fake_category', 
'fake_action'] config_opts_call.side_effect = cfg.ConfigFilesNotFoundError( mock.sentinel._namespace) exit = self.assertRaises(SystemExit, manila_manage.main) self.assertTrue(register_cli_opt.called) register_log_opts.assert_called_once_with(CONF) config_opts_call.assert_called_once_with( sys.argv[1:], project='manila', version=version.version_string()) self.assertFalse(log_setup.called) self.assertEqual(2, exit.code) @mock.patch('oslo_config.cfg.ConfigOpts.__call__') @mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt') @mock.patch('oslo_log.log.register_options') def test_main(self, register_log_opts, register_cli_opt, config_opts_call): script_name = 'manila-manage' sys.argv = [script_name, 'config', 'list'] action_fn = mock.MagicMock() CONF.category = mock.MagicMock(action_fn=action_fn) manila_manage.main() self.assertTrue(register_cli_opt.called) register_log_opts.assert_called_once_with(CONF) config_opts_call.assert_called_once_with( sys.argv[1:], project='manila', version=version.version_string()) self.assertTrue(action_fn.called) @ddt.data('bar', '-bar', '--bar') def test_get_arg_string(self, arg): parsed_arg = manila_manage.get_arg_string(arg) self.assertEqual('bar', parsed_arg) manila-2.0.0/manila/tests/cmd/__init__.py0000664000567000056710000000000012701407107021357 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/cmd/test_api.py0000664000567000056710000000352512701407107021447 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from manila.cmd import api as manila_api from manila import test from manila import version CONF = manila_api.CONF class ManilaCmdApiTestCase(test.TestCase): def setUp(self): super(ManilaCmdApiTestCase, self).setUp() sys.argv = ['manila-api'] def test_main(self): self.mock_object(manila_api.log, 'setup') self.mock_object(manila_api.log, 'register_options') self.mock_object(manila_api.utils, 'monkey_patch') self.mock_object(manila_api.service, 'process_launcher') self.mock_object(manila_api.service, 'WSGIService') manila_api.main() process_launcher = manila_api.service.process_launcher process_launcher.assert_called_once_with() self.assertTrue(process_launcher.return_value.launch_service.called) self.assertTrue(process_launcher.return_value.wait.called) self.assertEqual('manila', CONF.project) self.assertEqual(version.version_string(), CONF.version) manila_api.log.setup.assert_called_once_with(CONF, "manila") manila_api.log.register_options.assert_called_once_with(CONF) manila_api.utils.monkey_patch.assert_called_once_with() manila_api.service.WSGIService.assert_called_once_with('osapi_share') manila-2.0.0/manila/tests/cmd/test_data.py0000664000567000056710000000344512701407107021610 0ustar jenkinsjenkins00000000000000# Copyright 2015, Hitachi Data Systems. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sys

from manila.cmd import data as manila_data
from manila import test
from manila import version

CONF = manila_data.CONF


class ManilaCmdDataTestCase(test.TestCase):
    def test_main(self):
        sys.argv = ['manila-data']
        self.mock_object(manila_data.log, 'setup')
        self.mock_object(manila_data.log, 'register_options')
        self.mock_object(manila_data.utils, 'monkey_patch')
        self.mock_object(manila_data.service.Service, 'create')
        self.mock_object(manila_data.service, 'serve')
        self.mock_object(manila_data.service, 'wait')

        manila_data.main()

        self.assertEqual('manila', CONF.project)
        self.assertEqual(version.version_string(), CONF.version)
        manila_data.log.setup.assert_called_once_with(CONF, "manila")
        manila_data.log.register_options.assert_called_once_with(CONF)
        manila_data.utils.monkey_patch.assert_called_once_with()
        manila_data.service.Service.create.assert_called_once_with(
            binary='manila-data')
        manila_data.service.wait.assert_called_once_with()
        manila_data.service.serve.assert_called_once_with(
            manila_data.service.Service.create.return_value)
manila-2.0.0/manila/tests/cmd/test_share.py0000664000567000056710000000504112701407107021773 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
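# The ddt-driven test case that follows exercises manila_share.main() with
# several values of 'enabled_share_backends'. When backends are configured,
# one service.Service per backend is expected; e.g. for backends
# ['foo', 'bar'] the calls look roughly like:
#
#     Service.create(host='fake_host@foo', service_name='foo',
#                    binary='manila-share')
#     Service.create(host='fake_host@bar', service_name='bar',
#                    binary='manila-share')
#
# With no backends configured, a single default 'manila-share' service is
# created and launched through the process launcher.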
import sys import ddt import mock from manila.cmd import share as manila_share from manila import test CONF = manila_share.CONF @ddt.ddt class ManilaCmdShareTestCase(test.TestCase): @ddt.data(None, [], ['foo', ], ['foo', 'bar', ]) def test_main(self, backends): self.mock_object(manila_share.log, 'setup') self.mock_object(manila_share.log, 'register_options') self.mock_object(manila_share.utils, 'monkey_patch') self.mock_object(manila_share.service, 'process_launcher') self.mock_object(manila_share.service.Service, 'create') self.launcher = manila_share.service.process_launcher.return_value self.mock_object(self.launcher, 'launch_service') self.mock_object(self.launcher, 'wait') self.server = manila_share.service.Service.create.return_value fake_host = 'fake_host' CONF.set_override('enabled_share_backends', backends) CONF.set_override('host', fake_host) sys.argv = ['manila-share'] manila_share.main() manila_share.log.setup.assert_called_once_with(CONF, "manila") manila_share.log.register_options.assert_called_once_with(CONF) manila_share.utils.monkey_patch.assert_called_once_with() manila_share.service.process_launcher.assert_called_once_with() self.launcher.wait.assert_called_once_with() if backends: manila_share.service.Service.create.assert_has_calls([ mock.call( host=fake_host + '@' + backend, service_name=backend, binary='manila-share') for backend in backends ]) self.launcher.launch_service.assert_has_calls([ mock.call(self.server) for backend in backends]) else: manila_share.service.Service.create.assert_called_once_with( binary='manila-share') self.launcher.launch_service.assert_called_once_with(self.server) manila-2.0.0/manila/tests/cmd/test_scheduler.py0000664000567000056710000000356712701407107022662 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
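# The test case that follows verifies manila_scheduler.main(): logging is
# registered and set up, monkey patching is applied, a single Service with
# binary='manila-scheduler' is created, served and waited on, and CONF is
# parsed with project 'manila' and the current version string.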
import sys

from manila.cmd import scheduler as manila_scheduler
from manila import test
from manila import version

CONF = manila_scheduler.CONF


class ManilaCmdSchedulerTestCase(test.TestCase):
    def test_main(self):
        sys.argv = ['manila-scheduler']
        self.mock_object(manila_scheduler.log, 'setup')
        self.mock_object(manila_scheduler.log, 'register_options')
        self.mock_object(manila_scheduler.utils, 'monkey_patch')
        self.mock_object(manila_scheduler.service.Service, 'create')
        self.mock_object(manila_scheduler.service, 'serve')
        self.mock_object(manila_scheduler.service, 'wait')

        manila_scheduler.main()

        self.assertEqual('manila', CONF.project)
        self.assertEqual(version.version_string(), CONF.version)
        manila_scheduler.log.setup.assert_called_once_with(CONF, "manila")
        manila_scheduler.log.register_options.assert_called_once_with(CONF)
        manila_scheduler.utils.monkey_patch.assert_called_once_with()
        manila_scheduler.service.Service.create.assert_called_once_with(
            binary='manila-scheduler')
        manila_scheduler.service.wait.assert_called_once_with()
        manila_scheduler.service.serve.assert_called_once_with(
            manila_scheduler.service.Service.create.return_value)
manila-2.0.0/manila/tests/cmd/test_all.py0000664000567000056710000000534112701407107021444 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
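# The test case that follows checks that manila_all.main() starts everything
# in one process: the 'osapi_share' WSGI service plus the binary services
# built via service.Service.create(), all launched through a process
# launcher. The ddt variants also confirm that an Exception or SystemExit
# raised while constructing either kind of service is logged with
# LOG.exception() rather than aborting main().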
import sys import ddt import mock from oslo_log import log from manila.cmd import all as manila_all from manila import service from manila import test from manila import utils from manila import version CONF = manila_all.CONF @ddt.ddt class ManilaCmdAllTestCase(test.TestCase): def setUp(self): super(ManilaCmdAllTestCase, self).setUp() sys.argv = ['manila-all'] self.mock_object(log, 'setup') self.mock_object(log, 'register_options') self.mock_object(log, 'getLogger') self.mock_object(utils, 'monkey_patch') self.mock_object(service, 'process_launcher') self.mock_object(service, 'WSGIService') self.mock_object(service.Service, 'create') self.wsgi_service = service.WSGIService.return_value self.service = service.Service.create.return_value self.fake_log = log.getLogger.return_value def _common_checks(self): self.assertEqual('manila', CONF.project) self.assertEqual(version.version_string(), CONF.version) log.setup.assert_called_once_with(CONF, "manila") log.register_options.assert_called_once_with(CONF) log.getLogger.assert_called_once_with('manila.all') utils.monkey_patch.assert_called_once_with() service.process_launcher.assert_called_once_with() service.WSGIService.assert_called_once_with('osapi_share') def test_main(self): manila_all.main() self._common_checks() self.assertFalse(self.fake_log.exception.called) self.assertTrue( service.process_launcher.return_value.launch_service.called) self.assertTrue(service.process_launcher.return_value.wait.called) @ddt.data( *[(exc, exc_in_wsgi) for exc in (Exception(), SystemExit()) for exc_in_wsgi in (True, False)] ) @ddt.unpack def test_main_raise_exception(self, exc, exc_in_wsgi): if exc_in_wsgi: service.WSGIService.side_effect = exc else: service.Service.create.side_effect = exc manila_all.main() self._common_checks() self.fake_log.exception.assert_has_calls([mock.ANY]) manila-2.0.0/manila/tests/test_utils.py0000664000567000056710000010301712701407107021270 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # Copyright 2014 NetApp, Inc. # Copyright 2014 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
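# Unit tests for the manila.utils helpers: get_from_path() traversal over
# nested dicts and lists, read_cached_file()/read_file_as_root(),
# temporary_chown(), service_is_up() against 'service_down_time',
# IPv6/eventlet environment probes, check_ssh_injection(),
# translate_string_size_to_float(), monkey_patch(), the paramiko-backed
# SSHPool, cidr_to_netmask(), is_valid_ip_address(), ComparableMixin,
# the retry decorator and wait_for_access_update(). The retry contract
# exercised below looks roughly like:
#
#     @utils.retry(exception.ManilaException, interval=2, retries=3,
#                  backoff_rate=2)
#     def fails_once():
#         ...
#
# where the sleep between attempts is scaled by 'backoff_rate' and the
# original exception is re-raised once 'retries' is exhausted.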
import datetime import errno import os import os.path import socket import tempfile import time import uuid import ddt import mock from oslo_config import cfg from oslo_utils import timeutils import paramiko from six.moves import builtins import manila from manila.common import constants from manila import context from manila.db import api as db from manila import exception from manila import test from manila import utils CONF = cfg.CONF class GetFromPathTestCase(test.TestCase): def test_tolerates_nones(self): f = utils.get_from_path input = [] self.assertEqual([], f(input, "a")) self.assertEqual([], f(input, "a/b")) self.assertEqual([], f(input, "a/b/c")) input = [None] self.assertEqual([], f(input, "a")) self.assertEqual([], f(input, "a/b")) self.assertEqual([], f(input, "a/b/c")) input = [{'a': None}] self.assertEqual([], f(input, "a")) self.assertEqual([], f(input, "a/b")) self.assertEqual([], f(input, "a/b/c")) input = [{'a': {'b': None}}] self.assertEqual([{'b': None}], f(input, "a")) self.assertEqual([], f(input, "a/b")) self.assertEqual([], f(input, "a/b/c")) input = [{'a': {'b': {'c': None}}}] self.assertEqual([{'b': {'c': None}}], f(input, "a")) self.assertEqual([{'c': None}], f(input, "a/b")) self.assertEqual([], f(input, "a/b/c")) input = [{'a': {'b': {'c': None}}}, {'a': None}] self.assertEqual([{'b': {'c': None}}], f(input, "a")) self.assertEqual([{'c': None}], f(input, "a/b")) self.assertEqual([], f(input, "a/b/c")) input = [{'a': {'b': {'c': None}}}, {'a': {'b': None}}] self.assertEqual([{'b': {'c': None}}, {'b': None}], f(input, "a")) self.assertEqual([{'c': None}], f(input, "a/b")) self.assertEqual([], f(input, "a/b/c")) def test_does_select(self): f = utils.get_from_path input = [{'a': 'a_1'}] self.assertEqual(['a_1'], f(input, "a")) self.assertEqual([], f(input, "a/b")) self.assertEqual([], f(input, "a/b/c")) input = [{'a': {'b': 'b_1'}}] self.assertEqual([{'b': 'b_1'}], f(input, "a")) self.assertEqual(['b_1'], f(input, "a/b")) self.assertEqual([], f(input, "a/b/c")) input = [{'a': {'b': {'c': 'c_1'}}}] self.assertEqual([{'b': {'c': 'c_1'}}], f(input, "a")) self.assertEqual([{'c': 'c_1'}], f(input, "a/b")) self.assertEqual(['c_1'], f(input, "a/b/c")) input = [{'a': {'b': {'c': 'c_1'}}}, {'a': None}] self.assertEqual([{'b': {'c': 'c_1'}}], f(input, "a")) self.assertEqual([{'c': 'c_1'}], f(input, "a/b")) self.assertEqual(['c_1'], f(input, "a/b/c")) input = [{'a': {'b': {'c': 'c_1'}}}, {'a': {'b': None}}] self.assertEqual([{'b': {'c': 'c_1'}}, {'b': None}], f(input, "a")) self.assertEqual([{'c': 'c_1'}], f(input, "a/b")) self.assertEqual(['c_1'], f(input, "a/b/c")) input = [{'a': {'b': {'c': 'c_1'}}}, {'a': {'b': {'c': 'c_2'}}}] self.assertEqual([{'b': {'c': 'c_1'}}, {'b': {'c': 'c_2'}}], f(input, "a")) self.assertEqual([{'c': 'c_1'}, {'c': 'c_2'}], f(input, "a/b")) self.assertEqual(['c_1', 'c_2'], f(input, "a/b/c")) self.assertEqual([], f(input, "a/b/c/d")) self.assertEqual([], f(input, "c/a/b/d")) self.assertEqual([], f(input, "i/r/t")) def test_flattens_lists(self): f = utils.get_from_path input = [{'a': [1, 2, 3]}] self.assertEqual([1, 2, 3], f(input, "a")) self.assertEqual([], f(input, "a/b")) self.assertEqual([], f(input, "a/b/c")) input = [{'a': {'b': [1, 2, 3]}}] self.assertEqual([{'b': [1, 2, 3]}], f(input, "a")) self.assertEqual([1, 2, 3], f(input, "a/b")) self.assertEqual([], f(input, "a/b/c")) input = [{'a': {'b': [1, 2, 3]}}, {'a': {'b': [4, 5, 6]}}] self.assertEqual([1, 2, 3, 4, 5, 6], f(input, "a/b")) self.assertEqual([], f(input, "a/b/c")) input = [{'a': 
[{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}] self.assertEqual([1, 2, 3, 4, 5, 6], f(input, "a/b")) self.assertEqual([], f(input, "a/b/c")) input = [{'a': [1, 2, {'b': 'b_1'}]}] self.assertEqual([1, 2, {'b': 'b_1'}], f(input, "a")) self.assertEqual(['b_1'], f(input, "a/b")) def test_bad_xpath(self): f = utils.get_from_path self.assertRaises(exception.Error, f, [], None) self.assertRaises(exception.Error, f, [], "") self.assertRaises(exception.Error, f, [], "/") self.assertRaises(exception.Error, f, [], "/a") self.assertRaises(exception.Error, f, [], "/a/") self.assertRaises(exception.Error, f, [], "//") self.assertRaises(exception.Error, f, [], "//a") self.assertRaises(exception.Error, f, [], "a//a") self.assertRaises(exception.Error, f, [], "a//a/") self.assertRaises(exception.Error, f, [], "a/a/") def test_real_failure1(self): # Real world failure case... # We weren't coping when the input was a Dictionary instead of a List # This led to test_accepts_dictionaries f = utils.get_from_path inst = {'fixed_ip': {'floating_ips': [{'address': '1.2.3.4'}], 'address': '192.168.0.3'}, 'hostname': ''} private_ips = f(inst, 'fixed_ip/address') public_ips = f(inst, 'fixed_ip/floating_ips/address') self.assertEqual(['192.168.0.3'], private_ips) self.assertEqual(['1.2.3.4'], public_ips) def test_accepts_dictionaries(self): f = utils.get_from_path input = {'a': [1, 2, 3]} self.assertEqual([1, 2, 3], f(input, "a")) self.assertEqual([], f(input, "a/b")) self.assertEqual([], f(input, "a/b/c")) input = {'a': {'b': [1, 2, 3]}} self.assertEqual([{'b': [1, 2, 3]}], f(input, "a")) self.assertEqual([1, 2, 3], f(input, "a/b")) self.assertEqual([], f(input, "a/b/c")) input = {'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]} self.assertEqual([1, 2, 3, 4, 5, 6], f(input, "a/b")) self.assertEqual([], f(input, "a/b/c")) input = {'a': [1, 2, {'b': 'b_1'}]} self.assertEqual([1, 2, {'b': 'b_1'}], f(input, "a")) self.assertEqual(['b_1'], f(input, "a/b")) @ddt.ddt class GenericUtilsTestCase(test.TestCase): def test_read_cached_file(self): cache_data = {"data": 1123, "mtime": 1} with mock.patch.object(os.path, "getmtime", mock.Mock(return_value=1)): data = utils.read_cached_file("/this/is/a/fake", cache_data) self.assertEqual(cache_data["data"], data) os.path.getmtime.assert_called_once_with("/this/is/a/fake") def test_read_modified_cached_file(self): with mock.patch.object(os.path, "getmtime", mock.Mock(return_value=2)): fake_contents = "lorem ipsum" fake_file = mock.Mock() fake_file.read = mock.Mock(return_value=fake_contents) fake_context_manager = mock.Mock() fake_context_manager.__enter__ = mock.Mock(return_value=fake_file) fake_context_manager.__exit__ = mock.Mock() with mock.patch.object( builtins, 'open', mock.Mock(return_value=fake_context_manager)): cache_data = {"data": 1123, "mtime": 1} self.reload_called = False def test_reload(reloaded_data): self.assertEqual(fake_contents, reloaded_data) self.reload_called = True data = utils.read_cached_file("/this/is/a/fake", cache_data, reload_func=test_reload) self.assertEqual(fake_contents, data) self.assertTrue(self.reload_called) fake_file.read.assert_called_once_with() fake_context_manager.__enter__.assert_any_call() builtins.open.assert_called_once_with("/this/is/a/fake") os.path.getmtime.assert_called_once_with("/this/is/a/fake") def test_read_file_as_root(self): def fake_execute(*args, **kwargs): if args[1] == 'bad': raise exception.ProcessExecutionError return 'fakecontents', None self.mock_object(utils, 'execute', fake_execute) contents = utils.read_file_as_root('good') 
self.assertEqual('fakecontents', contents) self.assertRaises(exception.FileNotFound, utils.read_file_as_root, 'bad') def test_temporary_chown(self): def fake_execute(*args, **kwargs): if args[0] == 'chown': fake_execute.uid = args[1] self.mock_object(utils, 'execute', fake_execute) with tempfile.NamedTemporaryFile() as f: with utils.temporary_chown(f.name, owner_uid=2): self.assertEqual(2, fake_execute.uid) self.assertEqual(fake_execute.uid, os.getuid()) def test_service_is_up(self): fts_func = datetime.datetime.fromtimestamp fake_now = 1000 down_time = 5 self.flags(service_down_time=down_time) with mock.patch.object(timeutils, 'utcnow', mock.Mock(return_value=fts_func(fake_now))): # Up (equal) service = {'updated_at': fts_func(fake_now - down_time), 'created_at': fts_func(fake_now - down_time)} result = utils.service_is_up(service) self.assertTrue(result) timeutils.utcnow.assert_called_once_with() with mock.patch.object(timeutils, 'utcnow', mock.Mock(return_value=fts_func(fake_now))): # Up service = {'updated_at': fts_func(fake_now - down_time + 1), 'created_at': fts_func(fake_now - down_time + 1)} result = utils.service_is_up(service) self.assertTrue(result) timeutils.utcnow.assert_called_once_with() with mock.patch.object(timeutils, 'utcnow', mock.Mock(return_value=fts_func(fake_now))): # Down service = {'updated_at': fts_func(fake_now - down_time - 1), 'created_at': fts_func(fake_now - down_time - 1)} result = utils.service_is_up(service) self.assertFalse(result) timeutils.utcnow.assert_called_once_with() def test_is_ipv6_configured0(self): fake_fd = mock.Mock() fake_fd.read.return_value = 'test' with mock.patch('six.moves.builtins.open', mock.Mock(return_value=fake_fd)) as open: self.assertTrue(utils.is_ipv6_configured()) open.assert_called_once_with('/proc/net/if_inet6') fake_fd.read.assert_called_once_with(32) def test_is_ipv6_configured1(self): fake_fd = mock.Mock() fake_fd.read.return_value = '' with mock.patch( 'six.moves.builtins.open', mock.Mock(return_value=fake_fd)): self.assertFalse(utils.is_ipv6_configured()) def test_is_ipv6_configured2(self): with mock.patch('six.moves.builtins.open', mock.Mock(side_effect=IOError( errno.ENOENT, 'Fake no such file error.'))): self.assertFalse(utils.is_ipv6_configured()) def test_is_ipv6_configured3(self): with mock.patch('six.moves.builtins.open', mock.Mock(side_effect=IOError( errno.EPERM, 'Fake no such file error.'))): self.assertRaises(IOError, utils.is_ipv6_configured) def test_is_eventlet_bug105(self): fake_dns = mock.Mock() fake_dns.getaddrinfo.side_effect = socket.gaierror(errno.EBADF) with mock.patch.dict('sys.modules', { 'eventlet.support.greendns': fake_dns}): self.assertTrue(utils.is_eventlet_bug105()) self.assertTrue(fake_dns.getaddrinfo.called) def test_is_eventlet_bug105_neg(self): fake_dns = mock.Mock() fake_dns.getaddrinfo.return_value = [ (socket.AF_INET6, socket.SOCK_STREAM, 0, '', (u'127.0.0.1', 80)), ] with mock.patch.dict('sys.modules', { 'eventlet.support.greendns': fake_dns}): self.assertFalse(utils.is_eventlet_bug105()) fake_dns.getaddrinfo.assert_called_once_with('::1', 80) @ddt.data(['ssh', '-D', 'my_name@name_of_remote_computer'], ['echo', '"quoted arg with space"'], ['echo', "'quoted arg with space'"]) def test_check_ssh_injection(self, cmd): cmd_list = cmd self.assertIsNone(utils.check_ssh_injection(cmd_list)) @ddt.data(['ssh', 'my_name@ name_of_remote_computer'], ['||', 'my_name@name_of_remote_computer'], ['cmd', 'virus;ls'], ['cmd', '"arg\"withunescaped"'], ['cmd', 'virus;"quoted argument"'], ['echo', 
'"quoted argument";rm -rf'], ['echo', "'quoted argument `rm -rf`'"], ['echo', '"quoted";virus;"quoted"'], ['echo', '"quoted";virus;\'quoted\'']) def test_check_ssh_injection_on_error0(self, cmd): self.assertRaises(exception.SSHInjectionThreat, utils.check_ssh_injection, cmd) @ddt.data( (("3G", "G"), 3.0), (("4.1G", "G"), 4.1), (("5.23G", "G"), 5.23), (("9728M", "G"), 9.5), (("8192K", "G"), 0.0078125), (("2T", "G"), 2048.0), (("2.1T", "G"), 2150.4), (("3P", "G"), 3145728.0), (("3.4P", "G"), 3565158.4), (("9728M", "M"), 9728.0), (("9728.2381T", "T"), 9728.2381), (("0", "G"), 0.0), (("512", "M"), 0.00048828125), (("2097152.", "M"), 2.0), ((".1024", "K"), 0.0001), (("2048G", "T"), 2.0), (("65536G", "P"), 0.0625), ) @ddt.unpack def test_translate_string_size_to_float_positive(self, request, expected): actual = utils.translate_string_size_to_float(*request) self.assertEqual(expected, actual) @ddt.data( (None, "G"), ("fake", "G"), ("1fake", "G"), ("2GG", "G"), ("1KM", "G"), ("K1M", "G"), ("M1K", "G"), ("", "G"), (23, "G"), (23.0, "G"), ) @ddt.unpack def test_translate_string_size_to_float_negative(self, string, multiplier): actual = utils.translate_string_size_to_float(string, multiplier) self.assertIsNone(actual) class MonkeyPatchTestCase(test.TestCase): """Unit test for utils.monkey_patch().""" def setUp(self): super(MonkeyPatchTestCase, self).setUp() self.example_package = 'manila.tests.monkey_patch_example.' self.flags( monkey_patch=True, monkey_patch_modules=[self.example_package + 'example_a' + ':' + self.example_package + 'example_decorator']) def test_monkey_patch(self): utils.monkey_patch() manila.tests.monkey_patch_example.CALLED_FUNCTION = [] from manila.tests.monkey_patch_example import example_a from manila.tests.monkey_patch_example import example_b self.assertEqual('Example function', example_a.example_function_a()) exampleA = example_a.ExampleClassA() exampleA.example_method() ret_a = exampleA.example_method_add(3, 5) self.assertEqual(8, ret_a) self.assertEqual('Example function', example_b.example_function_b()) exampleB = example_b.ExampleClassB() exampleB.example_method() ret_b = exampleB.example_method_add(3, 5) self.assertEqual(8, ret_b) package_a = self.example_package + 'example_a.' self.assertTrue(package_a + 'example_function_a' in manila.tests.monkey_patch_example.CALLED_FUNCTION) self.assertTrue(package_a + 'ExampleClassA.example_method' in manila.tests.monkey_patch_example.CALLED_FUNCTION) self.assertTrue(package_a + 'ExampleClassA.example_method_add' in manila.tests.monkey_patch_example.CALLED_FUNCTION) package_b = self.example_package + 'example_b.' 
self.assertFalse(package_b + 'example_function_b' in manila.tests.monkey_patch_example.CALLED_FUNCTION) self.assertFalse(package_b + 'ExampleClassB.example_method' in manila.tests.monkey_patch_example.CALLED_FUNCTION) self.assertFalse(package_b + 'ExampleClassB.example_method_add' in manila.tests.monkey_patch_example.CALLED_FUNCTION) class FakeSSHClient(object): def __init__(self): self.id = uuid.uuid4() self.transport = FakeTransport() def set_missing_host_key_policy(self, policy): pass def connect(self, ip, port=22, username=None, password=None, key_filename=None, look_for_keys=None, timeout=10): pass def get_transport(self): return self.transport def close(self): pass def __call__(self, *args, **kwargs): pass class FakeSock(object): def settimeout(self, timeout): pass class FakeTransport(object): def __init__(self): self.active = True self.sock = FakeSock() def set_keepalive(self, timeout): pass def is_active(self): return self.active class SSHPoolTestCase(test.TestCase): """Unit test for SSH Connection Pool.""" def test_single_ssh_connect(self): with mock.patch.object(paramiko, "SSHClient", mock.Mock(return_value=FakeSSHClient())): sshpool = utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=1, max_size=1) with sshpool.item() as ssh: first_id = ssh.id with sshpool.item() as ssh: second_id = ssh.id self.assertEqual(first_id, second_id) paramiko.SSHClient.assert_called_once_with() def test_create_ssh_with_password(self): fake_ssh_client = mock.Mock() ssh_pool = utils.SSHPool("127.0.0.1", 22, 10, "test", password="test") with mock.patch.object(paramiko, "SSHClient", return_value=fake_ssh_client): ssh_pool.create() fake_ssh_client.connect.assert_called_once_with( "127.0.0.1", port=22, username="test", password="test", key_filename=None, look_for_keys=False, timeout=10) def test_create_ssh_with_key(self): path_to_private_key = "/fakepath/to/privatekey" fake_ssh_client = mock.Mock() ssh_pool = utils.SSHPool("127.0.0.1", 22, 10, "test", privatekey="/fakepath/to/privatekey") with mock.patch.object(paramiko, "SSHClient", return_value=fake_ssh_client): ssh_pool.create() fake_ssh_client.connect.assert_called_once_with( "127.0.0.1", port=22, username="test", password=None, key_filename=path_to_private_key, look_for_keys=False, timeout=10) def test_create_ssh_with_nothing(self): fake_ssh_client = mock.Mock() ssh_pool = utils.SSHPool("127.0.0.1", 22, 10, "test") with mock.patch.object(paramiko, "SSHClient", return_value=fake_ssh_client): ssh_pool.create() fake_ssh_client.connect.assert_called_once_with( "127.0.0.1", port=22, username="test", password=None, key_filename=None, look_for_keys=True, timeout=10) def test_create_ssh_error_connecting(self): attrs = {'connect.side_effect': paramiko.SSHException, } fake_ssh_client = mock.Mock(**attrs) ssh_pool = utils.SSHPool("127.0.0.1", 22, 10, "test") with mock.patch.object(paramiko, "SSHClient", return_value=fake_ssh_client): self.assertRaises(exception.SSHException, ssh_pool.create) fake_ssh_client.connect.assert_called_once_with( "127.0.0.1", port=22, username="test", password=None, key_filename=None, look_for_keys=True, timeout=10) def test_closed_reopend_ssh_connections(self): with mock.patch.object(paramiko, "SSHClient", mock.Mock(return_value=FakeSSHClient())): sshpool = utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=1, max_size=2) with sshpool.item() as ssh: first_id = ssh.id with sshpool.item() as ssh: second_id = ssh.id # Close the connection and test for a new connection ssh.get_transport().active = False 
self.assertEqual(first_id, second_id) paramiko.SSHClient.assert_called_once_with() # Expected new ssh pool with mock.patch.object(paramiko, "SSHClient", mock.Mock(return_value=FakeSSHClient())): with sshpool.item() as ssh: third_id = ssh.id self.assertNotEqual(first_id, third_id) paramiko.SSHClient.assert_called_once_with() class CidrToNetmaskTestCase(test.TestCase): """Unit test for cidr to netmask.""" def test_cidr_to_netmask_01(self): cidr = '10.0.0.0/0' expected_netmask = '0.0.0.0' result = utils.cidr_to_netmask(cidr) self.assertEqual(expected_netmask, result) def test_cidr_to_netmask_02(self): cidr = '10.0.0.0/24' expected_netmask = '255.255.255.0' result = utils.cidr_to_netmask(cidr) self.assertEqual(expected_netmask, result) def test_cidr_to_netmask_03(self): cidr = '10.0.0.0/5' expected_netmask = '248.0.0.0' result = utils.cidr_to_netmask(cidr) self.assertEqual(expected_netmask, result) def test_cidr_to_netmask_04(self): cidr = '10.0.0.0/32' expected_netmask = '255.255.255.255' result = utils.cidr_to_netmask(cidr) self.assertEqual(expected_netmask, result) def test_cidr_to_netmask_05(self): cidr = '10.0.0.1' expected_netmask = '255.255.255.255' result = utils.cidr_to_netmask(cidr) self.assertEqual(expected_netmask, result) def test_cidr_to_netmask_invalid_01(self): cidr = '10.0.0.0/33' self.assertRaises(exception.InvalidInput, utils.cidr_to_netmask, cidr) def test_cidr_to_netmask_invalid_02(self): cidr = '' self.assertRaises(exception.InvalidInput, utils.cidr_to_netmask, cidr) def test_cidr_to_netmask_invalid_03(self): cidr = '10.0.0.0/33' self.assertRaises(exception.InvalidInput, utils.cidr_to_netmask, cidr) def test_cidr_to_netmask_invalid_04(self): cidr = '10.0.0.555/33' self.assertRaises(exception.InvalidInput, utils.cidr_to_netmask, cidr) @ddt.ddt class IsValidIPVersion(test.TestCase): """Test suite for function 'is_valid_ip_address'.""" @ddt.data('0.0.0.0', '255.255.255.255', '192.168.0.1') def test_valid_v4(self, addr): for vers in (4, '4'): self.assertTrue(utils.is_valid_ip_address(addr, vers)) @ddt.data( '2001:cdba:0000:0000:0000:0000:3257:9652', '2001:cdba:0:0:0:0:3257:9652', '2001:cdba::3257:9652') def test_valid_v6(self, addr): for vers in (6, '6'): self.assertTrue(utils.is_valid_ip_address(addr, vers)) @ddt.data( {'addr': '1.1.1.1', 'vers': 3}, {'addr': '1.1.1.1', 'vers': 5}, {'addr': '1.1.1.1', 'vers': 7}, {'addr': '2001:cdba::3257:9652', 'vers': '3'}, {'addr': '2001:cdba::3257:9652', 'vers': '5'}, {'addr': '2001:cdba::3257:9652', 'vers': '7'}) @ddt.unpack def test_provided_invalid_version(self, addr, vers): self.assertRaises( exception.ManilaException, utils.is_valid_ip_address, addr, vers) def test_provided_none_version(self): self.assertRaises(TypeError, utils.is_valid_ip_address, '', None) @ddt.data(None, 'fake', '1.1.1.1') def test_provided_invalid_v6_address(self, addr): for vers in (6, '6'): self.assertFalse(utils.is_valid_ip_address(addr, vers)) @ddt.data(None, 'fake', '255.255.255.256', '2001:cdba::3257:9652') def test_provided_invalid_v4_address(self, addr): for vers in (4, '4'): self.assertFalse(utils.is_valid_ip_address(addr, vers)) class Comparable(utils.ComparableMixin): def __init__(self, value): self.value = value def _cmpkey(self): return self.value class TestComparableMixin(test.TestCase): def setUp(self): super(TestComparableMixin, self).setUp() self.one = Comparable(1) self.two = Comparable(2) def test_lt(self): self.assertTrue(self.one < self.two) self.assertFalse(self.two < self.one) self.assertFalse(self.one < self.one) def test_le(self): 
self.assertTrue(self.one <= self.two) self.assertFalse(self.two <= self.one) self.assertTrue(self.one <= self.one) def test_eq(self): self.assertFalse(self.one == self.two) self.assertFalse(self.two == self.one) self.assertTrue(self.one == self.one) def test_ge(self): self.assertFalse(self.one >= self.two) self.assertTrue(self.two >= self.one) self.assertTrue(self.one >= self.one) def test_gt(self): self.assertFalse(self.one > self.two) self.assertTrue(self.two > self.one) self.assertFalse(self.one > self.one) def test_ne(self): self.assertTrue(self.one != self.two) self.assertTrue(self.two != self.one) self.assertFalse(self.one != self.one) def test_compare(self): self.assertEqual(NotImplemented, self.one._compare(1, self.one._cmpkey)) class TestRetryDecorator(test.TestCase): def setUp(self): super(TestRetryDecorator, self).setUp() def test_no_retry_required(self): self.counter = 0 with mock.patch.object(time, 'sleep') as mock_sleep: @utils.retry(exception.ManilaException, interval=2, retries=3, backoff_rate=2) def succeeds(): self.counter += 1 return 'success' ret = succeeds() self.assertFalse(mock_sleep.called) self.assertEqual('success', ret) self.assertEqual(1, self.counter) def test_no_retry_required_random(self): self.counter = 0 with mock.patch.object(time, 'sleep') as mock_sleep: @utils.retry(exception.ManilaException, interval=2, retries=3, backoff_rate=2, wait_random=True) def succeeds(): self.counter += 1 return 'success' ret = succeeds() self.assertFalse(mock_sleep.called) self.assertEqual('success', ret) self.assertEqual(1, self.counter) def test_retries_once_random(self): self.counter = 0 interval = 2 backoff_rate = 2 retries = 3 with mock.patch.object(time, 'sleep') as mock_sleep: @utils.retry(exception.ManilaException, interval, retries, backoff_rate, wait_random=True) def fails_once(): self.counter += 1 if self.counter < 2: raise exception.ManilaException(data='fake') else: return 'success' ret = fails_once() self.assertEqual('success', ret) self.assertEqual(2, self.counter) self.assertEqual(1, mock_sleep.call_count) self.assertTrue(mock_sleep.called) def test_retries_once(self): self.counter = 0 interval = 2 backoff_rate = 2 retries = 3 with mock.patch.object(time, 'sleep') as mock_sleep: @utils.retry(exception.ManilaException, interval, retries, backoff_rate) def fails_once(): self.counter += 1 if self.counter < 2: raise exception.ManilaException(data='fake') else: return 'success' ret = fails_once() self.assertEqual('success', ret) self.assertEqual(2, self.counter) self.assertEqual(1, mock_sleep.call_count) mock_sleep.assert_called_with(interval * backoff_rate) def test_limit_is_reached(self): self.counter = 0 retries = 3 interval = 2 backoff_rate = 4 with mock.patch.object(time, 'sleep') as mock_sleep: @utils.retry(exception.ManilaException, interval, retries, backoff_rate) def always_fails(): self.counter += 1 raise exception.ManilaException(data='fake') self.assertRaises(exception.ManilaException, always_fails) self.assertEqual(retries, self.counter) expected_sleep_arg = [] for i in range(retries): if i > 0: interval *= backoff_rate expected_sleep_arg.append(float(interval)) mock_sleep.assert_has_calls(map(mock.call, expected_sleep_arg)) def test_wrong_exception_no_retry(self): with mock.patch.object(time, 'sleep') as mock_sleep: @utils.retry(exception.ManilaException) def raise_unexpected_error(): raise ValueError("value error") self.assertRaises(ValueError, raise_unexpected_error) self.assertFalse(mock_sleep.called) @ddt.ddt class 
RequireDriverInitializedTestCase(test.TestCase): @ddt.data(True, False) def test_require_driver_initialized(self, initialized): class FakeDriver(object): @property def initialized(self): return initialized class FakeException(Exception): pass class FakeManager(object): driver = FakeDriver() @utils.require_driver_initialized def call_me(self): raise FakeException( "Should be raised only if manager.driver.initialized " "('%s') is equal to 'True'." % initialized) if initialized: expected_exception = FakeException else: expected_exception = exception.DriverNotInitialized self.assertRaises(expected_exception, FakeManager().call_me) @ddt.ddt class ShareMigrationHelperTestCase(test.TestCase): """Tests DataMigrationHelper.""" def setUp(self): super(ShareMigrationHelperTestCase, self).setUp() self.context = context.get_admin_context() def test_wait_for_access_update(self): sid = 1 fake_share_instances = [ {'id': sid, 'access_rules_status': constants.STATUS_OUT_OF_SYNC}, {'id': sid, 'access_rules_status': constants.STATUS_ACTIVE}, ] self.mock_object(time, 'sleep') self.mock_object(db, 'share_instance_get', mock.Mock(side_effect=fake_share_instances)) utils.wait_for_access_update(self.context, db, fake_share_instances[0], 1) db.share_instance_get.assert_has_calls( [mock.call(mock.ANY, sid), mock.call(mock.ANY, sid)] ) time.sleep.assert_called_once_with(1) @ddt.data( ( {'id': '1', 'access_rules_status': constants.STATUS_ERROR}, exception.ShareMigrationFailed ), ( {'id': '1', 'access_rules_status': constants.STATUS_OUT_OF_SYNC}, exception.ShareMigrationFailed ), ) @ddt.unpack def test_wait_for_access_update_invalid(self, fake_instance, expected_exc): self.mock_object(time, 'sleep') self.mock_object(db, 'share_instance_get', mock.Mock(return_value=fake_instance)) now = time.time() timeout = now + 100 self.mock_object(time, 'time', mock.Mock(side_effect=[now, timeout])) self.assertRaises(expected_exc, utils.wait_for_access_update, self.context, db, fake_instance, 1) manila-2.0.0/manila/tests/test_service.py0000664000567000056710000001720112701407107021567 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2014 NetApp, Inc. # Copyright 2014 Mirantis, Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
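# Tests for manila.service: dispatching test_method() through the manager
# vs. an overriding Service subclass, the 'enable_new_services' flag on
# service creation, periodic task execution, report_state() handling of a
# lost and re-established DB connection, and WSGIService behaviour (random
# port assignment on start, and the server pool size being reset to its
# default of 1000 after stop/reset).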
""" Unit Tests for remote procedure calls using queue """ import ddt import mock from oslo_config import cfg from manila import context from manila import db from manila import exception from manila import manager from manila import service from manila import test from manila import utils from manila import wsgi test_service_opts = [ cfg.StrOpt("fake_manager", default="manila.tests.test_service.FakeManager", help="Manager for testing"), cfg.StrOpt("test_service_listen", help="Host to bind test service to"), cfg.IntOpt("test_service_listen_port", default=0, help="Port number to bind test service to"), ] CONF = cfg.CONF CONF.register_opts(test_service_opts) class FakeManager(manager.Manager): """Fake manager for tests.""" RPC_API_VERSION = "1.0" def __init__(self, host=None, db_driver=None, service_name=None): super(FakeManager, self).__init__(host=host, db_driver=db_driver) def test_method(self): return 'manager' class ExtendedService(service.Service): def test_method(self): return 'service' class ServiceManagerTestCase(test.TestCase): """Test cases for Services.""" def test_message_gets_to_manager(self): serv = service.Service('test', 'test', 'test', CONF.fake_manager) serv.start() self.assertEqual('manager', serv.test_method()) def test_override_manager_method(self): serv = ExtendedService('test', 'test', 'test', CONF.fake_manager) serv.start() self.assertEqual('service', serv.test_method()) class ServiceFlagsTestCase(test.TestCase): def test_service_enabled_on_create_based_on_flag(self): self.flags(enable_new_services=True) host = 'foo' binary = 'manila-fake' app = service.Service.create(host=host, binary=binary) app.start() app.stop() ref = db.service_get(context.get_admin_context(), app.service_id) db.service_destroy(context.get_admin_context(), app.service_id) self.assertFalse(ref['disabled']) def test_service_disabled_on_create_based_on_flag(self): self.flags(enable_new_services=False) host = 'foo' binary = 'manila-fake' app = service.Service.create(host=host, binary=binary) app.start() app.stop() ref = db.service_get(context.get_admin_context(), app.service_id) db.service_destroy(context.get_admin_context(), app.service_id) self.assertTrue(ref['disabled']) def fake_service_get_by_args(*args, **kwargs): raise exception.NotFound() def fake_service_get(*args, **kwargs): raise Exception() host = 'foo' binary = 'bar' topic = 'test' service_create = { 'host': host, 'binary': binary, 'topic': topic, 'report_count': 0, 'availability_zone': 'nova', } service_ref = { 'host': host, 'binary': binary, 'topic': topic, 'report_count': 0, 'availability_zone': {'name': 'nova'}, 'id': 1, } @ddt.ddt class ServiceTestCase(test.TestCase): """Test cases for Services.""" def test_create(self): app = service.Service.create(host='foo', binary='manila-fake', topic='fake') self.assertTrue(app) @ddt.data(True, False) def test_periodic_tasks(self, raise_on_error): serv = service.Service(host, binary, topic, CONF.fake_manager) self.mock_object( context, 'get_admin_context', mock.Mock(side_effect=context.get_admin_context)) self.mock_object(serv.manager, 'periodic_tasks') serv.periodic_tasks(raise_on_error=raise_on_error) context.get_admin_context.assert_called_once_with() serv.manager.periodic_tasks.assert_called_once_with( utils.IsAMatcher(context.RequestContext), raise_on_error=raise_on_error) @mock.patch.object(service.db, 'service_get_by_args', mock.Mock(side_effect=fake_service_get_by_args)) @mock.patch.object(service.db, 'service_create', mock.Mock(return_value=service_ref)) @mock.patch.object(service.db, 
'service_get', mock.Mock(side_effect=fake_service_get)) def test_report_state_newly_disconnected(self): serv = service.Service(host, binary, topic, CONF.fake_manager) serv.start() serv.report_state() self.assertTrue(serv.model_disconnected) service.db.service_get_by_args.assert_called_once_with( mock.ANY, host, binary) service.db.service_create.assert_called_once_with( mock.ANY, service_create) service.db.service_get.assert_called_once_with(mock.ANY, mock.ANY) @mock.patch.object(service.db, 'service_get_by_args', mock.Mock(side_effect=fake_service_get_by_args)) @mock.patch.object(service.db, 'service_create', mock.Mock(return_value=service_ref)) @mock.patch.object(service.db, 'service_get', mock.Mock(return_value=service_ref)) @mock.patch.object(service.db, 'service_update', mock.Mock(return_value=service_ref. update({'report_count': 1}))) def test_report_state_newly_connected(self): serv = service.Service(host, binary, topic, CONF.fake_manager) serv.start() serv.model_disconnected = True serv.report_state() self.assertFalse(serv.model_disconnected) service.db.service_get_by_args.assert_called_once_with( mock.ANY, host, binary) service.db.service_create.assert_called_once_with( mock.ANY, service_create) service.db.service_get.assert_called_once_with( mock.ANY, service_ref['id']) service.db.service_update.assert_called_once_with( mock.ANY, service_ref['id'], mock.ANY) class TestWSGIService(test.TestCase): def setUp(self): super(self.__class__, self).setUp() self.mock_object(wsgi.Loader, 'load_app') self.test_service = service.WSGIService("test_service") def test_service_random_port(self): self.assertEqual(0, self.test_service.port) self.test_service.start() self.assertNotEqual(0, self.test_service.port) self.test_service.stop() wsgi.Loader.load_app.assert_called_once_with("test_service") def test_reset_pool_size_to_default(self): self.test_service.start() # Stopping the service, which in turn sets pool size to 0 self.test_service.stop() self.assertEqual(0, self.test_service.server._pool.size) # Resetting pool size to default self.test_service.reset() self.test_service.start() self.assertEqual(1000, self.test_service.server._pool.size) wsgi.Loader.load_app.assert_called_once_with("test_service") manila-2.0.0/manila/tests/test_conf.py0000664000567000056710000000567612701407107021071 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2011 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
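# Tests for oslo.config usage in manila: importing declared options at
# runtime, overriding them without losing the override on a re-import,
# registering long and short CLI option names side by side, and verifying
# that self.flags() overrides do not leak between tests and are undone by
# CONF.reset().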
from oslo_config import cfg

from manila import test

CONF = cfg.CONF
CONF.register_opt(cfg.StrOpt('conf_unittest',
                             default='foo',
                             help='for testing purposes only'))


class ConfigTestCase(test.TestCase):

    def setUp(self):
        super(ConfigTestCase, self).setUp()

    def test_declare(self):
        self.assertNotIn('answer', CONF)
        CONF.import_opt('answer', 'manila.tests.declare_conf')
        self.assertIn('answer', CONF)
        self.assertEqual(42, CONF.answer)

        # Make sure we don't overwrite anything
        CONF.set_override('answer', 256)
        self.assertEqual(256, CONF.answer)
        CONF.import_opt('answer', 'manila.tests.declare_conf')
        self.assertEqual(256, CONF.answer)

    def test_runtime_and_unknown_flags(self):
        self.assertNotIn('runtime_answer', CONF)
        import manila.tests.runtime_conf  # noqa
        self.assertIn('runtime_answer', CONF)
        self.assertEqual(54, CONF.runtime_answer)

    def test_long_vs_short_flags(self):
        CONF.clear()
        CONF.register_cli_opt(cfg.StrOpt('duplicate_answer_long',
                                         default='val',
                                         help='desc'))
        CONF.register_cli_opt(cfg.IntOpt('duplicate_answer',
                                         default=50,
                                         help='desc'))

        argv = ['--duplicate_answer=60']
        CONF(argv, default_config_files=[])
        self.assertEqual(60, CONF.duplicate_answer)
        self.assertEqual('val', CONF.duplicate_answer_long)

    def test_flag_leak_left(self):
        self.assertEqual('foo', CONF.conf_unittest)
        self.flags(conf_unittest='bar')
        self.assertEqual('bar', CONF.conf_unittest)

    def test_flag_leak_right(self):
        self.assertEqual('foo', CONF.conf_unittest)
        self.flags(conf_unittest='bar')
        self.assertEqual('bar', CONF.conf_unittest)

    def test_flag_overrides(self):
        self.assertEqual('foo', CONF.conf_unittest)
        self.flags(conf_unittest='bar')
        self.assertEqual('bar', CONF.conf_unittest)
        CONF.reset()
        self.assertEqual('foo', CONF.conf_unittest)
manila-2.0.0/manila/tests/test_manager.py0000664000567000056710000001225012701407107021540 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
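# Tests for manila.manager: the base Manager exposes the expected attributes
# (periodic_tasks, init_host, service_version, service_config, ...),
# periodic_tasks() delegates to run_periodic_tasks(), and
# SchedulerDependentManager publishes capabilities to the scheduler RPC API
# only when last_capabilities is non-empty.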
"""Test of Base Manager for Manila.""" import ddt import mock from oslo_utils import importutils from manila import manager from manila import test @ddt.ddt class ManagerTestCase(test.TestCase): def setUp(self): super(self.__class__, self).setUp() self.host = 'host' self.db_driver = 'fake_driver' self.mock_object(importutils, 'import_module') def test_verify_manager_instance(self): fake_manager = manager.Manager(self.host, self.db_driver) self.assertTrue(hasattr(fake_manager, '_periodic_tasks')) self.assertTrue(hasattr(fake_manager, 'additional_endpoints')) self.assertTrue(hasattr(fake_manager, 'host')) self.assertTrue(hasattr(fake_manager, 'periodic_tasks')) self.assertTrue(hasattr(fake_manager, 'init_host')) self.assertTrue(hasattr(fake_manager, 'service_version')) self.assertTrue(hasattr(fake_manager, 'service_config')) self.assertEqual(self.host, fake_manager.host) importutils.import_module.assert_called_once_with(self.db_driver) @ddt.data(True, False) def test_periodic_tasks(self, raise_on_error): fake_manager = manager.Manager(self.host, self.db_driver) fake_context = 'fake_context' self.mock_object(fake_manager, 'run_periodic_tasks') fake_manager.periodic_tasks(fake_context, raise_on_error) fake_manager.run_periodic_tasks.assert_called_once_with( fake_context, raise_on_error=raise_on_error) @ddt.ddt class SchedulerDependentManagerTestCase(test.TestCase): def setUp(self): super(self.__class__, self).setUp() self.context = 'fake_context' self.host = 'host' self.db_driver = 'fake_driver' self.service_name = 'fake_service_name' self.mock_object(importutils, 'import_module') self.sched_manager = manager.SchedulerDependentManager( self.host, self.db_driver, self.service_name) def test_verify_scheduler_dependent_manager_instance(self): self.assertTrue(hasattr(self.sched_manager, '_periodic_tasks')) self.assertTrue(hasattr(self.sched_manager, 'additional_endpoints')) self.assertTrue(hasattr(self.sched_manager, 'host')) self.assertTrue(hasattr(self.sched_manager, 'periodic_tasks')) self.assertTrue(hasattr(self.sched_manager, 'init_host')) self.assertTrue(hasattr(self.sched_manager, 'service_version')) self.assertTrue(hasattr(self.sched_manager, 'service_config')) self.assertTrue(hasattr(self.sched_manager, 'last_capabilities')) self.assertTrue(hasattr(self.sched_manager, 'service_name')) self.assertTrue(hasattr(self.sched_manager, 'scheduler_rpcapi')) self.assertTrue(hasattr(self.sched_manager, 'update_service_capabilities')) self.assertTrue(hasattr(self.sched_manager, '_publish_service_capabilities')) self.assertEqual(self.host, self.sched_manager.host) self.assertEqual(self.service_name, self.sched_manager.service_name) importutils.import_module.assert_called_once_with(self.db_driver) @ddt.data(None, {}, [], '') def test__publish_service_capabilities_no_update(self, last_capabilities): self.sched_manager.last_capabilities = last_capabilities self.mock_object( self.sched_manager.scheduler_rpcapi, 'update_service_capabilities') self.sched_manager._publish_service_capabilities('fake_context') self.assertFalse( self.sched_manager.scheduler_rpcapi.update_service_capabilities. 
called) @ddt.data('fake_last_capabilities', {'foo': 'bar'}) def test__publish_service_capabilities_with_update(self, last_capabilities): self.sched_manager.last_capabilities = last_capabilities self.mock_object( self.sched_manager.scheduler_rpcapi, 'update_service_capabilities') self.mock_object(manager.LOG, 'debug') self.sched_manager._publish_service_capabilities(self.context) self.sched_manager.scheduler_rpcapi.update_service_capabilities.\ assert_called_once_with( self.context, self.service_name, self.host, last_capabilities) manager.LOG.debug.assert_called_once_with(mock.ANY) @ddt.data(None, '', [], {}, {'foo': 'bar'}) def test_update_service_capabilities(self, capabilities): self.sched_manager.update_service_capabilities(capabilities) self.assertEqual(capabilities, self.sched_manager.last_capabilities) manila-2.0.0/manila/tests/monkey_patch_example/0000775000567000056710000000000012701407265022716 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/monkey_patch_example/example_a.py0000664000567000056710000000161712701407107025223 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Example Module A for testing utils.monkey_patch().""" def example_function_a(): return 'Example function' class ExampleClassA(object): def example_method(self): return 'Example method' def example_method_add(self, arg1, arg2): return arg1 + arg2 manila-2.0.0/manila/tests/monkey_patch_example/__init__.py0000664000567000056710000000212212701407107025017 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Example Module for testing utils.monkey_patch().""" CALLED_FUNCTION = [] def example_decorator(name, function): """decorator for notify which is used from utils.monkey_patch(). :param name: name of the function :param function: - object of the function :returns: function -- decorated function """ def wrapped_func(*args, **kwarg): CALLED_FUNCTION.append(name) return function(*args, **kwarg) return wrapped_func manila-2.0.0/manila/tests/monkey_patch_example/example_b.py0000664000567000056710000000162012701407107025216 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Example Module B for testing utils.monkey_patch().""" def example_function_b(): return 'Example function' class ExampleClassB(object): def example_method(self): return 'Example method' def example_method_add(self, arg1, arg2): return arg1 + arg2 manila-2.0.0/manila/tests/network/0000775000567000056710000000000012701407265020213 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/network/linux/0000775000567000056710000000000012701407265021352 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/network/linux/test_ovs_lib.py0000664000567000056710000000547712701407107024430 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from manila.network.linux import ovs_lib from manila import test class OVS_Lib_Test(test.TestCase): """A test suite to exercise the OVS libraries.""" def setUp(self): super(OVS_Lib_Test, self).setUp() self.BR_NAME = "br-int" self.TO = "--timeout=2" self.br = ovs_lib.OVSBridge(self.BR_NAME) self.execute_p = mock.patch('manila.utils.execute') self.execute = self.execute_p.start() def tearDown(self): self.execute_p.stop() super(OVS_Lib_Test, self).tearDown() def test_reset_bridge(self): self.br.reset_bridge() self.execute.assert_has_calls([mock.call("ovs-vsctl", self.TO, "--", "--if-exists", "del-br", self.BR_NAME, run_as_root=True), mock.call("ovs-vsctl", self.TO, "add-br", self.BR_NAME, run_as_root=True)]) def test_delete_port(self): pname = "tap5" self.br.delete_port(pname) self.execute.assert_called_once_with("ovs-vsctl", self.TO, "--", "--if-exists", "del-port", self.BR_NAME, pname, run_as_root=True) def test_port_id_regex(self): result = ('external_ids : {attached-mac="fa:16:3e:23:5b:f2",' ' iface-id="5c1321a7-c73f-4a77-95e6-9f86402e5c8f",' ' iface-status=active}\nname :' ' "dhc5c1321a7-c7"\nofport : 2\n') match = self.br.re_id.search(result) vif_mac = match.group('vif_mac') vif_id = match.group('vif_id') port_name = match.group('port_name') ofport = int(match.group('ofport')) self.assertEqual('fa:16:3e:23:5b:f2', vif_mac) self.assertEqual('5c1321a7-c73f-4a77-95e6-9f86402e5c8f', vif_id) self.assertEqual('dhc5c1321a7-c7', port_name) self.assertEqual(2, ofport) manila-2.0.0/manila/tests/network/linux/test_interface.py0000664000567000056710000002023412701407107024717 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import six from manila.network.linux import interface from manila.network.linux import ip_lib from manila import test from manila.tests import conf_fixture from manila import utils class BaseChild(interface.LinuxInterfaceDriver): def plug(*args): pass def unplug(*args): pass FakeSubnet = { 'cidr': '192.168.1.1/24', } FakeAllocation = { 'subnet': FakeSubnet, 'ip_address': '192.168.1.2', 'ip_version': 4, } FakePort = { 'id': 'abcdef01-1234-5678-90ab-ba0987654321', 'fixed_ips': [FakeAllocation], 'device_id': 'cccccccc-cccc-cccc-cccc-cccccccccccc', } class TestBase(test.TestCase): def setUp(self): super(TestBase, self).setUp() self.conf = conf_fixture.CONF self.conf.register_opts(interface.OPTS) self.ip_dev_p = mock.patch.object(ip_lib, 'IPDevice') self.ip_dev = self.ip_dev_p.start() self.ip_p = mock.patch.object(ip_lib, 'IPWrapper') self.ip = self.ip_p.start() self.device_exists_p = mock.patch.object(ip_lib, 'device_exists') self.device_exists = self.device_exists_p.start() self.addCleanup(self.ip_dev_p.stop) self.addCleanup(self.ip_p.stop) self.addCleanup(self.device_exists_p.stop) class TestABCDriver(TestBase): def test_verify_abs_class_has_abs_methods(self): class ICanNotBeInstancetiated(interface.LinuxInterfaceDriver): pass try: ICanNotBeInstancetiated() except TypeError: pass except Exception as e: self.fail("Unexpected exception thrown: '%s'" % six.text_type(e)) else: self.fail("ExpectedException 'TypeError' not thrown.") def test_get_device_name(self): bc = BaseChild() device_name = bc.get_device_name(FakePort) self.assertEqual('tapabcdef01-12', device_name) def test_l3_init(self): addresses = [dict(ip_version=4, scope='global', dynamic=False, cidr='172.16.77.240/24')] self.ip_dev().addr.list = mock.Mock(return_value=addresses) bc = BaseChild() ns = '12345678-1234-5678-90ab-ba0987654321' bc.init_l3('tap0', ['192.168.1.2/24'], namespace=ns) self.ip_dev.assert_has_calls( [mock.call('tap0', namespace=ns), mock.call().addr.list(scope='global', filters=['permanent']), mock.call().addr.add(4, '192.168.1.2/24', '192.168.1.255'), mock.call().addr.delete(4, '172.16.77.240/24')]) class TestOVSInterfaceDriver(TestBase): def test_get_device_name(self): br = interface.OVSInterfaceDriver() device_name = br.get_device_name(FakePort) self.assertEqual('tapabcdef01-12', device_name) def test_plug_no_ns(self): self._test_plug() def test_plug_with_ns(self): self._test_plug(namespace='01234567-1234-1234-99') def test_plug_alt_bridge(self): self._test_plug(bridge='br-foo') def _test_plug(self, additional_expectation=None, bridge=None, namespace=None): if additional_expectation is None: additional_expectation = [] if not bridge: bridge = 'br-int' def device_exists(dev, namespace=None): return dev == bridge vsctl_cmd = ['ovs-vsctl', '--', '--may-exist', 'add-port', bridge, 'tap0', '--', 'set', 'Interface', 'tap0', 'type=internal', '--', 'set', 'Interface', 'tap0', 'external-ids:iface-id=port-1234', '--', 'set', 'Interface', 'tap0', 'external-ids:iface-status=active', '--', 'set', 'Interface', 'tap0', 'external-ids:attached-mac=aa:bb:cc:dd:ee:ff'] with mock.patch.object(utils, 'execute') as execute: 
ovs = interface.OVSInterfaceDriver() self.device_exists.side_effect = device_exists ovs.plug('tap0', 'port-1234', 'aa:bb:cc:dd:ee:ff', bridge=bridge, namespace=namespace) execute.assert_called_once_with(*vsctl_cmd, run_as_root=True) expected = [mock.call(), mock.call().device('tap0'), mock.call().device().link.set_address('aa:bb:cc:dd:ee:ff')] expected.extend(additional_expectation) if namespace: expected.extend( [mock.call().ensure_namespace(namespace), mock.call().ensure_namespace().add_device_to_namespace( mock.ANY)]) expected.extend([mock.call().device().link.set_up()]) self.ip.assert_has_calls(expected) def test_unplug(self, bridge=None): if not bridge: bridge = 'br-int' with mock.patch('manila.network.linux.ovs_lib.OVSBridge') as ovs_br: ovs = interface.OVSInterfaceDriver() ovs.unplug('tap0') ovs_br.assert_has_calls([mock.call(bridge), mock.call().delete_port('tap0')]) class TestBridgeInterfaceDriver(TestBase): def test_get_device_name(self): br = interface.BridgeInterfaceDriver() device_name = br.get_device_name(FakePort) self.assertEqual('ns-abcdef01-12', device_name) def test_plug_no_ns(self): self._test_plug() def test_plug_with_ns(self): self._test_plug(namespace='01234567-1234-1234-99') def _test_plug(self, namespace=None, mtu=None): def device_exists(device, root_helper=None, namespace=None): return device.startswith('brq') root_veth = mock.Mock() ns_veth = mock.Mock() self.ip().add_veth = mock.Mock(return_value=(root_veth, ns_veth)) self.device_exists.side_effect = device_exists br = interface.BridgeInterfaceDriver() mac_address = 'aa:bb:cc:dd:ee:ff' br.plug('ns-0', 'port-1234', mac_address, namespace=namespace) ip_calls = [mock.call(), mock.call().add_veth('tap0', 'ns-0', namespace2=namespace)] ns_veth.assert_has_calls([mock.call.link.set_address(mac_address)]) self.ip.assert_has_calls(ip_calls) root_veth.assert_has_calls([mock.call.link.set_up()]) ns_veth.assert_has_calls([mock.call.link.set_up()]) def test_plug_dev_exists(self): self.device_exists.return_value = True with mock.patch('manila.network.linux.interface.LOG.warning') as log: br = interface.BridgeInterfaceDriver() br.plug('port-1234', 'tap0', 'aa:bb:cc:dd:ee:ff') self.ip_dev.assert_has_calls([]) self.assertEqual(1, log.call_count) def test_unplug_no_device(self): self.device_exists.return_value = False self.ip_dev().link.delete.side_effect = RuntimeError with mock.patch('manila.network.linux.interface.LOG') as log: br = interface.BridgeInterfaceDriver() br.unplug('tap0') [mock.call(), mock.call('tap0'), mock.call().link.delete()] self.assertEqual(1, log.error.call_count) def test_unplug(self): self.device_exists.return_value = True with mock.patch('manila.network.linux.interface.LOG.debug') as log: br = interface.BridgeInterfaceDriver() br.unplug('tap0') self.assertTrue(log.called) self.ip_dev.assert_has_calls([mock.call('tap0', None), mock.call().link.delete()]) manila-2.0.0/manila/tests/network/linux/test_ip_lib.py0000664000567000056710000007167512701407107024234 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import mock from manila.network.linux import ip_lib from manila import test NETNS_SAMPLE = [ '12345678-1234-5678-abcd-1234567890ab', 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'cccccccc-cccc-cccc-cccc-cccccccccccc'] LINK_SAMPLE = [ '1: lo: mtu 16436 qdisc noqueue state UNKNOWN \\' 'link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00', '2: eth0: mtu 1500 qdisc mq state UP ' 'qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff' '\ alias openvswitch', '3: br-int: mtu 1500 qdisc noop state DOWN ' '\ link/ether aa:bb:cc:dd:ee:ff brd ff:ff:ff:ff:ff:ff', '4: gw-ddc717df-49: mtu 1500 qdisc noop ' 'state DOWN \ link/ether fe:dc:ba:fe:dc:ba brd ff:ff:ff:ff:ff:ff', '5: eth0.50@eth0: mtu 1500 qdisc ' ' noqueue master brq0b24798c-07 state UP mode DEFAULT' '\ link/ether ab:04:49:b6:ab:a0 brd ff:ff:ff:ff:ff:ff'] ADDR_SAMPLE = (""" 2: eth0: mtu 1500 qdisc mq state UP qlen 1000 link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff inet 172.16.77.240/24 brd 172.16.77.255 scope global eth0 inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic valid_lft 14187sec preferred_lft 3387sec inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """ """deprecated dynamic valid_lft 14187sec preferred_lft 0sec inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """ """deprecated dynamic valid_lft 14187sec preferred_lft 0sec inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic valid_lft 14187sec preferred_lft 3387sec inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link valid_lft forever preferred_lft forever """) ADDR_SAMPLE2 = (""" 2: eth0: mtu 1500 qdisc mq state UP qlen 1000 link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff inet 172.16.77.240/24 scope global eth0 inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic valid_lft 14187sec preferred_lft 3387sec inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """ """deprecated dynamic valid_lft 14187sec preferred_lft 0sec inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """ """deprecated dynamic valid_lft 14187sec preferred_lft 0sec inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic valid_lft 14187sec preferred_lft 3387sec inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link valid_lft forever preferred_lft forever """) GATEWAY_SAMPLE1 = (""" default via 10.35.19.254 metric 100 10.35.16.0/22 proto kernel scope link src 10.35.17.97 """) GATEWAY_SAMPLE2 = (""" default via 10.35.19.254 metric 100 """) GATEWAY_SAMPLE3 = (""" 10.35.16.0/22 proto kernel scope link src 10.35.17.97 """) GATEWAY_SAMPLE4 = (""" default via 10.35.19.254 """) GATEWAY_SAMPLE5 = (""" default via 172.24.47.1 dev eth0 10.0.0.0/24 dev tapc226b810-a0 proto kernel scope link src 10.0.0.3 10.254.0.0/28 dev tap6de90453-1c proto kernel scope link src 10.254.0.4 10.35.16.0/22 proto kernel scope link src 10.35.17.97 172.24.4.0/24 via 10.35.19.254 metric 100 """) DEVICE_ROUTE_SAMPLE = ("10.0.0.0/24 scope link src 10.0.0.2") SUBNET_SAMPLE1 = ("10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1\n" "10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2") SUBNET_SAMPLE2 = ("10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2\n" "10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1") class TestSubProcessBase(test.TestCase): def setUp(self): super(TestSubProcessBase, self).setUp() self.execute_p = mock.patch('manila.utils.execute') self.execute = self.execute_p.start() def 
tearDown(self): self.execute_p.stop() super(TestSubProcessBase, self).tearDown() def test_execute_wrapper(self): ip_lib.SubProcessBase._execute('o', 'link', ('list',)) self.execute.assert_called_once_with('ip', '-o', 'link', 'list', run_as_root=False) def test_execute_wrapper_int_options(self): ip_lib.SubProcessBase._execute([4], 'link', ('list',)) self.execute.assert_called_once_with('ip', '-4', 'link', 'list', run_as_root=False) def test_execute_wrapper_no_options(self): ip_lib.SubProcessBase._execute([], 'link', ('list',)) self.execute.assert_called_once_with('ip', 'link', 'list', run_as_root=False) def test_run_no_namespace(self): base = ip_lib.SubProcessBase() base._run([], 'link', ('list',)) self.execute.assert_called_once_with('ip', 'link', 'list', run_as_root=False) def test_run_namespace(self): base = ip_lib.SubProcessBase('ns') base._run([], 'link', ('list',)) self.execute.assert_called_once_with('ip', 'netns', 'exec', 'ns', 'ip', 'link', 'list', run_as_root=True) def test_as_root_namespace(self): base = ip_lib.SubProcessBase('ns') base._as_root([], 'link', ('list',)) self.execute.assert_called_once_with('ip', 'netns', 'exec', 'ns', 'ip', 'link', 'list', run_as_root=True) class TestIpWrapper(test.TestCase): def setUp(self): super(TestIpWrapper, self).setUp() self.execute_p = mock.patch.object(ip_lib.IPWrapper, '_execute') self.execute = self.execute_p.start() def tearDown(self): self.execute_p.stop() super(TestIpWrapper, self).tearDown() def test_get_devices(self): self.execute.return_value = '\n'.join(LINK_SAMPLE) retval = ip_lib.IPWrapper().get_devices() self.assertEqual([ip_lib.IPDevice('lo'), ip_lib.IPDevice('eth0'), ip_lib.IPDevice('br-int'), ip_lib.IPDevice('gw-ddc717df-49'), ip_lib.IPDevice('eth0.50')], retval) self.execute.assert_called_once_with('o', 'link', ('list',), None) def test_get_devices_malformed_line(self): self.execute.return_value = '\n'.join(LINK_SAMPLE + ['gibberish']) retval = ip_lib.IPWrapper().get_devices() self.assertEqual([ip_lib.IPDevice('lo'), ip_lib.IPDevice('eth0'), ip_lib.IPDevice('br-int'), ip_lib.IPDevice('gw-ddc717df-49'), ip_lib.IPDevice('eth0.50')], retval) self.execute.assert_called_once_with('o', 'link', ('list',), None) def test_get_namespaces(self): self.execute.return_value = '\n'.join(NETNS_SAMPLE) retval = ip_lib.IPWrapper.get_namespaces() self.assertEqual(['12345678-1234-5678-abcd-1234567890ab', 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'cccccccc-cccc-cccc-cccc-cccccccccccc'], retval) self.execute.assert_called_once_with('', 'netns', ('list',)) def test_add_tuntap(self): ip_lib.IPWrapper().add_tuntap('tap0') self.execute.assert_called_once_with('', 'tuntap', ('add', 'tap0', 'mode', 'tap'), None, as_root=True) def test_add_veth(self): ip_lib.IPWrapper().add_veth('tap0', 'tap1') self.execute.assert_called_once_with('', 'link', ('add', 'tap0', 'type', 'veth', 'peer', 'name', 'tap1'), None, as_root=True) def test_add_veth_with_namespaces(self): ns2 = 'ns2' with mock.patch.object(ip_lib.IPWrapper, 'ensure_namespace') as en: ip_lib.IPWrapper().add_veth('tap0', 'tap1', namespace2=ns2) en.assert_has_calls([mock.call(ns2)]) self.execute.assert_called_once_with('', 'link', ('add', 'tap0', 'type', 'veth', 'peer', 'name', 'tap1', 'netns', ns2), None, as_root=True) def test_get_device(self): dev = ip_lib.IPWrapper('ns').device('eth0') self.assertEqual('ns', dev.namespace) self.assertEqual('eth0', dev.name) def test_ensure_namespace(self): with mock.patch.object(ip_lib, 'IPDevice') as ip_dev: ip = ip_lib.IPWrapper() with mock.patch.object(ip.netns, 
'exists') as ns_exists: ns_exists.return_value = False ip.ensure_namespace('ns') self.execute.assert_has_calls( [mock.call([], 'netns', ('add', 'ns'), None, as_root=True)]) ip_dev.assert_has_calls([mock.call('lo', 'ns'), mock.call().link.set_up()]) def test_ensure_namespace_existing(self): with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd: ip_ns_cmd.exists.return_value = True ns = ip_lib.IPWrapper().ensure_namespace('ns') self.assertFalse(self.execute.called) self.assertEqual('ns', ns.namespace) def test_namespace_is_empty_no_devices(self): ip = ip_lib.IPWrapper('ns') with mock.patch.object(ip, 'get_devices') as get_devices: get_devices.return_value = [] self.assertTrue(ip.namespace_is_empty()) get_devices.assert_called_once_with(exclude_loopback=True) def test_namespace_is_empty(self): ip = ip_lib.IPWrapper('ns') with mock.patch.object(ip, 'get_devices') as get_devices: get_devices.return_value = [mock.Mock()] self.assertFalse(ip.namespace_is_empty()) get_devices.assert_called_once_with(exclude_loopback=True) def test_garbage_collect_namespace_does_not_exist(self): with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls: ip_ns_cmd_cls.return_value.exists.return_value = False ip = ip_lib.IPWrapper('ns') with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty: self.assertFalse(ip.garbage_collect_namespace()) ip_ns_cmd_cls.assert_has_calls([mock.call().exists('ns')]) self.assertNotIn(mock.call().delete('ns'), ip_ns_cmd_cls.return_value.mock_calls) self.assertEqual([], mock_is_empty.mock_calls) def test_garbage_collect_namespace_existing_empty_ns(self): with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls: ip_ns_cmd_cls.return_value.exists.return_value = True ip = ip_lib.IPWrapper('ns') with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty: mock_is_empty.return_value = True self.assertTrue(ip.garbage_collect_namespace()) mock_is_empty.assert_called_once_with() expected = [mock.call().exists('ns'), mock.call().delete('ns')] ip_ns_cmd_cls.assert_has_calls(expected) def test_garbage_collect_namespace_existing_not_empty(self): lo_device = mock.Mock() lo_device.name = 'lo' tap_device = mock.Mock() tap_device.name = 'tap1' with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls: ip_ns_cmd_cls.return_value.exists.return_value = True ip = ip_lib.IPWrapper('ns') with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty: mock_is_empty.return_value = False self.assertFalse(ip.garbage_collect_namespace()) mock_is_empty.assert_called_once_with() expected = [mock.call(ip), mock.call().exists('ns')] self.assertEqual(expected, ip_ns_cmd_cls.mock_calls) self.assertNotIn(mock.call().delete('ns'), ip_ns_cmd_cls.mock_calls) def test_add_device_to_namespace(self): dev = mock.Mock() ip_lib.IPWrapper('ns').add_device_to_namespace(dev) dev.assert_has_calls([mock.call.link.set_netns('ns')]) def test_add_device_to_namespace_is_none(self): dev = mock.Mock() ip_lib.IPWrapper().add_device_to_namespace(dev) self.assertEqual([], dev.mock_calls) class TestIPDevice(test.TestCase): def test_eq_same_name(self): dev1 = ip_lib.IPDevice('tap0') dev2 = ip_lib.IPDevice('tap0') self.assertEqual(dev1, dev2) def test_eq_diff_name(self): dev1 = ip_lib.IPDevice('tap0') dev2 = ip_lib.IPDevice('tap1') self.assertNotEqual(dev1, dev2) def test_eq_same_namespace(self): dev1 = ip_lib.IPDevice('tap0', 'ns1') dev2 = ip_lib.IPDevice('tap0', 'ns1') self.assertEqual(dev1, dev2) def test_eq_diff_namespace(self): dev1 = ip_lib.IPDevice('tap0', 'ns1') dev2 = 
ip_lib.IPDevice('tap0', 'ns2') self.assertNotEqual(dev1, dev2) def test_eq_other_is_none(self): dev1 = ip_lib.IPDevice('tap0', 'ns1') self.assertIsNotNone(dev1) def test_str(self): self.assertEqual('tap0', str(ip_lib.IPDevice('tap0'))) class TestIPCommandBase(test.TestCase): def setUp(self): super(TestIPCommandBase, self).setUp() self.ip = mock.Mock() self.ip.namespace = 'namespace' self.ip_cmd = ip_lib.IpCommandBase(self.ip) self.ip_cmd.COMMAND = 'foo' def test_run(self): self.ip_cmd._run('link', 'show') self.ip.assert_has_calls([mock.call._run([], 'foo', ('link', 'show'))]) def test_run_with_options(self): self.ip_cmd._run('link', options='o') self.ip.assert_has_calls([mock.call._run('o', 'foo', ('link', ))]) def test_as_root(self): self.ip_cmd._as_root('link') self.ip.assert_has_calls( [mock.call._as_root([], 'foo', ('link', ), False)]) def test_as_root_with_options(self): self.ip_cmd._as_root('link', options='o') self.ip.assert_has_calls( [mock.call._as_root('o', 'foo', ('link', ), False)]) class TestIPDeviceCommandBase(test.TestCase): def setUp(self): super(TestIPDeviceCommandBase, self).setUp() self.ip_dev = mock.Mock() self.ip_dev.name = 'eth0' self.ip_dev._execute = mock.Mock(return_value='executed') self.ip_cmd = ip_lib.IpDeviceCommandBase(self.ip_dev) self.ip_cmd.COMMAND = 'foo' def test_name_property(self): self.assertEqual('eth0', self.ip_cmd.name) class TestIPCmdBase(test.TestCase): def setUp(self): super(TestIPCmdBase, self).setUp() self.parent = mock.Mock() self.parent.name = 'eth0' def _assert_call(self, options, args): self.parent.assert_has_calls([ mock.call._run(options, self.command, args)]) def _assert_sudo(self, options, args, force_root_namespace=False): self.parent.assert_has_calls( [mock.call._as_root(options, self.command, args, force_root_namespace)]) class TestIpLinkCommand(TestIPCmdBase): def setUp(self): super(TestIpLinkCommand, self).setUp() self.parent._run.return_value = LINK_SAMPLE[1] self.command = 'link' self.link_cmd = ip_lib.IpLinkCommand(self.parent) def test_set_address(self): self.link_cmd.set_address('aa:bb:cc:dd:ee:ff') self._assert_sudo([], ('set', 'eth0', 'address', 'aa:bb:cc:dd:ee:ff')) def test_set_mtu(self): self.link_cmd.set_mtu(1500) self._assert_sudo([], ('set', 'eth0', 'mtu', 1500)) def test_set_up(self): self.link_cmd.set_up() self._assert_sudo([], ('set', 'eth0', 'up')) def test_set_down(self): self.link_cmd.set_down() self._assert_sudo([], ('set', 'eth0', 'down')) def test_set_netns(self): self.link_cmd.set_netns('foo') self._assert_sudo([], ('set', 'eth0', 'netns', 'foo')) self.assertEqual('foo', self.parent.namespace) def test_set_name(self): self.link_cmd.set_name('tap1') self._assert_sudo([], ('set', 'eth0', 'name', 'tap1')) self.assertEqual('tap1', self.parent.name) def test_set_alias(self): self.link_cmd.set_alias('openvswitch') self._assert_sudo([], ('set', 'eth0', 'alias', 'openvswitch')) def test_delete(self): self.link_cmd.delete() self._assert_sudo([], ('delete', 'eth0')) def test_address_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual('cc:dd:ee:ff:ab:cd', self.link_cmd.address) def test_mtu_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(1500, self.link_cmd.mtu) def test_qdisc_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual('mq', self.link_cmd.qdisc) def test_qlen_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(1000, self.link_cmd.qlen) 
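    # The *_property tests in this class all follow the same pattern: stub the
    # parent's _execute with LINK_SAMPLE[1] (sample 'ip -o link show eth0'
    # output) and assert on the attribute parsed from it. A hypothetical
    # helper, shown here only as a sketch and not used by the original suite,
    # could capture that pattern in one place:
    def _assert_link_property_sketch(self, attr_name, expected):
        # Stub the raw 'ip' output and compare the parsed attribute value.
        self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
        self.assertEqual(expected, getattr(self.link_cmd, attr_name))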
def test_alias_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual('openvswitch', self.link_cmd.alias) def test_state_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual('UP', self.link_cmd.state) def test_settings_property(self): expected = {'mtu': 1500, 'qlen': 1000, 'state': 'UP', 'qdisc': 'mq', 'brd': 'ff:ff:ff:ff:ff:ff', 'link/ether': 'cc:dd:ee:ff:ab:cd', 'alias': 'openvswitch'} self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(expected, self.link_cmd.attributes) self._assert_call('o', ('show', 'eth0')) class TestIpAddrCommand(TestIPCmdBase): def setUp(self): super(TestIpAddrCommand, self).setUp() self.parent.name = 'tap0' self.command = 'addr' self.addr_cmd = ip_lib.IpAddrCommand(self.parent) def test_add_address(self): self.addr_cmd.add(4, '192.168.45.100/24', '192.168.45.255') self._assert_sudo([4], ('add', '192.168.45.100/24', 'brd', '192.168.45.255', 'scope', 'global', 'dev', 'tap0')) def test_add_address_scoped(self): self.addr_cmd.add(4, '192.168.45.100/24', '192.168.45.255', scope='link') self._assert_sudo([4], ('add', '192.168.45.100/24', 'brd', '192.168.45.255', 'scope', 'link', 'dev', 'tap0')) def test_del_address(self): self.addr_cmd.delete(4, '192.168.45.100/24') self._assert_sudo([4], ('del', '192.168.45.100/24', 'dev', 'tap0')) def test_flush(self): self.addr_cmd.flush() self._assert_sudo([], ('flush', 'tap0')) def test_list(self): expected = [ dict(ip_version=4, scope='global', dynamic=False, cidr='172.16.77.240/24', broadcast='172.16.77.255'), dict(ip_version=6, scope='global', dynamic=True, cidr='2001:470:9:1224:5595:dd51:6ba2:e788/64', broadcast='::'), dict(ip_version=6, scope='global', dynamic=True, cidr='2001:470:9:1224:fd91:272:581e:3a32/64', broadcast='::'), dict(ip_version=6, scope='global', dynamic=True, cidr='2001:470:9:1224:4508:b885:5fb:740b/64', broadcast='::'), dict(ip_version=6, scope='global', dynamic=True, cidr='2001:470:9:1224:dfcc:aaff:feb9:76ce/64', broadcast='::'), dict(ip_version=6, scope='link', dynamic=False, cidr='fe80::dfcc:aaff:feb9:76ce/64', broadcast='::')] test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2] for test_case in test_cases: self.parent._run = mock.Mock(return_value=test_case) self.assertEqual(expected, self.addr_cmd.list()) self._assert_call([], ('show', 'tap0')) def test_list_filtered(self): expected = [ dict(ip_version=4, scope='global', dynamic=False, cidr='172.16.77.240/24', broadcast='172.16.77.255')] test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2] for test_case in test_cases: output = '\n'.join(test_case.split('\n')[0:4]) self.parent._run.return_value = output self.assertEqual(expected, self.addr_cmd.list('global', filters=['permanent'])) self._assert_call([], ('show', 'tap0', 'permanent', 'scope', 'global')) class TestIpRouteCommand(TestIPCmdBase): def setUp(self): super(TestIpRouteCommand, self).setUp() self.parent.name = 'eth0' self.command = 'route' self.route_cmd = ip_lib.IpRouteCommand(self.parent) def test_add_gateway(self): gateway = '192.168.45.100' metric = 100 self.route_cmd.add_gateway(gateway, metric) self._assert_sudo([], ('replace', 'default', 'via', gateway, 'metric', metric, 'dev', self.parent.name)) def test_del_gateway(self): gateway = '192.168.45.100' self.route_cmd.delete_gateway(gateway) self._assert_sudo([], ('del', 'default', 'via', gateway, 'dev', self.parent.name)) def test_get_gateway(self): test_cases = [{'sample': GATEWAY_SAMPLE1, 'expected': {'gateway': '10.35.19.254', 'metric': 100}}, {'sample': 
GATEWAY_SAMPLE2, 'expected': {'gateway': '10.35.19.254', 'metric': 100}}, {'sample': GATEWAY_SAMPLE3, 'expected': None}, {'sample': GATEWAY_SAMPLE4, 'expected': {'gateway': '10.35.19.254'}}] for test_case in test_cases: self.parent._run = mock.Mock(return_value=test_case['sample']) self.assertEqual(test_case['expected'], self.route_cmd.get_gateway()) def test_pullup_route(self): # interface is not the first in the list - requires # deleting and creating existing entries output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE1] def pullup_side_effect(self, *args): result = output.pop(0) return result self.parent._run = mock.Mock(side_effect=pullup_side_effect) self.route_cmd.pullup_route('tap1d7888a7-10') self._assert_sudo([], ('del', '10.0.0.0/24', 'dev', 'qr-23380d11-d2')) self._assert_sudo([], ('append', '10.0.0.0/24', 'proto', 'kernel', 'src', '10.0.0.1', 'dev', 'qr-23380d11-d2')) def test_pullup_route_first(self): # interface is first in the list - no changes output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE2] def pullup_side_effect(self, *args): result = output.pop(0) return result self.parent._run = mock.Mock(side_effect=pullup_side_effect) self.route_cmd.pullup_route('tap1d7888a7-10') # Check two calls - device get and subnet get self.assertEqual(2, len(self.parent._run.mock_calls)) def test_list(self): self.route_cmd._as_root = mock.Mock(return_value=GATEWAY_SAMPLE5) expected = [{'Destination': 'default', 'Device': 'eth0', 'Gateway': '172.24.47.1'}, {'Destination': '10.0.0.0/24', 'Device': 'tapc226b810-a0'}, {'Destination': '10.254.0.0/28', 'Device': 'tap6de90453-1c'}, {'Destination': '10.35.16.0/22'}, {'Destination': '172.24.4.0/24', 'Gateway': '10.35.19.254'}] result = self.route_cmd.list() self.assertEqual(expected, result) self.route_cmd._as_root.assert_called_once_with('list') def test_delete_net_route(self): self.route_cmd._as_root = mock.Mock() self.route_cmd.delete_net_route('10.0.0.0/24', 'br-ex') self.route_cmd._as_root.assert_called_once_with( 'delete', '10.0.0.0/24', 'dev', 'br-ex') def test_clear_outdated_routes(self): self.route_cmd.delete_net_route = mock.Mock() list_result = [{'Destination': 'default', 'Device': 'eth0', 'Gateway': '172.24.47.1'}, {'Destination': '10.0.0.0/24', 'Device': 'eth0'}, {'Destination': '10.0.0.0/24', 'Device': 'br-ex'}] self.route_cmd.list = mock.Mock(return_value=list_result) self.route_cmd.clear_outdated_routes('10.0.0.0/24') self.route_cmd.delete_net_route.assert_called_once_with( '10.0.0.0/24', 'br-ex') class TestIpNetnsCommand(TestIPCmdBase): def setUp(self): super(TestIpNetnsCommand, self).setUp() self.command = 'netns' self.netns_cmd = ip_lib.IpNetnsCommand(self.parent) def test_add_namespace(self): ns = self.netns_cmd.add('ns') self._assert_sudo([], ('add', 'ns'), force_root_namespace=True) self.assertEqual('ns', ns.namespace) def test_delete_namespace(self): with mock.patch('manila.utils.execute'): self.netns_cmd.delete('ns') self._assert_sudo([], ('delete', 'ns'), force_root_namespace=True) def test_namespace_exists(self): retval = '\n'.join(NETNS_SAMPLE) self.parent._as_root.return_value = retval self.assertTrue( self.netns_cmd.exists('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb')) self._assert_sudo('o', ('list',), force_root_namespace=True) def test_namespace_doest_not_exist(self): retval = '\n'.join(NETNS_SAMPLE) self.parent._as_root.return_value = retval self.assertFalse( self.netns_cmd.exists('bbbbbbbb-1111-2222-3333-bbbbbbbbbbbb')) self._assert_sudo('o', ('list',), force_root_namespace=True) def test_execute(self): self.parent.namespace = 'ns' 
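        # Because a namespace is set on the parent wrapper, execute() is
        # expected to wrap the command as 'ip netns exec ns ...' and run it as
        # root with check_exit_code=True, which is exactly what the assertion
        # below verifies. (Explanatory comment only.)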
with mock.patch('manila.utils.execute') as execute: self.netns_cmd.execute(['ip', 'link', 'list']) execute.assert_called_once_with('ip', 'netns', 'exec', 'ns', 'ip', 'link', 'list', run_as_root=True, check_exit_code=True) def test_execute_env_var_prepend(self): self.parent.namespace = 'ns' with mock.patch('manila.utils.execute') as execute: env = dict(FOO=1, BAR=2) self.netns_cmd.execute(['ip', 'link', 'list'], env) execute.assert_called_once_with( 'ip', 'netns', 'exec', 'ns', 'env', 'BAR=2', 'FOO=1', 'ip', 'link', 'list', run_as_root=True, check_exit_code=True) class TestDeviceExists(test.TestCase): def test_device_exists(self): with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute: _execute.return_value = LINK_SAMPLE[1] self.assertTrue(ip_lib.device_exists('eth0')) _execute.assert_called_once_with('o', 'link', ('show', 'eth0')) def test_device_does_not_exist(self): with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute: _execute.return_value = '' _execute.side_effect = RuntimeError('Device does not exist.') self.assertFalse(ip_lib.device_exists('eth0')) manila-2.0.0/manila/tests/network/linux/__init__.py0000664000567000056710000000000012701407107023444 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/network/neutron/0000775000567000056710000000000012701407265021705 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/network/neutron/__init__.py0000664000567000056710000000000012701407107023777 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/network/neutron/test_neutron_api.py0000664000567000056710000005553312701407107025647 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2014 Mirantis Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
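# The tests in this module exercise manila.network.neutron.api.API against a
# lightweight FakeNeutronClient rather than a real Neutron endpoint: the
# neutronclient v2.0 Client factory is replaced with a mock that returns the
# fake, so every API call can be asserted on directly. A minimal sketch of
# that stubbing pattern (illustrative only, mirroring the setUp() below):
#
#     self.mock_object(
#         clientv20, 'Client',
#         mock.Mock(return_value=FakeNeutronClient()))
#     self.neutron_api = neutron_api.API()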
import mock from neutronclient.common import exceptions as neutron_client_exc from neutronclient.v2_0 import client as clientv20 from oslo_config import cfg from manila import context from manila.db import base from manila import exception from manila.network.neutron import api as neutron_api from manila.network.neutron import constants as neutron_constants from manila import test from manila.tests.db import fakes CONF = cfg.CONF class FakeNeutronClient(object): def create_port(self, body): return body def delete_port(self, port_id): pass def show_port(self, port_id): pass def list_ports(self, **search_opts): pass def list_networks(self): pass def show_network(self, network_uuid): pass def show_subnet(self, subnet_uuid): pass def create_router(self, body): return body def list_routers(self): pass def create_network(self, body): return body def create_subnet(self, body): return body def update_port(self, port_id, body): return body def add_interface_router(self, router_id, subnet_id, port_id): pass def update_router(self, router_id, body): return body def show_router(self, router_id): pass def list_extensions(self): pass class NeutronApiTest(test.TestCase): def setUp(self): super(NeutronApiTest, self).setUp() self.context = context.get_admin_context() self.mock_object(base, 'Base', fakes.FakeModel) self.mock_object( clientv20, 'Client', mock.Mock(return_value=FakeNeutronClient())) self.neutron_api = neutron_api.API() def test_create_api_object(self): # instantiate Neutron API object neutron_api_instance = neutron_api.API() # Verify results self.assertTrue(hasattr(neutron_api_instance, 'client')) self.assertTrue(hasattr(neutron_api_instance, 'configuration')) self.assertEqual('DEFAULT', neutron_api_instance.config_group_name) def test_create_api_object_custom_config_group(self): # Set up test data fake_config_group_name = 'fake_config_group_name' # instantiate Neutron API object obj = neutron_api.API(fake_config_group_name) obj.get_client(mock.Mock()) # Verify results self.assertTrue(clientv20.Client.called) self.assertTrue(hasattr(obj, 'client')) self.assertTrue(hasattr(obj, 'configuration')) self.assertEqual( fake_config_group_name, obj.configuration._group.name) def test_create_port_with_all_args(self): # Set up test data self.mock_object(self.neutron_api, '_has_port_binding_extension', mock.Mock(return_value=True)) port_args = { 'tenant_id': 'test tenant', 'network_id': 'test net', 'host_id': 'test host', 'subnet_id': 'test subnet', 'fixed_ip': 'test ip', 'device_owner': 'test owner', 'device_id': 'test device', 'mac_address': 'test mac', 'security_group_ids': 'test group', 'dhcp_opts': 'test dhcp', } # Execute method 'create_port' port = self.neutron_api.create_port(**port_args) # Verify results self.assertEqual(port_args['tenant_id'], port['tenant_id']) self.assertEqual(port_args['network_id'], port['network_id']) self.assertEqual(port_args['host_id'], port['binding:host_id']) self.assertEqual(port_args['subnet_id'], port['fixed_ips'][0]['subnet_id']) self.assertEqual(port_args['fixed_ip'], port['fixed_ips'][0]['ip_address']) self.assertEqual(port_args['device_owner'], port['device_owner']) self.assertEqual(port_args['device_id'], port['device_id']) self.assertEqual(port_args['mac_address'], port['mac_address']) self.assertEqual(port_args['security_group_ids'], port['security_groups']) self.assertEqual(port_args['dhcp_opts'], port['extra_dhcp_opts']) self.neutron_api._has_port_binding_extension.assert_called_once_with() self.assertTrue(clientv20.Client.called) def 
test_create_port_with_required_args(self): # Set up test data self.mock_object(self.neutron_api, '_has_port_binding_extension', mock.Mock(return_value=True)) port_args = {'tenant_id': 'test tenant', 'network_id': 'test net'} # Execute method 'create_port' port = self.neutron_api.create_port(**port_args) # Verify results self.assertEqual(port_args['tenant_id'], port['tenant_id']) self.assertEqual(port_args['network_id'], port['network_id']) self.neutron_api._has_port_binding_extension.assert_called_once_with() self.assertTrue(clientv20.Client.called) @mock.patch.object(neutron_api.LOG, 'exception', mock.Mock()) def test_create_port_exception(self): # Set up test data self.mock_object( self.neutron_api, '_has_port_binding_extension', mock.Mock(return_value=True)) self.mock_object( self.neutron_api.client, 'create_port', mock.Mock(side_effect=neutron_client_exc.NeutronClientException)) port_args = {'tenant_id': 'test tenant', 'network_id': 'test net'} # Execute method 'create_port' self.assertRaises(exception.NetworkException, self.neutron_api.create_port, **port_args) # Verify results self.neutron_api._has_port_binding_extension.assert_called_once_with() self.assertTrue(neutron_api.LOG.exception.called) self.assertTrue(clientv20.Client.called) self.assertTrue(self.neutron_api.client.create_port.called) @mock.patch.object(neutron_api.LOG, 'exception', mock.Mock()) def test_create_port_exception_status_409(self): # Set up test data self.mock_object( self.neutron_api, '_has_port_binding_extension', mock.Mock(return_value=True)) self.mock_object( self.neutron_api.client, 'create_port', mock.Mock(side_effect=neutron_client_exc.NeutronClientException( status_code=409))) port_args = {'tenant_id': 'test tenant', 'network_id': 'test net'} # Execute method 'create_port' self.assertRaises(exception.PortLimitExceeded, self.neutron_api.create_port, **port_args) # Verify results self.neutron_api._has_port_binding_extension.assert_called_once_with() self.assertTrue(neutron_api.LOG.exception.called) self.assertTrue(clientv20.Client.called) self.assertTrue(self.neutron_api.client.create_port.called) def test_delete_port(self): # Set up test data self.mock_object(self.neutron_api.client, 'delete_port') port_id = 'test port id' # Execute method 'delete_port' self.neutron_api.delete_port(port_id) # Verify results self.neutron_api.client.delete_port.assert_called_once_with(port_id) self.assertTrue(clientv20.Client.called) def test_list_ports(self): # Set up test data search_opts = {'test_option': 'test_value'} fake_ports = [{'fake port': 'fake port info'}] self.mock_object( self.neutron_api.client, 'list_ports', mock.Mock(return_value={'ports': fake_ports})) # Execute method 'list_ports' ports = self.neutron_api.list_ports(**search_opts) # Verify results self.assertEqual(fake_ports, ports) self.assertTrue(clientv20.Client.called) self.neutron_api.client.list_ports.assert_called_once_with( **search_opts) def test_show_port(self): # Set up test data port_id = 'test port id' fake_port = {'fake port': 'fake port info'} self.mock_object( self.neutron_api.client, 'show_port', mock.Mock(return_value={'port': fake_port})) # Execute method 'show_port' port = self.neutron_api.show_port(port_id) # Verify results self.assertEqual(fake_port, port) self.assertTrue(clientv20.Client.called) self.neutron_api.client.show_port.assert_called_once_with(port_id) def test_get_network(self): # Set up test data network_id = 'test network id' fake_network = {'fake network': 'fake network info'} self.mock_object( self.neutron_api.client, 
'show_network', mock.Mock(return_value={'network': fake_network})) # Execute method 'get_network' network = self.neutron_api.get_network(network_id) # Verify results self.assertEqual(fake_network, network) self.assertTrue(clientv20.Client.called) self.neutron_api.client.show_network.assert_called_once_with( network_id) def test_get_subnet(self): # Set up test data subnet_id = 'fake subnet id' self.mock_object( self.neutron_api.client, 'show_subnet', mock.Mock(return_value={'subnet': {}})) # Execute method 'get_subnet' subnet = self.neutron_api.get_subnet(subnet_id) # Verify results self.assertEqual({}, subnet) self.assertTrue(clientv20.Client.called) self.neutron_api.client.show_subnet.assert_called_once_with( subnet_id) def test_get_all_network(self): # Set up test data fake_networks = [{'fake network': 'fake network info'}] self.mock_object( self.neutron_api.client, 'list_networks', mock.Mock(return_value={'networks': fake_networks})) # Execute method 'get_all_networks' networks = self.neutron_api.get_all_networks() # Verify results self.assertEqual(fake_networks, networks) self.assertTrue(clientv20.Client.called) self.neutron_api.client.list_networks.assert_called_once_with() def test_list_extensions(self): # Set up test data extensions = [ {'name': neutron_constants.PORTBINDING_EXT}, {'name': neutron_constants.PROVIDER_NW_EXT}, ] self.mock_object( self.neutron_api.client, 'list_extensions', mock.Mock(return_value={'extensions': extensions})) # Execute method 'list_extensions' result = self.neutron_api.list_extensions() # Verify results self.assertTrue(clientv20.Client.called) self.neutron_api.client.list_extensions.assert_called_once_with() self.assertIn(neutron_constants.PORTBINDING_EXT, result) self.assertIn(neutron_constants.PROVIDER_NW_EXT, result) self.assertEqual( extensions[0], result[neutron_constants.PORTBINDING_EXT]) self.assertEqual( extensions[1], result[neutron_constants.PROVIDER_NW_EXT]) def test_create_network(self): # Set up test data net_args = {'tenant_id': 'test tenant', 'name': 'test name'} # Execute method 'network_create' network = self.neutron_api.network_create(**net_args) # Verify results self.assertEqual(net_args['tenant_id'], network['tenant_id']) self.assertEqual(net_args['name'], network['name']) self.assertTrue(clientv20.Client.called) def test_create_subnet(self): # Set up test data subnet_args = { 'tenant_id': 'test tenant', 'name': 'test name', 'net_id': 'test net id', 'cidr': '10.0.0.0/24', } # Execute method 'subnet_create' subnet = self.neutron_api.subnet_create(**subnet_args) # Verify results self.assertEqual(subnet_args['tenant_id'], subnet['tenant_id']) self.assertEqual(subnet_args['name'], subnet['name']) self.assertTrue(clientv20.Client.called) def test_create_router(self): # Set up test data router_args = {'tenant_id': 'test tenant', 'name': 'test name'} # Execute method 'router_create' router = self.neutron_api.router_create(**router_args) # Verify results self.assertEqual(router_args['tenant_id'], router['tenant_id']) self.assertEqual(router_args['name'], router['name']) self.assertTrue(clientv20.Client.called) def test_list_routers(self): # Set up test data fake_routers = [{'fake router': 'fake router info'}] self.mock_object( self.neutron_api.client, 'list_routers', mock.Mock(return_value={'routers': fake_routers})) # Execute method 'router_list' networks = self.neutron_api.router_list() # Verify results self.assertEqual(fake_routers, networks) self.assertTrue(clientv20.Client.called) 
self.neutron_api.client.list_routers.assert_called_once_with() def test_create_network_exception(self): # Set up test data net_args = {'tenant_id': 'test tenant', 'name': 'test name'} self.mock_object( self.neutron_api.client, 'create_network', mock.Mock(side_effect=neutron_client_exc.NeutronClientException)) # Execute method 'network_create' self.assertRaises( exception.NetworkException, self.neutron_api.network_create, **net_args) # Verify results self.neutron_api.client.create_network.assert_called_once_with( {'network': net_args}) self.assertTrue(clientv20.Client.called) def test_create_subnet_exception(self): # Set up test data subnet_args = { 'tenant_id': 'test tenant', 'name': 'test name', 'net_id': 'test net id', 'cidr': '10.0.0.0/24', } self.mock_object( self.neutron_api.client, 'create_subnet', mock.Mock(side_effect=neutron_client_exc.NeutronClientException)) # Execute method 'subnet_create' self.assertRaises( exception.NetworkException, self.neutron_api.subnet_create, **subnet_args) # Verify results expected_data = { 'network_id': subnet_args['net_id'], 'tenant_id': subnet_args['tenant_id'], 'cidr': subnet_args['cidr'], 'name': subnet_args['name'], 'ip_version': 4, } self.neutron_api.client.create_subnet.assert_called_once_with( {'subnet': expected_data}) self.assertTrue(clientv20.Client.called) def test_create_router_exception(self): # Set up test data router_args = {'tenant_id': 'test tenant', 'name': 'test name'} self.mock_object( self.neutron_api.client, 'create_router', mock.Mock(side_effect=neutron_client_exc.NeutronClientException)) # Execute method 'router_create' self.assertRaises( exception.NetworkException, self.neutron_api.router_create, **router_args) # Verify results self.neutron_api.client.create_router.assert_called_once_with( {'router': router_args}) self.assertTrue(clientv20.Client.called) def test_update_port_fixed_ips(self): # Set up test data port_id = 'test_port' fixed_ips = {'fixed_ips': [{'subnet_id': 'test subnet'}]} # Execute method 'update_port_fixed_ips' port = self.neutron_api.update_port_fixed_ips(port_id, fixed_ips) # Verify results self.assertEqual(fixed_ips, port) self.assertTrue(clientv20.Client.called) def test_update_port_fixed_ips_exception(self): # Set up test data port_id = 'test_port' fixed_ips = {'fixed_ips': [{'subnet_id': 'test subnet'}]} self.mock_object( self.neutron_api.client, 'update_port', mock.Mock(side_effect=neutron_client_exc.NeutronClientException)) # Execute method 'update_port_fixed_ips' self.assertRaises( exception.NetworkException, self.neutron_api.update_port_fixed_ips, port_id, fixed_ips) # Verify results self.neutron_api.client.update_port.assert_called_once_with( port_id, {'port': fixed_ips}) self.assertTrue(clientv20.Client.called) def test_router_update_routes(self): # Set up test data router_id = 'test_router' routes = { 'routes': [ {'destination': '0.0.0.0/0', 'nexthop': '8.8.8.8', }, ], } # Execute method 'router_update_routes' router = self.neutron_api.router_update_routes(router_id, routes) # Verify results self.assertEqual(routes, router) self.assertTrue(clientv20.Client.called) def test_router_update_routes_exception(self): # Set up test data router_id = 'test_router' routes = { 'routes': [ {'destination': '0.0.0.0/0', 'nexthop': '8.8.8.8', }, ], } self.mock_object( self.neutron_api.client, 'update_router', mock.Mock(side_effect=neutron_client_exc.NeutronClientException)) # Execute method 'router_update_routes' self.assertRaises( exception.NetworkException, self.neutron_api.router_update_routes, router_id, 
routes) # Verify results self.neutron_api.client.update_router.assert_called_once_with( router_id, {'router': routes}) self.assertTrue(clientv20.Client.called) def test_show_router(self): # Set up test data router_id = 'test router id' fake_router = {'fake router': 'fake router info'} self.mock_object( self.neutron_api.client, 'show_router', mock.Mock(return_value={'router': fake_router})) # Execute method 'show_router' port = self.neutron_api.show_router(router_id) # Verify results self.assertEqual(fake_router, port) self.assertTrue(clientv20.Client.called) self.neutron_api.client.show_router.assert_called_once_with(router_id) def test_router_add_interface(self): # Set up test data router_id = 'test port id' subnet_id = 'test subnet id' port_id = 'test port id' self.mock_object(self.neutron_api.client, 'add_interface_router') # Execute method 'router_add_interface' self.neutron_api.router_add_interface(router_id, subnet_id, port_id) # Verify results self.neutron_api.client.add_interface_router.assert_called_once_with( port_id, {'subnet_id': subnet_id, 'port_id': port_id}) self.assertTrue(clientv20.Client.called) def test_router_add_interface_exception(self): # Set up test data router_id = 'test port id' subnet_id = 'test subnet id' port_id = 'test port id' self.mock_object( self.neutron_api.client, 'add_interface_router', mock.Mock(side_effect=neutron_client_exc.NeutronClientException)) # Execute method 'router_add_interface' self.assertRaises( exception.NetworkException, self.neutron_api.router_add_interface, router_id, subnet_id, port_id) # Verify results self.neutron_api.client.add_interface_router.assert_called_once_with( router_id, {'subnet_id': subnet_id, 'port_id': port_id}) self.assertTrue(clientv20.Client.called) def test_admin_project_id_exist(self): fake_admin_project_id = 'fake_admin_project_id_value' self.neutron_api.client.httpclient = mock.Mock() self.neutron_api.client.httpclient.auth_token = mock.Mock() self.neutron_api.client.httpclient.get_project_id = mock.Mock( return_value=fake_admin_project_id) admin_project_id = self.neutron_api.admin_project_id self.assertEqual(fake_admin_project_id, admin_project_id) self.neutron_api.client.httpclient.auth_token.called def test_admin_project_id_not_exist(self): fake_admin_project_id = 'fake_admin_project_id_value' self.neutron_api.client.httpclient = mock.Mock() self.neutron_api.client.httpclient.auth_token = mock.Mock( return_value=None) self.neutron_api.client.httpclient.authenticate = mock.Mock() self.neutron_api.client.httpclient.get_project_id = mock.Mock( return_value=fake_admin_project_id) admin_project_id = self.neutron_api.admin_project_id self.assertEqual(fake_admin_project_id, admin_project_id) self.neutron_api.client.httpclient.auth_token.called self.neutron_api.client.httpclient.authenticate.called def test_admin_project_id_not_exist_with_failure(self): self.neutron_api.client.httpclient = mock.Mock() self.neutron_api.client.httpclient.auth_token = None self.neutron_api.client.httpclient.authenticate = mock.Mock( side_effect=neutron_client_exc.NeutronClientException) self.neutron_api.client.httpclient.auth_tenant_id = mock.Mock() try: self.neutron_api.admin_project_id except exception.NetworkException: pass else: raise Exception('Expected error was not raised') self.assertTrue(self.neutron_api.client.httpclient.authenticate.called) self.assertFalse( self.neutron_api.client.httpclient.auth_tenant_id.called) def test_get_all_admin_project_networks(self): fake_networks = {'networks': ['fake_net_1', 'fake_net_2']} 
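        # The remainder of this test stubs client.list_networks with the fake
        # payload above and checks that get_all_admin_project_networks()
        # unwraps the 'networks' list and queries Neutron with the admin
        # project id and shared=False. (Explanatory comment only.)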
self.mock_object( self.neutron_api.client, 'list_networks', mock.Mock(return_value=fake_networks)) self.neutron_api.client.httpclient = mock.Mock() self.neutron_api.client.httpclient.auth_token = mock.Mock() self.neutron_api.client.httpclient.auth_tenant_id = mock.Mock() networks = self.neutron_api.get_all_admin_project_networks() self.assertEqual(fake_networks['networks'], networks) self.neutron_api.client.httpclient.auth_token.called self.neutron_api.client.httpclient.auth_tenant_id.called self.neutron_api.client.list_networks.assert_called_once_with( tenant_id=self.neutron_api.admin_project_id, shared=False) manila-2.0.0/manila/tests/network/neutron/test_neutron_plugin.py0000664000567000056710000004652412701407107026374 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2015 Mirantis, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt import mock from oslo_config import cfg from manila.common import constants from manila import context from manila.db import api as db_api from manila import exception from manila.network.neutron import api as neutron_api from manila.network.neutron import constants as neutron_constants from manila.network.neutron import neutron_network_plugin as plugin from manila import test from manila.tests import utils as test_utils CONF = cfg.CONF fake_neutron_port = { "status": "test_port_status", "allowed_address_pairs": [], "admin_state_up": True, "network_id": "test_net_id", "tenant_id": "fake_tenant_id", "extra_dhcp_opts": [], "device_owner": "test", "binding:capabilities": {"port_filter": True}, "mac_address": "test_mac", "fixed_ips": [ {"subnet_id": "test_subnet_id", "ip_address": "test_ip"}, ], "id": "test_port_id", "security_groups": ["fake_sec_group_id"], "device_id": "fake_device_id", } fake_share_network = { 'id': 'fake nw info id', 'neutron_subnet_id': 'fake subnet id', 'neutron_net_id': 'fake net id', 'project_id': 'fake project id', 'status': 'test_subnet_status', 'name': 'fake name', 'description': 'fake description', 'security_services': [], 'network_type': 'fake_network_type', 'segmentation_id': 1234, 'ip_version': 4, 'cidr': 'fake_cidr', } fake_share_server = { 'id': 'fake nw info id', 'status': 'test_server_status', 'host': 'fake@host', 'network_allocations': [], 'shares': [], } fake_network_allocation = { 'id': fake_neutron_port['id'], 'share_server_id': fake_share_server['id'], 'ip_address': fake_neutron_port['fixed_ips'][0]['ip_address'], 'mac_address': fake_neutron_port['mac_address'], 'status': constants.STATUS_ACTIVE, 'label': 'user', 'network_type': fake_share_network['network_type'], 'segmentation_id': fake_share_network['segmentation_id'], 'ip_version': fake_share_network['ip_version'], 'cidr': fake_share_network['cidr'], } class NeutronNetworkPluginTest(test.TestCase): def setUp(self): super(NeutronNetworkPluginTest, self).setUp() self.plugin = plugin.NeutronNetworkPlugin() self.plugin.db = db_api self.fake_context = context.RequestContext(user_id='fake user', project_id='fake 
project', is_admin=False) @mock.patch.object(db_api, 'network_allocation_create', mock.Mock(return_values=fake_network_allocation)) @mock.patch.object(db_api, 'share_network_get', mock.Mock(return_value=fake_share_network)) @mock.patch.object(db_api, 'share_server_get', mock.Mock(return_value=fake_share_server)) def test_allocate_network_one_allocation(self): has_provider_nw_ext = mock.patch.object( self.plugin, '_has_provider_network_extension').start() has_provider_nw_ext.return_value = True save_nw_data = mock.patch.object(self.plugin, '_save_neutron_network_data').start() save_subnet_data = mock.patch.object( self.plugin, '_save_neutron_subnet_data').start() with mock.patch.object(self.plugin.neutron_api, 'create_port', mock.Mock(return_value=fake_neutron_port)): self.plugin.allocate_network( self.fake_context, fake_share_server, fake_share_network, allocation_info={'count': 1}) has_provider_nw_ext.assert_any_call() save_nw_data.assert_called_once_with(self.fake_context, fake_share_network) save_subnet_data.assert_called_once_with(self.fake_context, fake_share_network) self.plugin.neutron_api.create_port.assert_called_once_with( fake_share_network['project_id'], network_id=fake_share_network['neutron_net_id'], subnet_id=fake_share_network['neutron_subnet_id'], device_owner='manila:share') db_api.network_allocation_create.assert_called_once_with( self.fake_context, fake_network_allocation) has_provider_nw_ext.stop() save_nw_data.stop() save_subnet_data.stop() @mock.patch.object(db_api, 'network_allocation_create', mock.Mock(return_values=fake_network_allocation)) @mock.patch.object(db_api, 'share_network_get', mock.Mock(return_value=fake_share_network)) @mock.patch.object(db_api, 'share_server_get', mock.Mock(return_value=fake_share_server)) def test_allocate_network_two_allocation(self): has_provider_nw_ext = mock.patch.object( self.plugin, '_has_provider_network_extension').start() has_provider_nw_ext.return_value = True save_nw_data = mock.patch.object(self.plugin, '_save_neutron_network_data').start() save_subnet_data = mock.patch.object( self.plugin, '_save_neutron_subnet_data').start() with mock.patch.object(self.plugin.neutron_api, 'create_port', mock.Mock(return_value=fake_neutron_port)): self.plugin.allocate_network( self.fake_context, fake_share_server, fake_share_network, count=2) neutron_api_calls = [ mock.call(fake_share_network['project_id'], network_id=fake_share_network['neutron_net_id'], subnet_id=fake_share_network['neutron_subnet_id'], device_owner='manila:share'), mock.call(fake_share_network['project_id'], network_id=fake_share_network['neutron_net_id'], subnet_id=fake_share_network['neutron_subnet_id'], device_owner='manila:share'), ] db_api_calls = [ mock.call(self.fake_context, fake_network_allocation), mock.call(self.fake_context, fake_network_allocation) ] self.plugin.neutron_api.create_port.assert_has_calls( neutron_api_calls) db_api.network_allocation_create.assert_has_calls(db_api_calls) has_provider_nw_ext.stop() save_nw_data.stop() save_subnet_data.stop() @mock.patch.object(db_api, 'share_network_update', mock.Mock()) def test_allocate_network_create_port_exception(self): has_provider_nw_ext = mock.patch.object( self.plugin, '_has_provider_network_extension').start() has_provider_nw_ext.return_value = True save_nw_data = mock.patch.object(self.plugin, '_save_neutron_network_data').start() save_subnet_data = mock.patch.object( self.plugin, '_save_neutron_subnet_data').start() create_port = mock.patch.object(self.plugin.neutron_api, 'create_port').start() 
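        # The patches in this test are started manually with .start() rather
        # than through mock_object/addCleanup, so they are stopped explicitly
        # at the end of the test; the NetworkException side effect set on the
        # next line therefore only lives for this test. (Explanatory comment
        # only.)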
create_port.side_effect = exception.NetworkException self.assertRaises(exception.NetworkException, self.plugin.allocate_network, self.fake_context, fake_share_server, fake_share_network) has_provider_nw_ext.stop() save_nw_data.stop() save_subnet_data.stop() create_port.stop() @mock.patch.object(db_api, 'network_allocation_delete', mock.Mock()) @mock.patch.object(db_api, 'share_network_update', mock.Mock()) @mock.patch.object(db_api, 'network_allocations_get_for_share_server', mock.Mock(return_value=[fake_network_allocation])) def test_deallocate_network_nominal(self): share_srv = {'id': fake_share_server['id']} share_srv['network_allocations'] = [fake_network_allocation] with mock.patch.object(self.plugin.neutron_api, 'delete_port', mock.Mock()): self.plugin.deallocate_network(self.fake_context, share_srv) self.plugin.neutron_api.delete_port.assert_called_once_with( fake_network_allocation['id']) db_api.network_allocation_delete.assert_called_once_with( self.fake_context, fake_network_allocation['id']) @mock.patch.object(db_api, 'share_network_update', mock.Mock(return_value=fake_share_network)) @mock.patch.object(db_api, 'network_allocation_update', mock.Mock()) @mock.patch.object(db_api, 'network_allocations_get_for_share_server', mock.Mock(return_value=[fake_network_allocation])) def test_deallocate_network_neutron_api_exception(self): share_srv = {'id': fake_share_server['id']} share_srv['network_allocations'] = [fake_network_allocation] delete_port = mock.patch.object(self.plugin.neutron_api, 'delete_port').start() delete_port.side_effect = exception.NetworkException self.assertRaises(exception.NetworkException, self.plugin.deallocate_network, self.fake_context, share_srv) db_api.network_allocation_update.assert_called_once_with( self.fake_context, fake_network_allocation['id'], {'status': constants.STATUS_ERROR}) delete_port.stop() @mock.patch.object(db_api, 'share_network_update', mock.Mock()) def test_save_neutron_network_data(self): neutron_nw_info = {'provider:network_type': 'vlan', 'provider:segmentation_id': 1000} share_nw_update_dict = {'network_type': 'vlan', 'segmentation_id': 1000} with mock.patch.object(self.plugin.neutron_api, 'get_network', mock.Mock(return_value=neutron_nw_info)): self.plugin._save_neutron_network_data(self.fake_context, fake_share_network) self.plugin.neutron_api.get_network.assert_called_once_with( fake_share_network['neutron_net_id']) self.plugin.db.share_network_update.assert_called_once_with( self.fake_context, fake_share_network['id'], share_nw_update_dict) @mock.patch.object(db_api, 'share_network_update', mock.Mock()) def test_save_neutron_subnet_data(self): neutron_subnet_info = {'cidr': '10.0.0.0/24', 'ip_version': 4} with mock.patch.object(self.plugin.neutron_api, 'get_subnet', mock.Mock(return_value=neutron_subnet_info)): self.plugin._save_neutron_subnet_data(self.fake_context, fake_share_network) self.plugin.neutron_api.get_subnet.assert_called_once_with( fake_share_network['neutron_subnet_id']) self.plugin.db.share_network_update.assert_called_once_with( self.fake_context, fake_share_network['id'], neutron_subnet_info) def test_has_network_provider_extension_true(self): extensions = {neutron_constants.PROVIDER_NW_EXT: {}} with mock.patch.object(self.plugin.neutron_api, 'list_extensions', mock.Mock(return_value=extensions)): result = self.plugin._has_provider_network_extension() self.plugin.neutron_api.list_extensions.assert_any_call() self.assertTrue(result) def test_has_network_provider_extension_false(self): with 
mock.patch.object(self.plugin.neutron_api, 'list_extensions', mock.Mock(return_value={})): result = self.plugin._has_provider_network_extension() self.plugin.neutron_api.list_extensions.assert_any_call() self.assertFalse(result) @ddt.ddt class NeutronSingleNetworkPluginTest(test.TestCase): def setUp(self): super(NeutronSingleNetworkPluginTest, self).setUp() self.context = 'fake_context' def test_init_valid(self): fake_net_id = 'fake_net_id' fake_subnet_id = 'fake_subnet_id' config_data = { 'DEFAULT': { 'neutron_net_id': fake_net_id, 'neutron_subnet_id': fake_subnet_id, } } fake_net = {'subnets': ['fake1', 'fake2', fake_subnet_id]} self.mock_object( neutron_api.API, 'get_network', mock.Mock(return_value=fake_net)) with test_utils.create_temp_config_with_opts(config_data): instance = plugin.NeutronSingleNetworkPlugin() self.assertEqual(fake_net_id, instance.net) self.assertEqual(fake_subnet_id, instance.subnet) neutron_api.API.get_network.assert_called_once_with(fake_net_id) @ddt.data( {'net': None, 'subnet': None}, {'net': 'fake_net_id', 'subnet': None}, {'net': None, 'subnet': 'fake_subnet_id'}) @ddt.unpack def test_init_invalid(self, net, subnet): config_data = dict() # Simulate absence of set values if net: config_data['neutron_net_id'] = net if subnet: config_data['neutron_subnet_id'] = subnet config_data = dict(DEFAULT=config_data) with test_utils.create_temp_config_with_opts(config_data): self.assertRaises( exception.NetworkBadConfigurationException, plugin.NeutronSingleNetworkPlugin) @ddt.data({}, {'subnets': []}, {'subnets': ['different_foo_subnet']}) def test_init_subnet_does_not_belong_to_net(self, fake_net): fake_net_id = 'fake_net_id' config_data = { 'DEFAULT': { 'neutron_net_id': fake_net_id, 'neutron_subnet_id': 'fake_subnet_id', } } self.mock_object( neutron_api.API, 'get_network', mock.Mock(return_value=fake_net)) with test_utils.create_temp_config_with_opts(config_data): self.assertRaises( exception.NetworkBadConfigurationException, plugin.NeutronSingleNetworkPlugin) neutron_api.API.get_network.assert_called_once_with(fake_net_id) def _get_neutron_single_network_plugin_instance(self): fake_subnet_id = 'fake_subnet_id' config_data = { 'DEFAULT': { 'neutron_net_id': 'fake_net_id', 'neutron_subnet_id': fake_subnet_id, } } fake_net = {'subnets': [fake_subnet_id]} self.mock_object( neutron_api.API, 'get_network', mock.Mock(return_value=fake_net)) with test_utils.create_temp_config_with_opts(config_data): instance = plugin.NeutronSingleNetworkPlugin() return instance def test___update_share_network_net_data_same_values(self): instance = self._get_neutron_single_network_plugin_instance() share_network = { 'neutron_net_id': instance.net, 'neutron_subnet_id': instance.subnet, } result = instance._update_share_network_net_data( self.context, share_network) self.assertEqual(share_network, result) def test___update_share_network_net_data_different_values_empty(self): instance = self._get_neutron_single_network_plugin_instance() share_network_input = { 'id': 'fake_share_network_id', } share_network_result = { 'neutron_net_id': instance.net, 'neutron_subnet_id': instance.subnet, } self.mock_object( instance.db, 'share_network_update', mock.Mock(return_value='foo')) instance._update_share_network_net_data( self.context, share_network_input) instance.db.share_network_update.assert_called_once_with( self.context, share_network_input['id'], share_network_result) @ddt.data( {'n': 'fake_net_id', 's': 'bar'}, {'n': 'foo', 's': 'fake_subnet_id'}) @ddt.unpack def 
test___update_share_network_net_data_different_values(self, n, s): instance = self._get_neutron_single_network_plugin_instance() share_network = { 'id': 'fake_share_network_id', 'neutron_net_id': n, 'neutron_subnet_id': s, } self.mock_object( instance.db, 'share_network_update', mock.Mock(return_value=share_network)) self.assertRaises( exception.NetworkBadConfigurationException, instance._update_share_network_net_data, self.context, share_network) self.assertFalse(instance.db.share_network_update.called) def test___update_share_network_net_data_nova_net_id_present(self): instance = self._get_neutron_single_network_plugin_instance() share_network = { 'id': 'fake_share_network_id', 'nova_net_id': 'foo', } self.mock_object( instance.db, 'share_network_update', mock.Mock(return_value=share_network)) self.assertRaises( exception.NetworkBadConfigurationException, instance._update_share_network_net_data, self.context, share_network) self.assertFalse(instance.db.share_network_update.called) @mock.patch.object( plugin.NeutronNetworkPlugin, "allocate_network", mock.Mock()) def test_allocate_network(self): instance = self._get_neutron_single_network_plugin_instance() share_server = 'fake_share_server' share_network = 'fake_share_network' share_network_upd = 'updated_fake_share_network' count = 2 device_owner = 'fake_device_owner' self.mock_object( instance, '_update_share_network_net_data', mock.Mock(return_value=share_network_upd)) instance.allocate_network( self.context, share_server, share_network, count=count, device_owner=device_owner) instance._update_share_network_net_data.assert_called_once_with( self.context, share_network) plugin.NeutronNetworkPlugin.allocate_network.assert_called_once_with( self.context, share_server, share_network_upd, count=count, device_owner=device_owner) manila-2.0.0/manila/tests/network/__init__.py0000664000567000056710000000000012701407107022305 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/network/test_standalone_network_plugin.py0000664000567000056710000004456512701407107027114 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
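# NOTE: illustrative sketch only (an assumption about the plugin's behaviour,
# not upstream code). The 'allowed_cidrs' values asserted in the tests below
# are consistent with 'standalone_network_plugin_allowed_ip_ranges' being
# expanded into spanning CIDRs via netaddr, roughly like:
#
#     import netaddr
#
#     ranges = '10.0.0.3-10.0.0.7,10.0.0.69-10.0.0.157,10.0.0.213'
#     cidrs = []
#     for item in ranges.split(','):
#         first, _, last = item.partition('-')
#         cidrs.extend(str(c) for c in
#                      netaddr.iprange_to_cidrs(first, last or first))
#     # e.g. '10.0.0.3-10.0.0.7' expands to ['10.0.0.3/32', '10.0.0.4/30']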
import ddt import mock import netaddr from oslo_config import cfg import six from manila.common import constants from manila import context from manila import exception from manila.network import standalone_network_plugin as plugin from manila import test from manila.tests import utils as test_utils CONF = cfg.CONF fake_context = context.RequestContext( user_id='fake user', project_id='fake project', is_admin=False) fake_share_server = dict(id='fake_share_server_id') fake_share_network = dict(id='fake_share_network_id') @ddt.ddt class StandaloneNetworkPluginTest(test.TestCase): @ddt.data('custom_config_group_name', 'DEFAULT') def test_init_only_with_required_data_v4(self, group_name): data = { group_name: { 'standalone_network_plugin_gateway': '10.0.0.1', 'standalone_network_plugin_mask': '24', }, } with test_utils.create_temp_config_with_opts(data): instance = plugin.StandaloneNetworkPlugin( config_group_name=group_name) self.assertEqual('10.0.0.1', instance.gateway) self.assertEqual('24', instance.mask) self.assertIsNone(instance.segmentation_id) self.assertIsNone(instance.allowed_ip_ranges) self.assertEqual(4, instance.ip_version) self.assertEqual(netaddr.IPNetwork('10.0.0.1/24'), instance.net) self.assertEqual(['10.0.0.1/24'], instance.allowed_cidrs) self.assertEqual( ('10.0.0.0', '10.0.0.1', '10.0.0.255'), instance.reserved_addresses) @ddt.data('custom_config_group_name', 'DEFAULT') def test_init_with_all_data_v4(self, group_name): data = { group_name: { 'standalone_network_plugin_gateway': '10.0.0.1', 'standalone_network_plugin_mask': '255.255.0.0', 'standalone_network_plugin_network_type': 'vlan', 'standalone_network_plugin_segmentation_id': 1001, 'standalone_network_plugin_allowed_ip_ranges': ( '10.0.0.3-10.0.0.7,10.0.0.69-10.0.0.157,10.0.0.213'), 'standalone_network_plugin_ip_version': 4, }, } allowed_cidrs = [ '10.0.0.3/32', '10.0.0.4/30', '10.0.0.69/32', '10.0.0.70/31', '10.0.0.72/29', '10.0.0.80/28', '10.0.0.96/27', '10.0.0.128/28', '10.0.0.144/29', '10.0.0.152/30', '10.0.0.156/31', '10.0.0.213/32', ] with test_utils.create_temp_config_with_opts(data): instance = plugin.StandaloneNetworkPlugin( config_group_name=group_name) self.assertEqual(4, instance.ip_version) self.assertEqual('10.0.0.1', instance.gateway) self.assertEqual('255.255.0.0', instance.mask) self.assertEqual('vlan', instance.network_type) self.assertEqual(1001, instance.segmentation_id) self.assertEqual(allowed_cidrs, instance.allowed_cidrs) self.assertEqual( ['10.0.0.3-10.0.0.7', '10.0.0.69-10.0.0.157', '10.0.0.213'], instance.allowed_ip_ranges) self.assertEqual( netaddr.IPNetwork('10.0.0.1/255.255.0.0'), instance.net) self.assertEqual( ('10.0.0.0', '10.0.0.1', '10.0.255.255'), instance.reserved_addresses) @ddt.data('custom_config_group_name', 'DEFAULT') def test_init_only_with_required_data_v6(self, group_name): data = { group_name: { 'standalone_network_plugin_gateway': ( '2001:cdba::3257:9652'), 'standalone_network_plugin_mask': '48', 'standalone_network_plugin_ip_version': 6, }, } with test_utils.create_temp_config_with_opts(data): instance = plugin.StandaloneNetworkPlugin( config_group_name=group_name) self.assertEqual( '2001:cdba::3257:9652', instance.gateway) self.assertEqual('48', instance.mask) self.assertIsNone(instance.segmentation_id) self.assertIsNone(instance.allowed_ip_ranges) self.assertEqual(6, instance.ip_version) self.assertEqual( netaddr.IPNetwork('2001:cdba::3257:9652/48'), instance.net) self.assertEqual( ['2001:cdba::3257:9652/48'], instance.allowed_cidrs) self.assertEqual( ('2001:cdba::', 
'2001:cdba::3257:9652', '2001:cdba:0:ffff:ffff:ffff:ffff:ffff'), instance.reserved_addresses) @ddt.data('custom_config_group_name', 'DEFAULT') def test_init_with_all_data_v6(self, group_name): data = { group_name: { 'standalone_network_plugin_gateway': '2001:db8::0001', 'standalone_network_plugin_mask': '88', 'standalone_network_plugin_network_type': 'vlan', 'standalone_network_plugin_segmentation_id': 3999, 'standalone_network_plugin_allowed_ip_ranges': ( '2001:db8::-2001:db8:0000:0000:0000:007f:ffff:ffff'), 'standalone_network_plugin_ip_version': 6, }, } with test_utils.create_temp_config_with_opts(data): instance = plugin.StandaloneNetworkPlugin( config_group_name=group_name) self.assertEqual(6, instance.ip_version) self.assertEqual('2001:db8::0001', instance.gateway) self.assertEqual('88', instance.mask) self.assertEqual('vlan', instance.network_type) self.assertEqual(3999, instance.segmentation_id) self.assertEqual(['2001:db8::/89'], instance.allowed_cidrs) self.assertEqual( ['2001:db8::-2001:db8:0000:0000:0000:007f:ffff:ffff'], instance.allowed_ip_ranges) self.assertEqual( netaddr.IPNetwork('2001:db8::0001/88'), instance.net) self.assertEqual( ('2001:db8::', '2001:db8::0001', '2001:db8::ff:ffff:ffff'), instance.reserved_addresses) @ddt.data('flat', 'vlan', 'vxlan', 'gre') def test_init_with_valid_network_types_v4(self, network_type): data = { 'DEFAULT': { 'standalone_network_plugin_gateway': '10.0.0.1', 'standalone_network_plugin_mask': '255.255.0.0', 'standalone_network_plugin_network_type': network_type, 'standalone_network_plugin_segmentation_id': 1001, 'standalone_network_plugin_ip_version': 4, }, } with test_utils.create_temp_config_with_opts(data): instance = plugin.StandaloneNetworkPlugin( config_group_name='DEFAULT') self.assertEqual(instance.network_type, network_type) @ddt.data( 'foo', 'foovlan', 'vlanfoo', 'foovlanbar', 'None', 'Vlan', 'vlaN') def test_init_with_fake_network_types_v4(self, fake_network_type): data = { 'DEFAULT': { 'standalone_network_plugin_gateway': '10.0.0.1', 'standalone_network_plugin_mask': '255.255.0.0', 'standalone_network_plugin_network_type': fake_network_type, 'standalone_network_plugin_segmentation_id': 1001, 'standalone_network_plugin_ip_version': 4, }, } with test_utils.create_temp_config_with_opts(data): self.assertRaises( cfg.ConfigFileValueError, plugin.StandaloneNetworkPlugin, config_group_name='DEFAULT', ) @ddt.data('custom_config_group_name', 'DEFAULT') def test_invalid_init_without_any_config_definitions(self, group_name): self.assertRaises( exception.NetworkBadConfigurationException, plugin.StandaloneNetworkPlugin, config_group_name=group_name) @ddt.data( {}, {'gateway': '20.0.0.1'}, {'mask': '8'}, {'gateway': '20.0.0.1', 'mask': '33'}, {'gateway': '20.0.0.256', 'mask': '16'}) def test_invalid_init_required_data_improper(self, data): group_name = 'custom_group_name' if 'gateway' in data: data['standalone_network_plugin_gateway'] = data.pop('gateway') if 'mask' in data: data['standalone_network_plugin_mask'] = data.pop('mask') data = {group_name: data} with test_utils.create_temp_config_with_opts(data): self.assertRaises( exception.NetworkBadConfigurationException, plugin.StandaloneNetworkPlugin, config_group_name=group_name) @ddt.data( 'fake', '11.0.0.0-11.0.0.5-11.0.0.11', '11.0.0.0-11.0.0.5', '10.0.10.0-10.0.10.5', '10.0.0.0-10.0.0.5,fake', '10.0.10.0-10.0.10.5,10.0.0.0-10.0.0.5', '10.0.10.0-10.0.10.5,10.0.0.10-10.0.10.5', '10.0.0.0-10.0.0.5,10.0.10.0-10.0.10.5') def test_invalid_init_incorrect_allowed_ip_ranges_v4(self, ip_range): 
group_name = 'DEFAULT' data = { group_name: { 'standalone_network_plugin_gateway': '10.0.0.1', 'standalone_network_plugin_mask': '255.255.255.0', 'standalone_network_plugin_allowed_ip_ranges': ip_range, }, } with test_utils.create_temp_config_with_opts(data): self.assertRaises( exception.NetworkBadConfigurationException, plugin.StandaloneNetworkPlugin, config_group_name=group_name) @ddt.data( {'gateway': '2001:db8::0001', 'vers': 4}, {'gateway': '10.0.0.1', 'vers': 6}) @ddt.unpack def test_invalid_init_mismatch_of_versions(self, gateway, vers): group_name = 'DEFAULT' data = { group_name: { 'standalone_network_plugin_gateway': gateway, 'standalone_network_plugin_ip_version': vers, 'standalone_network_plugin_mask': '25', }, } with test_utils.create_temp_config_with_opts(data): self.assertRaises( exception.NetworkBadConfigurationException, plugin.StandaloneNetworkPlugin, config_group_name=group_name) def test_deallocate_network(self): share_server_id = 'fake_share_server_id' data = { 'DEFAULT': { 'standalone_network_plugin_gateway': '10.0.0.1', 'standalone_network_plugin_mask': '24', }, } fake_allocations = [{'id': 'fake1'}, {'id': 'fake2'}] with test_utils.create_temp_config_with_opts(data): instance = plugin.StandaloneNetworkPlugin() self.mock_object( instance.db, 'network_allocations_get_for_share_server', mock.Mock(return_value=fake_allocations)) self.mock_object(instance.db, 'network_allocation_delete') instance.deallocate_network(fake_context, share_server_id) instance.db.network_allocations_get_for_share_server.\ assert_called_once_with(fake_context, share_server_id) instance.db.network_allocation_delete.\ assert_has_calls([ mock.call(fake_context, 'fake1'), mock.call(fake_context, 'fake2'), ]) def test_allocate_network_zero_addresses_ipv4(self): data = { 'DEFAULT': { 'standalone_network_plugin_gateway': '10.0.0.1', 'standalone_network_plugin_mask': '24', }, } with test_utils.create_temp_config_with_opts(data): instance = plugin.StandaloneNetworkPlugin() self.mock_object(instance.db, 'share_network_update') allocations = instance.allocate_network( fake_context, fake_share_server, fake_share_network, count=0) self.assertEqual([], allocations) instance.db.share_network_update.assert_called_once_with( fake_context, fake_share_network['id'], dict(network_type=None, segmentation_id=None, cidr=six.text_type(instance.net.cidr), ip_version=4)) def test_allocate_network_zero_addresses_ipv6(self): data = { 'DEFAULT': { 'standalone_network_plugin_gateway': '2001:db8::0001', 'standalone_network_plugin_mask': '64', 'standalone_network_plugin_ip_version': 6, }, } with test_utils.create_temp_config_with_opts(data): instance = plugin.StandaloneNetworkPlugin() self.mock_object(instance.db, 'share_network_update') allocations = instance.allocate_network( fake_context, fake_share_server, fake_share_network, count=0) self.assertEqual([], allocations) instance.db.share_network_update.assert_called_once_with( fake_context, fake_share_network['id'], dict(network_type=None, segmentation_id=None, cidr=six.text_type(instance.net.cidr), ip_version=6)) def test_allocate_network_one_ip_address_ipv4_no_usages_exist(self): data = { 'DEFAULT': { 'standalone_network_plugin_network_type': 'vlan', 'standalone_network_plugin_segmentation_id': 1003, 'standalone_network_plugin_gateway': '10.0.0.1', 'standalone_network_plugin_mask': '24', }, } with test_utils.create_temp_config_with_opts(data): instance = plugin.StandaloneNetworkPlugin() self.mock_object(instance.db, 'share_network_update') self.mock_object(instance.db, 
'network_allocation_create') self.mock_object( instance.db, 'network_allocations_get_by_ip_address', mock.Mock(return_value=[])) allocations = instance.allocate_network( fake_context, fake_share_server, fake_share_network) self.assertEqual(1, len(allocations)) na_data = { 'network_type': 'vlan', 'segmentation_id': 1003, 'cidr': '10.0.0.0/24', 'ip_version': 4, } instance.db.share_network_update.assert_called_once_with( fake_context, fake_share_network['id'], na_data) instance.db.network_allocations_get_by_ip_address.assert_has_calls( [mock.call(fake_context, '10.0.0.2')]) instance.db.network_allocation_create.assert_called_once_with( fake_context, dict(share_server_id=fake_share_server['id'], ip_address='10.0.0.2', status=constants.STATUS_ACTIVE, label='user', **na_data)) def test_allocate_network_two_ip_addresses_ipv4_two_usages_exist(self): ctxt = type('FakeCtxt', (object,), {'fake': ['10.0.0.2', '10.0.0.4']}) def fake_get_allocations_by_ip_address(context, ip_address): if ip_address not in context.fake: context.fake.append(ip_address) return [] else: return context.fake data = { 'DEFAULT': { 'standalone_network_plugin_gateway': '10.0.0.1', 'standalone_network_plugin_mask': '24', }, } with test_utils.create_temp_config_with_opts(data): instance = plugin.StandaloneNetworkPlugin() self.mock_object(instance.db, 'share_network_update') self.mock_object(instance.db, 'network_allocation_create') self.mock_object( instance.db, 'network_allocations_get_by_ip_address', mock.Mock(side_effect=fake_get_allocations_by_ip_address)) allocations = instance.allocate_network( ctxt, fake_share_server, fake_share_network, count=2) self.assertEqual(2, len(allocations)) na_data = { 'network_type': None, 'segmentation_id': None, 'cidr': six.text_type(instance.net.cidr), 'ip_version': 4, } instance.db.share_network_update.assert_called_once_with( ctxt, fake_share_network['id'], dict(**na_data)) instance.db.network_allocations_get_by_ip_address.assert_has_calls( [mock.call(ctxt, '10.0.0.2'), mock.call(ctxt, '10.0.0.3'), mock.call(ctxt, '10.0.0.4'), mock.call(ctxt, '10.0.0.5')]) instance.db.network_allocation_create.assert_has_calls([ mock.call( ctxt, dict(share_server_id=fake_share_server['id'], ip_address='10.0.0.3', status=constants.STATUS_ACTIVE, label='user', **na_data)), mock.call( ctxt, dict(share_server_id=fake_share_server['id'], ip_address='10.0.0.5', status=constants.STATUS_ACTIVE, label='user', **na_data)), ]) def test_allocate_network_no_available_ipv4_addresses(self): data = { 'DEFAULT': { 'standalone_network_plugin_gateway': '10.0.0.1', 'standalone_network_plugin_mask': '30', }, } with test_utils.create_temp_config_with_opts(data): instance = plugin.StandaloneNetworkPlugin() self.mock_object(instance.db, 'share_network_update') self.mock_object(instance.db, 'network_allocation_create') self.mock_object( instance.db, 'network_allocations_get_by_ip_address', mock.Mock(return_value=['not empty list'])) self.assertRaises( exception.NetworkBadConfigurationException, instance.allocate_network, fake_context, fake_share_server, fake_share_network) instance.db.share_network_update.assert_called_once_with( fake_context, fake_share_network['id'], dict(network_type=None, segmentation_id=None, cidr=six.text_type(instance.net.cidr), ip_version=4)) instance.db.network_allocations_get_by_ip_address.assert_has_calls( [mock.call(fake_context, '10.0.0.2')]) manila-2.0.0/manila/tests/network/test_nova_network_plugin.py0000664000567000056710000003616212701407107025721 0ustar jenkinsjenkins00000000000000# Copyright 2015 
Mirantis, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt import mock from manila import context from manila import exception from manila.network import nova_network_plugin as plugin from manila import test from manila.tests import utils as test_utils @ddt.ddt class NovaNetworkPluginTest(test.TestCase): def setUp(self): super(NovaNetworkPluginTest, self).setUp() self.fake_context = context.RequestContext( user_id='fake user', project_id='fake project', is_admin=False) self.instance = plugin.NovaNetworkPlugin() self.share_server = dict(id='fake_share_server_id') self.share_network = dict( id='fake_sn_id', nova_net_id='fake_nova_net_id') def test_allocate_network_get_zero(self): share_network = 'fake_share_network' allocations = self.instance.allocate_network( self.fake_context, self.share_server, share_network, count=0) self.assertEqual([], allocations) self.assertTrue(hasattr(self.instance, 'label')) self.assertEqual('user', self.instance.label) @ddt.data('flat', 'vlan') def test_allocate_network_get_one(self, net_type): def fake_get_ip_from_db(context, ip_addr): return [] if ip_addr != '20.0.0.7' else ['fake not empty list'] def fake_fixed_ip_get(context, ip_addr): if ip_addr == '20.0.0.8': return dict(host='foo', hostname='bar') return dict(host=None, hostname=None) share_network = dict(id='fake_sn_id', nova_net_id='fake_nova_net_id') nova_net = dict( cidr='20.0.0.0/24', cidr_v6=None, gateway='20.0.0.1', gateway_v6=None, dhcp_server='20.0.0.2', broadcast='20.0.0.255', vpn_private_address='20.0.0.3', vpn_public_address='20.0.0.4', dns1='20.0.0.5', dns2='20.0.0.6', vlan=None) if net_type == 'vlan': nova_net['vlan'] = 100 self.mock_object(self.instance.nova_api, 'fixed_ip_reserve') self.mock_object( self.instance.nova_api, 'fixed_ip_get', mock.Mock(side_effect=fake_fixed_ip_get)) self.mock_object( self.instance.nova_api, 'network_get', mock.Mock(return_value=nova_net)) self.mock_object(self.instance.db, 'share_network_update') self.mock_object( self.instance.db, 'network_allocations_get_by_ip_address', mock.Mock(side_effect=fake_get_ip_from_db)) expected_ip_address = '20.0.0.9' allocations = self.instance.allocate_network( self.fake_context, self.share_server, share_network) self.assertEqual(1, len(allocations)) self.assertEqual( self.share_server['id'], allocations[0]['share_server_id']) self.assertEqual(expected_ip_address, allocations[0]['ip_address']) self.instance.nova_api.network_get.assert_called_once_with( self.instance.admin_context, share_network['nova_net_id']) self.instance.nova_api.fixed_ip_reserve.assert_called_once_with( self.instance.admin_context, expected_ip_address) self.instance.db.share_network_update.assert_called_once_with( self.fake_context, share_network['id'], dict(cidr=nova_net['cidr'], ip_version=4, segmentation_id=nova_net['vlan'], network_type=net_type)) self.instance.db.network_allocations_get_by_ip_address.\ assert_has_calls([ mock.call(self.fake_context, '20.0.0.7'), mock.call(self.fake_context, '20.0.0.8'), mock.call(self.fake_context, 
'20.0.0.9')]) self.instance.nova_api.fixed_ip_get.assert_has_calls([ mock.call(self.instance.admin_context, '20.0.0.8'), mock.call(self.instance.admin_context, '20.0.0.9')]) @ddt.data('flat', 'vlan') def test_allocate_network_get_two(self, net_type): def fake_get_ip_from_db(context, ip_addr): return [] if ip_addr != '20.0.0.7' else ['fake not empty list'] def fake_fixed_ip_get(context, ip_addr): if ip_addr == '20.0.0.8': return dict(host='foo', hostname='bar') return dict(host=None, hostname=None) nova_net = dict( cidr='20.0.0.0/24', cidr_v6=None, gateway='20.0.0.1', gateway_v6=None, dhcp_server='20.0.0.254', broadcast='20.0.0.255', vpn_private_address='20.0.0.3', vpn_public_address='20.0.0.4', dns1='20.0.0.5', dns2='20.0.0.6', vlan=None) if net_type == 'vlan': nova_net['vlan'] = 100 self.mock_object(self.instance.nova_api, 'fixed_ip_reserve') self.mock_object( self.instance.nova_api, 'fixed_ip_get', mock.Mock(side_effect=fake_fixed_ip_get)) self.mock_object( self.instance.nova_api, 'network_get', mock.Mock(return_value=nova_net)) self.mock_object(self.instance.db, 'share_network_update') self.mock_object( self.instance.db, 'network_allocations_get_by_ip_address', mock.Mock(side_effect=fake_get_ip_from_db)) expected_ip_address1 = '20.0.0.2' expected_ip_address2 = '20.0.0.9' allocations = self.instance.allocate_network( self.fake_context, self.share_server, self.share_network, count=2) self.assertEqual(2, len(allocations)) for allocation in allocations: self.assertEqual( self.share_server['id'], allocation['share_server_id']) self.assertEqual(expected_ip_address1, allocations[0]['ip_address']) self.assertEqual(expected_ip_address2, allocations[1]['ip_address']) self.instance.nova_api.network_get.assert_called_once_with( self.instance.admin_context, self.share_network['nova_net_id']) self.instance.nova_api.fixed_ip_reserve.assert_has_calls([ mock.call(self.instance.admin_context, expected_ip_address1), mock.call(self.instance.admin_context, expected_ip_address2)]) self.instance.db.share_network_update.assert_called_once_with( self.fake_context, self.share_network['id'], dict(cidr=nova_net['cidr'], ip_version=4, segmentation_id=nova_net['vlan'], network_type=net_type)) self.instance.db.network_allocations_get_by_ip_address.\ assert_has_calls([ mock.call(self.fake_context, '20.0.0.2'), mock.call(self.fake_context, '20.0.0.7'), mock.call(self.fake_context, '20.0.0.8'), mock.call(self.fake_context, '20.0.0.9')]) self.instance.nova_api.fixed_ip_get.assert_has_calls([ mock.call(self.instance.admin_context, '20.0.0.2'), mock.call(self.instance.admin_context, '20.0.0.8'), mock.call(self.instance.admin_context, '20.0.0.9')]) def test_allocate_network_nova_net_id_no_available_ips_left(self): nova_net = dict( id='fake_net_id', cidr='20.0.0.0/24', cidr_v6=None, gateway='20.0.0.1', gateway_v6=None, dhcp_server='20.0.0.2', broadcast='20.0.0.255', vpn_private_address='20.0.0.3', vpn_public_address='20.0.0.4', dns1='20.0.0.5', dns2='20.0.0.6', vlan=100) self.mock_object( self.instance.nova_api, 'network_get', mock.Mock(return_value=nova_net)) self.mock_object(self.instance.db, 'share_network_update') self.mock_object( self.instance.db, 'network_allocations_get_by_ip_address', mock.Mock(return_value=['fake not empty list'])) self.assertRaises( exception.NetworkBadConfigurationException, self.instance.allocate_network, self.fake_context, self.share_server, self.share_network) self.instance.nova_api.network_get.assert_called_once_with( self.instance.admin_context, self.share_network['nova_net_id']) 
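# Every candidate IP is reported as "already used" by the mocked
# network_allocations_get_by_ip_address, so allocation exhausts the whole
# network before failing. The call count of 248 asserted below presumably
# corresponds to the 256 addresses of 20.0.0.0/24 minus the 8 that are never
# offered: the network address plus the gateway, DHCP server, broadcast,
# both VPN addresses and both DNS servers listed in the fake nova_net above.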
self.instance.db.share_network_update.assert_called_once_with( self.fake_context, self.share_network['id'], dict(cidr=nova_net['cidr'], ip_version=4, segmentation_id=nova_net['vlan'], network_type='vlan')) self.assertEqual( 248, self.instance.db.network_allocations_get_by_ip_address.call_count) @ddt.data(dict(), dict(nova_net_id=None)) def test_allocate_network_nova_net_id_is_not_provided(self, share_network): self.assertRaises( exception.NetworkException, self.instance.allocate_network, self.fake_context, self.share_server, share_network) def test_deallocate_network(self): fake_alloc = dict(id='fake_alloc_id', ip_address='fake_ip_address') self.mock_object(self.instance.nova_api, 'fixed_ip_unreserve') self.mock_object(self.instance.db, 'network_allocation_delete') self.mock_object( self.instance.db, 'network_allocations_get_for_share_server', mock.Mock(return_value=[fake_alloc])) self.instance.deallocate_network( self.fake_context, self.share_server['id']) self.instance.db.network_allocations_get_for_share_server.\ assert_called_once_with( self.fake_context, self.share_server['id']) self.instance.db.network_allocation_delete.assert_called_once_with( self.fake_context, fake_alloc['id']) self.instance.nova_api.fixed_ip_unreserve.assert_called_once_with( self.instance.admin_context, fake_alloc['ip_address']) @ddt.ddt class NovaSingleNetworkPluginTest(test.TestCase): def setUp(self): super(NovaSingleNetworkPluginTest, self).setUp() self.share_server = dict(id='fake_share_server_id') self.context = context.RequestContext( user_id='fake user', project_id='fake project', is_admin=False) def _get_instance(self, label=None): nova_net_id = 'fake_nova_net_id' config_data = dict( DEFAULT=dict(nova_single_network_plugin_net_id=nova_net_id)) with test_utils.create_temp_config_with_opts(config_data): return plugin.NovaSingleNetworkPlugin(label=label) def test_init_valid(self): nova_net_id = 'fake_nova_net_id' config_data = dict( DEFAULT=dict(nova_single_network_plugin_net_id=nova_net_id)) with test_utils.create_temp_config_with_opts(config_data): instance = plugin.NovaSingleNetworkPlugin() self.assertEqual(nova_net_id, instance.net_id) @ddt.data(dict(), dict(net='')) def test_init_invalid(self, data): config_data = dict(DEFAULT=data) with test_utils.create_temp_config_with_opts(config_data): self.assertRaises( exception.NetworkBadConfigurationException, plugin.NovaSingleNetworkPlugin) def test_allocate_network_net_is_not_set_in_share_network(self): instance = self._get_instance() share_network = dict(id='fake_share_network') updated_share_network = dict(id='fake_updated_share_network') allocations = ['foo', 'bar'] self.mock_object( instance.db, 'share_network_update', mock.Mock(return_value=updated_share_network)) self.mock_object( instance, '_allocate_network', mock.Mock(return_value=allocations)) result = instance.allocate_network( self.context, self.share_server, share_network, count=2) self.assertEqual(allocations, result) instance.db.share_network_update.assert_called_once_with( self.context, share_network['id'], dict(nova_net_id='fake_nova_net_id')) instance._allocate_network.assert_called_once_with( self.context, self.share_server, updated_share_network, count=2) def test_allocate_network_net_is_set_in_share_network(self): instance = self._get_instance() share_network = dict( id='fake_share_network', nova_net_id='fake_nova_net_id') allocations = ['foo', 'bar'] self.mock_object(instance.db, 'share_network_update') self.mock_object( instance, '_allocate_network', 
mock.Mock(return_value=allocations)) result = instance.allocate_network( self.context, self.share_server, share_network, count=2) self.assertEqual(allocations, result) instance.db.share_network_update.assert_has_calls([]) instance._allocate_network.assert_called_once_with( self.context, self.share_server, share_network, count=2) def test_allocate_network_with_admin_label(self): instance = self._get_instance(label='admin') allocations = ['foo', 'bar'] self.mock_object(instance.db, 'share_network_update') self.mock_object( instance, '_allocate_network', mock.Mock(return_value=allocations)) fake_share_network = {'nova_net_id': 'fake_nova_net_id'} result = instance.allocate_network( self.context, self.share_server, fake_share_network, count=2) self.assertTrue(hasattr(instance, 'label')) self.assertEqual('admin', instance.label) self.assertEqual(allocations, result) instance.db.share_network_update.assert_has_calls([]) instance._allocate_network.assert_called_once_with( self.context, self.share_server, fake_share_network, count=2) def test_allocate_network_different_nova_net_id_is_set(self): instance = self._get_instance() share_network = dict( id='fake_share_network', nova_net_id='foobar') self.mock_object(instance.db, 'share_network_update') self.mock_object(instance, '_allocate_network') self.assertRaises( exception.NetworkBadConfigurationException, instance.allocate_network, self.context, self.share_server, share_network, count=3) instance.db.share_network_update.assert_has_calls([]) instance._allocate_network.assert_has_calls([]) @ddt.data( dict(id='foo', neutron_net_id='bar'), dict(id='foo', neutron_subnet_id='quuz'), dict(id='foo', neutron_net_id='bar', neutron_subnet_id='quuz')) def test_allocate_network_neutron_data_exist(self, sn): instance = self._get_instance() self.mock_object(instance.db, 'share_network_update') self.mock_object(instance, '_allocate_network') self.assertRaises( exception.NetworkBadConfigurationException, instance.allocate_network, self.context, self.share_server, sn, count=3) instance.db.share_network_update.assert_has_calls([]) instance._allocate_network.assert_has_calls([]) manila-2.0.0/manila/tests/policy.json0000664000567000056710000001062612701407107020714 0ustar jenkinsjenkins00000000000000{ "context_is_admin": "role:admin", "admin_api": "is_admin:True", "admin_or_owner": "is_admin:True or project_id:%(project_id)s", "default": "rule:admin_or_owner", "availability_zone:index": "rule:default", "quota_set:update": "rule:admin_api", "quota_set:show": "rule:default", "quota_set:delete": "rule:admin_api", "quota_class_set:show": "rule:default", "quota_class_set:update": "rule:admin_api", "service:index": "rule:admin_api", "service:update": "rule:admin_api", "share:create": "", "share:list_by_share_server_id": "rule:admin_api", "share:get": "", "share:get_all": "", "share:delete": "rule:default", "share:update": "rule:default", "share:snapshot_update": "", "share:create_snapshot": "", "share:delete_snapshot": "", "share:get_snapshot": "", "share:get_all_snapshots": "", "share:extend": "", "share:shrink": "", "share:manage": "rule:admin_api", "share:unmanage": "rule:admin_api", "share:force_delete": "rule:admin_api", "share:reset_status": "rule:admin_api", "share:migration_start": "rule:admin_api", "share:migration_complete": "rule:admin_api", "share:migration_cancel": "rule:admin_api", "share:migration_get_progress": "rule:admin_api", "share_export_location:index": "rule:default", "share_export_location:show": "rule:default", "share_type:index": "rule:default", 
"share_type:show": "rule:default", "share_type:default": "rule:default", "share_type:create": "rule:default", "share_type:delete": "rule:default", "share_type:add_project_access": "rule:admin_api", "share_type:list_project_access": "rule:admin_api", "share_type:remove_project_access": "rule:admin_api", "share_types_extra_spec:create": "rule:default", "share_types_extra_spec:update": "rule:default", "share_types_extra_spec:show": "rule:default", "share_types_extra_spec:index": "rule:default", "share_types_extra_spec:delete": "rule:default", "share_instance:index": "rule:admin_api", "share_instance:show": "rule:admin_api", "share_instance:force_delete": "rule:admin_api", "share_instance:reset_status": "rule:admin_api", "share_instance_export_location:index": "rule:admin_api", "share_instance_export_location:show": "rule:admin_api", "share_snapshot:force_delete": "rule:admin_api", "share_snapshot:reset_status": "rule:admin_api", "share_snapshot:manage_snapshot": "rule:admin_api", "share_snapshot:unmanage_snapshot": "rule:admin_api", "share_network:create": "", "share_network:index": "", "share_network:detail": "", "share_network:show": "", "share_network:update": "", "share_network:delete": "", "share_network:get_all_share_networks": "rule:admin_api", "share_server:index": "rule:admin_api", "share_server:show": "rule:admin_api", "share_server:details": "rule:admin_api", "share_server:delete": "rule:admin_api", "share:get_share_metadata": "", "share:delete_share_metadata": "", "share:update_share_metadata": "", "share_extension:availability_zones": "", "security_service:index": "", "security_service:get_all_security_services": "rule:admin_api", "scheduler_stats:pools:index": "rule:admin_api", "scheduler_stats:pools:detail": "rule:admin_api", "consistency_group:create" : "rule:default", "consistency_group:delete": "rule:default", "consistency_group:update": "rule:default", "consistency_group:get": "rule:default", "consistency_group:get_all": "rule:default", "consistency_group:create_cgsnapshot" : "rule:default", "consistency_group:delete_cgsnapshot": "rule:default", "consistency_group:force_delete": "rule:admin_api", "consistency_group:reset_status": "rule:admin_api", "consistency_group:get_cgsnapshot": "rule:default", "consistency_group:get_all_cgsnapshots": "rule:default", "cgsnapshot:force_delete": "rule:admin_api", "cgsnapshot:reset_status": "rule:admin_api", "share_replica:get_all": "rule:default", "share_replica:show": "rule:default", "share_replica:create" : "rule:default", "share_replica:delete": "rule:default", "share_replica:promote": "rule:default", "share_replica:resync": "rule:admin_api", "share_replica:reset_status": "rule:admin_api", "share_replica:force_delete": "rule:admin_api", "share_replica:reset_replica_state": "rule:admin_api" } manila-2.0.0/manila/tests/test_wsgi.py0000664000567000056710000002676712701407107021121 0ustar jenkinsjenkins00000000000000# Copyright 2011 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for `manila.wsgi`.""" import os.path import ssl import tempfile import ddt import eventlet import mock from oslo_config import cfg from oslo_utils import netutils import six from six.moves import urllib import testtools import webob import webob.dec from manila.api.middleware import fault from manila import exception from manila import test from manila import utils import manila.wsgi CONF = cfg.CONF TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), 'var')) class TestLoaderNothingExists(test.TestCase): """Loader tests where os.path.exists always returns False.""" def test_config_not_found(self): self.assertRaises( manila.exception.ConfigNotFound, manila.wsgi.Loader, 'nonexistent_file.ini', ) class TestLoaderNormalFilesystem(test.TestCase): """Loader tests with normal filesystem (unmodified os.path module).""" _paste_config = """ [app:test_app] use = egg:Paste#static document_root = /tmp """ def setUp(self): super(TestLoaderNormalFilesystem, self).setUp() self.config = tempfile.NamedTemporaryFile(mode="w+t") self.config.write(self._paste_config.lstrip()) self.config.seek(0) self.config.flush() self.loader = manila.wsgi.Loader(self.config.name) self.addCleanup(self.config.close) def test_config_found(self): self.assertEqual(self.config.name, self.loader.config_path) def test_app_not_found(self): self.assertRaises( manila.exception.PasteAppNotFound, self.loader.load_app, "non-existent app", ) def test_app_found(self): url_parser = self.loader.load_app("test_app") self.assertEqual("/tmp", url_parser.directory) @ddt.ddt class TestWSGIServer(test.TestCase): """WSGI server tests.""" def test_no_app(self): server = manila.wsgi.Server("test_app", None, host="127.0.0.1", port=0) self.assertEqual("test_app", server.name) def test_start_random_port(self): server = manila.wsgi.Server("test_random_port", None, host="127.0.0.1") server.start() self.assertNotEqual(0, server.port) server.stop() server.wait() @testtools.skipIf(not utils.is_ipv6_configured(), "Test requires an IPV6 configured interface") @testtools.skipIf(utils.is_eventlet_bug105(), 'Eventlet bug #105 affect test results.') def test_start_random_port_with_ipv6(self): server = manila.wsgi.Server("test_random_port", None, host="::1") server.start() self.assertEqual("::1", server.host) self.assertNotEqual(0, server.port) server.stop() server.wait() def test_start_with_default_tcp_options(self): server = manila.wsgi.Server("test_tcp_options", None, host="127.0.0.1") self.mock_object( netutils, 'set_tcp_keepalive') server.start() netutils.set_tcp_keepalive.assert_called_once_with( mock.ANY, tcp_keepalive=True, tcp_keepalive_count=None, tcp_keepalive_interval=None, tcp_keepidle=600) def test_start_with_custom_tcp_options(self): CONF.set_default("tcp_keepalive", False) CONF.set_default("tcp_keepalive_count", 33) CONF.set_default("tcp_keepalive_interval", 22) CONF.set_default("tcp_keepidle", 11) server = manila.wsgi.Server("test_tcp_options", None, host="127.0.0.1") self.mock_object( netutils, 'set_tcp_keepalive') server.start() netutils.set_tcp_keepalive.assert_called_once_with( mock.ANY, tcp_keepalive=False, tcp_keepalive_count=33, tcp_keepalive_interval=22, tcp_keepidle=11) def test_app(self): self.mock_object( eventlet, 'spawn', mock.Mock(side_effect=eventlet.spawn)) greetings = 'Hello, World!!!' 
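# The minimal WSGI app defined below simply returns `greetings` for '/', so
# the HTTP round trip can be asserted. eventlet.spawn was wrapped above with
# side_effect=eventlet.spawn, making it a pass-through spy: the real server
# still starts, while the keyword arguments passed to it can be verified
# afterwards.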
def hello_world(env, start_response): if env['PATH_INFO'] != '/': start_response('404 Not Found', [('Content-Type', 'text/plain')]) return ['Not Found\r\n'] start_response('200 OK', [('Content-Type', 'text/plain')]) return [greetings] server = manila.wsgi.Server( "test_app", hello_world, host="127.0.0.1", port=0) server.start() response = urllib.request.urlopen('http://127.0.0.1:%d/' % server.port) self.assertEqual(six.b(greetings), response.read()) # Verify provided parameters to eventlet.spawn func eventlet.spawn.assert_called_once_with( func=eventlet.wsgi.server, sock=mock.ANY, site=server.app, protocol=server._protocol, custom_pool=server._pool, log=server._logger, socket_timeout=server.client_socket_timeout, keepalive=manila.wsgi.CONF.wsgi_keep_alive, ) server.stop() @ddt.data(0, 0.1, 1, None) def test_init_server_with_socket_timeout(self, client_socket_timeout): CONF.set_default("client_socket_timeout", client_socket_timeout) server = manila.wsgi.Server( "test_app", lambda *args, **kwargs: None, host="127.0.0.1", port=0) self.assertEqual(client_socket_timeout, server.client_socket_timeout) @testtools.skipIf(six.PY3, "bug/1482633") def test_app_using_ssl(self): CONF.set_default("ssl_cert_file", os.path.join(TEST_VAR_DIR, 'certificate.crt')) CONF.set_default("ssl_key_file", os.path.join(TEST_VAR_DIR, 'privatekey.key')) greetings = 'Hello, World!!!' @webob.dec.wsgify def hello_world(req): return greetings server = manila.wsgi.Server( "test_app", hello_world, host="127.0.0.1", port=0) server.start() if hasattr(ssl, '_create_unverified_context'): response = urllib.request.urlopen( 'https://127.0.0.1:%d/' % server.port, context=ssl._create_unverified_context()) else: response = urllib.request.urlopen( 'https://127.0.0.1:%d/' % server.port) self.assertEqual(greetings, response.read()) server.stop() @testtools.skipIf(not utils.is_ipv6_configured(), "Test requires an IPV6 configured interface") @testtools.skipIf(utils.is_eventlet_bug105(), 'Eventlet bug #105 affect test results.') @testtools.skipIf(six.PY3, "bug/1482633") def test_app_using_ipv6_and_ssl(self): CONF.set_default("ssl_cert_file", os.path.join(TEST_VAR_DIR, 'certificate.crt')) CONF.set_default("ssl_key_file", os.path.join(TEST_VAR_DIR, 'privatekey.key')) greetings = 'Hello, World!!!' 
@webob.dec.wsgify def hello_world(req): return greetings server = manila.wsgi.Server("test_app", hello_world, host="::1", port=0) server.start() if hasattr(ssl, '_create_unverified_context'): response = urllib.request.urlopen( 'https://[::1]:%d/' % server.port, context=ssl._create_unverified_context()) else: response = urllib.request.urlopen( 'https://[::1]:%d/' % server.port) self.assertEqual(greetings, response.read()) server.stop() def test_reset_pool_size_to_default(self): server = manila.wsgi.Server("test_resize", None, host="127.0.0.1") server.start() # Stopping the server, which in turn sets pool size to 0 server.stop() self.assertEqual(0, server._pool.size) # Resetting pool size to default server.reset() server.start() self.assertEqual(1000, server._pool.size) class ExceptionTest(test.TestCase): def _wsgi_app(self, inner_app): return fault.FaultWrapper(inner_app) def _do_test_exception_safety_reflected_in_faults(self, expose): class ExceptionWithSafety(exception.ManilaException): safe = expose @webob.dec.wsgify def fail(req): raise ExceptionWithSafety('some explanation') api = self._wsgi_app(fail) resp = webob.Request.blank('/').get_response(api) self.assertIn('{"computeFault', six.text_type(resp.body), resp.body) expected = ('ExceptionWithSafety: some explanation' if expose else 'The server has either erred or is incapable ' 'of performing the requested operation.') self.assertIn(expected, six.text_type(resp.body), resp.body) self.assertEqual(500, resp.status_int, resp.body) def test_safe_exceptions_are_described_in_faults(self): self._do_test_exception_safety_reflected_in_faults(True) def test_unsafe_exceptions_are_not_described_in_faults(self): self._do_test_exception_safety_reflected_in_faults(False) def _do_test_exception_mapping(self, exception_type, msg): @webob.dec.wsgify def fail(req): raise exception_type(msg) api = self._wsgi_app(fail) resp = webob.Request.blank('/').get_response(api) self.assertIn(msg, six.text_type(resp.body), resp.body) self.assertEqual(exception_type.code, resp.status_int, resp.body) if hasattr(exception_type, 'headers'): for (key, value) in exception_type.headers.items(): self.assertTrue(key in resp.headers) self.assertEqual(value, resp.headers[key]) def test_quota_error_mapping(self): self._do_test_exception_mapping(exception.QuotaError, 'too many used') def test_non_manila_notfound_exception_mapping(self): class ExceptionWithCode(Exception): code = 404 self._do_test_exception_mapping(ExceptionWithCode, 'NotFound') def test_non_manila_exception_mapping(self): class ExceptionWithCode(Exception): code = 417 self._do_test_exception_mapping(ExceptionWithCode, 'Expectation failed') def test_exception_with_none_code_throws_500(self): class ExceptionWithNoneCode(Exception): code = None @webob.dec.wsgify def fail(req): raise ExceptionWithNoneCode() api = self._wsgi_app(fail) resp = webob.Request.blank('/').get_response(api) self.assertEqual(500, resp.status_int) manila-2.0.0/manila/tests/compute/0000775000567000056710000000000012701407265020176 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/compute/__init__.py0000664000567000056710000000000012701407107022270 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/compute/test_nova.py0000664000567000056710000003124612701407107022553 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt import mock from novaclient import exceptions as nova_exception from novaclient import utils from novaclient.v2 import servers as nova_servers from manila.compute import nova from manila import context from manila import exception from manila import test from manila.volume import cinder class Volume(object): def __init__(self, volume_id): self.id = volume_id self.name = volume_id class Network(object): def __init__(self, net_id): self.id = net_id self.label = 'fake_label_%s' % net_id class FakeNovaClient(object): class Servers(object): def get(self, instance_id): return {'id': instance_id} def list(self, *args, **kwargs): return [{'id': 'id1'}, {'id': 'id2'}] def create(self, *args, **kwargs): return {'id': 'created_id'} def __getattr__(self, item): return None class Volumes(object): def get(self, volume_id): return Volume(volume_id) def list(self, detailed, *args, **kwargs): return [{'id': 'id1'}, {'id': 'id2'}] def create(self, *args, **kwargs): return {'id': 'created_id'} def __getattr__(self, item): return None class Networks(object): def get(self, net_id): return Network(net_id) class FixedIPs(object): def get(self, fixed_ip): return dict(address=fixed_ip) def reserve(self, fixed_ip): return None def unreserve(self, fixed_ip): return None def __init__(self): self.servers = self.Servers() self.volumes = self.Volumes() self.keypairs = self.servers self.networks = self.Networks() self.fixed_ips = self.FixedIPs() @nova.translate_server_exception def decorated_by_translate_server_exception(self, context, instance_id, exc): if exc: raise exc(instance_id) else: return 'OK' @ddt.ddt class TranslateServerExceptionTestCase(test.TestCase): def test_translate_server_exception(self): result = decorated_by_translate_server_exception( 'foo_self', 'foo_ctxt', 'foo_instance_id', None) self.assertEqual('OK', result) def test_translate_server_exception_not_found(self): self.assertRaises( exception.InstanceNotFound, decorated_by_translate_server_exception, 'foo_self', 'foo_ctxt', 'foo_instance_id', nova_exception.NotFound) def test_translate_server_exception_bad_request(self): self.assertRaises( exception.InvalidInput, decorated_by_translate_server_exception, 'foo_self', 'foo_ctxt', 'foo_instance_id', nova_exception.BadRequest) @ddt.data( nova_exception.HTTPNotImplemented, nova_exception.RetryAfterException, nova_exception.Unauthorized, nova_exception.Forbidden, nova_exception.MethodNotAllowed, nova_exception.OverLimit, nova_exception.RateLimit, ) def test_translate_server_exception_other_exception(self, exc): self.assertRaises( exception.ManilaException, decorated_by_translate_server_exception, 'foo_self', 'foo_ctxt', 'foo_instance_id', exc) @ddt.ddt class NovaApiTestCase(test.TestCase): def setUp(self): super(NovaApiTestCase, self).setUp() self.api = nova.API() self.novaclient = FakeNovaClient() self.ctx = context.get_admin_context() self.mock_object(nova, 'novaclient', mock.Mock(return_value=self.novaclient)) self.mock_object(nova, '_untranslate_server_summary_view', lambda server: server) def test_server_create(self): result = self.api.server_create(self.ctx, 'server_name', 'fake_image', 
'fake_flavor', None, None, None) self.assertEqual('created_id', result['id']) def test_server_delete(self): self.mock_object(self.novaclient.servers, 'delete') self.api.server_delete(self.ctx, 'id1') self.novaclient.servers.delete.assert_called_once_with('id1') def test_server_get(self): instance_id = 'instance_id1' result = self.api.server_get(self.ctx, instance_id) self.assertEqual(instance_id, result['id']) def test_server_get_by_name_or_id(self): instance_id = 'instance_id1' server = {'id': instance_id, 'fake_key': 'fake_value'} self.mock_object(utils, 'find_resource', mock.Mock(return_value=server)) result = self.api.server_get_by_name_or_id(self.ctx, instance_id) self.assertEqual(instance_id, result['id']) utils.find_resource.assert_called_once_with(mock.ANY, instance_id) def test_server_get_by_name_or_id_failed(self): instance_id = 'instance_id1' server = {'id': instance_id, 'fake_key': 'fake_value'} self.mock_object(utils, 'find_resource', mock.Mock(return_value=server, side_effect=nova_exception.CommandError)) self.assertRaises(exception.ManilaException, self.api.server_get_by_name_or_id, self.ctx, instance_id) utils.find_resource.assert_any_call(mock.ANY, instance_id) utils.find_resource.assert_called_with(mock.ANY, instance_id, all_tenants=True) @ddt.data( {'nova_e': nova_exception.NotFound(404), 'manila_e': exception.InstanceNotFound}, {'nova_e': nova_exception.BadRequest(400), 'manila_e': exception.InvalidInput}, ) @ddt.unpack def test_server_get_failed(self, nova_e, manila_e): nova.novaclient.side_effect = nova_e instance_id = 'instance_id' self.assertRaises(manila_e, self.api.server_get, self.ctx, instance_id) def test_server_pause(self): self.mock_object(self.novaclient.servers, 'pause') self.api.server_pause(self.ctx, 'id1') self.novaclient.servers.pause.assert_called_once_with('id1') def test_server_unpause(self): self.mock_object(self.novaclient.servers, 'unpause') self.api.server_unpause(self.ctx, 'id1') self.novaclient.servers.unpause.assert_called_once_with('id1') def test_server_suspend(self): self.mock_object(self.novaclient.servers, 'suspend') self.api.server_suspend(self.ctx, 'id1') self.novaclient.servers.suspend.assert_called_once_with('id1') def test_server_resume(self): self.mock_object(self.novaclient.servers, 'resume') self.api.server_resume(self.ctx, 'id1') self.novaclient.servers.resume.assert_called_once_with('id1') def test_server_reboot_hard(self): self.mock_object(self.novaclient.servers, 'reboot') self.api.server_reboot(self.ctx, 'id1') self.novaclient.servers.reboot.assert_called_once_with( 'id1', nova_servers.REBOOT_HARD) def test_server_reboot_soft(self): self.mock_object(self.novaclient.servers, 'reboot') self.api.server_reboot(self.ctx, 'id1', True) self.novaclient.servers.reboot.assert_called_once_with( 'id1', nova_servers.REBOOT_SOFT) def test_server_rebuild(self): self.mock_object(self.novaclient.servers, 'rebuild') self.api.server_rebuild(self.ctx, 'id1', 'fake_image') self.novaclient.servers.rebuild.assert_called_once_with('id1', 'fake_image', None) def test_instance_volume_attach(self): self.mock_object(self.novaclient.volumes, 'create_server_volume') self.api.instance_volume_attach(self.ctx, 'instance_id', 'vol_id', 'device') self.novaclient.volumes.create_server_volume.\ assert_called_once_with('instance_id', 'vol_id', 'device') def test_instance_volume_detach(self): self.mock_object(self.novaclient.volumes, 'delete_server_volume') self.api.instance_volume_detach(self.ctx, 'instance_id', 'att_id') 
self.novaclient.volumes.delete_server_volume.\ assert_called_once_with('instance_id', 'att_id') def test_instance_volumes_list(self): self.mock_object( self.novaclient.volumes, 'get_server_volumes', mock.Mock(return_value=[Volume('id1'), Volume('id2')])) self.cinderclient = self.novaclient self.mock_object(cinder, 'cinderclient', mock.Mock(return_value=self.novaclient)) result = self.api.instance_volumes_list(self.ctx, 'instance_id') self.assertEqual(2, len(result)) self.assertEqual('id1', result[0].id) self.assertEqual('id2', result[1].id) def test_server_update(self): self.mock_object(self.novaclient.servers, 'update') self.api.server_update(self.ctx, 'id1', 'new_name') self.novaclient.servers.update.assert_called_once_with('id1', name='new_name') def test_update_server_volume(self): self.mock_object(self.novaclient.volumes, 'update_server_volume') self.api.update_server_volume(self.ctx, 'instance_id', 'att_id', 'new_vol_id') self.novaclient.volumes.update_server_volume.\ assert_called_once_with('instance_id', 'att_id', 'new_vol_id') def test_keypair_create(self): self.mock_object(self.novaclient.keypairs, 'create') self.api.keypair_create(self.ctx, 'keypair_name') self.novaclient.keypairs.create.assert_called_once_with('keypair_name') def test_keypair_import(self): self.mock_object(self.novaclient.keypairs, 'create') self.api.keypair_import(self.ctx, 'keypair_name', 'fake_pub_key') self.novaclient.keypairs.create.\ assert_called_once_with('keypair_name', 'fake_pub_key') def test_keypair_delete(self): self.mock_object(self.novaclient.keypairs, 'delete') self.api.keypair_delete(self.ctx, 'fake_keypair_id') self.novaclient.keypairs.delete.\ assert_called_once_with('fake_keypair_id') def test_keypair_list(self): self.assertEqual([{'id': 'id1'}, {'id': 'id2'}], self.api.keypair_list(self.ctx)) def test_fixed_ip_get(self): fixed_ip = 'fake_fixed_ip' result = self.api.fixed_ip_get(self.ctx, fixed_ip) self.assertIsInstance(result, dict) self.assertEqual(fixed_ip, result['address']) def test_fixed_ip_reserve(self): fixed_ip = 'fake_fixed_ip' result = self.api.fixed_ip_reserve(self.ctx, fixed_ip) self.assertIsNone(result) def test_fixed_ip_unreserve(self): fixed_ip = 'fake_fixed_ip' result = self.api.fixed_ip_unreserve(self.ctx, fixed_ip) self.assertIsNone(result) def test_network_get(self): net_id = 'fake_net_id' net = self.api.network_get(self.ctx, net_id) self.assertIsInstance(net, dict) self.assertEqual(net_id, net['id']) class ToDictTestCase(test.TestCase): def test_dict_provided(self): fake_dict = {'foo_key': 'foo_value', 'bar_key': 'bar_value'} result = nova._to_dict(fake_dict) self.assertEqual(fake_dict, result) def test_obj_provided_with_to_dict_method(self): expected = {'foo': 'bar'} class FakeObj(object): def __init__(self): self.fake_attr = 'fake_attr_value' def to_dict(self): return expected fake_obj = FakeObj() result = nova._to_dict(fake_obj) self.assertEqual(expected, result) def test_obj_provided_without_to_dict_method(self): expected = {'foo': 'bar'} class FakeObj(object): def __init__(self): self.foo = expected['foo'] fake_obj = FakeObj() result = nova._to_dict(fake_obj) self.assertEqual(expected, result) manila-2.0.0/manila/tests/conf_fixture.py0000664000567000056710000000456512701407107021574 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_policy import opts from manila.common import config CONF = config.CONF def set_defaults(conf): _safe_set_of_opts(conf, 'verbose', True) _safe_set_of_opts(conf, 'state_path', os.path.abspath( os.path.join(os.path.dirname(__file__), '..', '..'))) _safe_set_of_opts(conf, 'connection', "sqlite://", group='database') _safe_set_of_opts(conf, 'sqlite_synchronous', False) _POLICY_PATH = os.path.abspath(os.path.join(CONF.state_path, 'manila/tests/policy.json')) opts.set_defaults(conf, policy_file=_POLICY_PATH) _safe_set_of_opts(conf, 'share_export_ip', '0.0.0.0') _safe_set_of_opts(conf, 'service_instance_user', 'fake_user') _API_PASTE_PATH = os.path.abspath(os.path.join(CONF.state_path, 'etc/manila/api-paste.ini')) _safe_set_of_opts(conf, 'api_paste_config', _API_PASTE_PATH) _safe_set_of_opts(conf, 'share_driver', 'manila.tests.fake_driver.FakeShareDriver') _safe_set_of_opts(conf, 'auth_strategy', 'noauth') _safe_set_of_opts(conf, 'zfs_share_export_ip', '1.1.1.1') _safe_set_of_opts(conf, 'zfs_service_ip', '2.2.2.2') _safe_set_of_opts(conf, 'zfs_zpool_list', ['foo', 'bar']) _safe_set_of_opts(conf, 'zfs_share_helpers', 'NFS=foo.bar.Helper') _safe_set_of_opts(conf, 'zfs_replica_snapshot_prefix', 'foo_prefix_') def _safe_set_of_opts(conf, *args, **kwargs): try: conf.set_default(*args, **kwargs) except config.cfg.NoSuchOptError: # Assumed that opt is not imported and not used pass manila-2.0.0/manila/tests/declare_conf.py0000664000567000056710000000152012701407107021471 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg CONF = cfg.CONF CONF.register_opt(cfg.IntOpt('answer', default=42, help='test conf')) manila-2.0.0/manila/tests/test_context.py0000664000567000056710000000650712701407107021622 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from manila import context from manila import test class ContextTestCase(test.TestCase): def test_request_context_elevated(self): user_context = context.RequestContext( 'fake_user', 'fake_project', is_admin=False) self.assertFalse(user_context.is_admin) self.assertEqual([], user_context.roles) admin_context = user_context.elevated() self.assertFalse(user_context.is_admin) self.assertTrue(admin_context.is_admin) self.assertFalse('admin' in user_context.roles) self.assertTrue('admin' in admin_context.roles) def test_request_context_sets_is_admin(self): ctxt = context.RequestContext('111', '222', roles=['admin', 'weasel']) self.assertTrue(ctxt.is_admin) def test_request_context_sets_is_admin_upcase(self): ctxt = context.RequestContext('111', '222', roles=['Admin', 'weasel']) self.assertTrue(ctxt.is_admin) def test_request_context_read_deleted(self): ctxt = context.RequestContext('111', '222', read_deleted='yes') self.assertEqual('yes', ctxt.read_deleted) ctxt.read_deleted = 'no' self.assertEqual('no', ctxt.read_deleted) def test_request_context_read_deleted_invalid(self): self.assertRaises(ValueError, context.RequestContext, '111', '222', read_deleted=True) ctxt = context.RequestContext('111', '222') self.assertRaises(ValueError, setattr, ctxt, 'read_deleted', True) def test_extra_args_to_context_get_logged(self): info = {} def fake_warn(log_msg, other_args): info['log_msg'] = log_msg % other_args self.mock_object(context.LOG, 'warning', fake_warn) c = context.RequestContext('user', 'project', extra_arg1='meow', extra_arg2='wuff', user='user', tenant='project') self.assertTrue(c) self.assertIn("'extra_arg1': 'meow'", info['log_msg']) self.assertIn("'extra_arg2': 'wuff'", info['log_msg']) # user and tenant kwargs get popped off before we log anything self.assertNotIn("'user': 'user'", info['log_msg']) self.assertNotIn("'tenant': 'project'", info['log_msg']) manila-2.0.0/manila/tests/var/0000775000567000056710000000000012701407265017312 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/var/ca.crt0000664000567000056710000000415712701407107020411 0ustar jenkinsjenkins00000000000000-----BEGIN CERTIFICATE----- MIIGDDCCA/SgAwIBAgIJAPSvwQYk4qI4MA0GCSqGSIb3DQEBBQUAMGExCzAJBgNV BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMRUwEwYDVQQKEwxPcGVuc3RhY2sg Q0ExEjAQBgNVBAsTCUdsYW5jZSBDQTESMBAGA1UEAxMJR2xhbmNlIENBMB4XDTEy MDIwOTE3MTAwMloXDTIyMDIwNjE3MTAwMlowYTELMAkGA1UEBhMCQVUxEzARBgNV BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwggIiMA0GCSqGSIb3DQEBAQUA A4ICDwAwggIKAoICAQDmf+fapWfzy1Uylus0KGalw4X/5xZ+ltPVOr+IdCPbstvi RTC5g+O+TvXeOP32V/cnSY4ho/+f2q730za+ZA/cgWO252rcm3Q7KTJn3PoqzJvX /l3EXe3/TCrbzgZ7lW3QLTCTEE2eEzwYG3wfDTOyoBq+F6ct6ADh+86gmpbIRfYI N+ixB0hVyz9427PTof97fL7qxxkjAayB28OfwHrkEBl7iblNhUC0RoH+/H9r5GEl GnWiebxfNrONEHug6PHgiaGq7/Dj+u9bwr7J3/NoS84I08ajMnhlPZxZ8bS/O8If ceWGZv7clPozyhABT/otDfgVcNH1UdZ4zLlQwc1MuPYN7CwxrElxc8Quf94ttGjb tfGTl4RTXkDofYdG1qBWW962PsGl2tWmbYDXV0q5JhV/IwbrE1X9f+OksJQne1/+ dZDxMhdf2Q1V0P9hZZICu4+YhmTMs5Mc9myKVnzp4NYdX5fXoB/uNYph+G7xG5IK WLSODKhr1wFGTTcuaa8LhOH5UREVenGDJuc6DdgX9a9PzyJGIi2ngQ03TJIkCiU/ 4J/r/vsm81ezDiYZSp2j5JbME+ixW0GBLTUWpOIxUSHgUFwH5f7lQwbXWBOgwXQk BwpZTmdQx09MfalhBtWeu4/6BnOCOj7e/4+4J0eVxXST0AmVyv8YjJ2nz1F9oQID AQABo4HGMIHDMB0GA1UdDgQWBBTk7Krj4bEsTjHXaWEtI2GZ5ACQyTCBkwYDVR0j BIGLMIGIgBTk7Krj4bEsTjHXaWEtI2GZ5ACQyaFlpGMwYTELMAkGA1UEBhMCQVUx EzARBgNVBAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAG 
A1UECxMJR2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0GCCQD0r8EGJOKiODAM BgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4ICAQA8Zrss/MiwFHGmDlercE0h UvzA54n/EvKP9nP3jHM2qW/VPfKdnFw99nEPFLhb+lN553vdjOpCYFm+sW0Z5Mi4 qsFkk4AmXIIEFOPt6zKxMioLYDQ9Sw/BUv6EZGeANWr/bhmaE+dMcKJt5le/0jJm 2ahsVB9fbFu9jBFeYb7Ba/x2aLkEGMxaDLla+6EQhj148fTnS1wjmX9G2cNzJvj/ +C2EfKJIuDJDqw2oS2FGVpP37FA2Bz2vga0QatNneLkGKCFI3ZTenBznoN+fmurX TL3eJE4IFNrANCcdfMpdyLAtXz4KpjcehqpZMu70er3d30zbi1l0Ajz4dU+WKz/a NQES+vMkT2wqjXHVTjrNwodxw3oLK/EuTgwoxIHJuplx5E5Wrdx9g7Gl1PBIJL8V xiOYS5N7CakyALvdhP7cPubA2+TPAjNInxiAcmhdASS/Vrmpvrkat6XhGn8h9liv ysDOpMQmYQkmgZBpW8yBKK7JABGGsJADJ3E6J5MMWBX2RR4kFoqVGAzdOU3oyaTy I0kz5sfuahaWpdYJVlkO+esc0CRXw8fLDYivabK2tOgUEWeZsZGZ9uK6aV1VxTAY 9Guu3BJ4Rv/KP/hk7mP8rIeCwotV66/2H8nq72ImQhzSVyWcxbFf2rJiFQJ3BFwA WoRMgEwjGJWqzhJZUYpUAQ== -----END CERTIFICATE----- manila-2.0.0/manila/tests/var/certificate.crt0000664000567000056710000000350212701407107022301 0ustar jenkinsjenkins00000000000000-----BEGIN CERTIFICATE----- MIIFLjCCAxYCAQEwDQYJKoZIhvcNAQEFBQAwYTELMAkGA1UEBhMCQVUxEzARBgNV BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwHhcNMTIwMjA5MTcxMDUzWhcN MjIwMjA2MTcxMDUzWjBZMQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0 ZTESMBAGA1UEChMJT3BlbnN0YWNrMQ8wDQYDVQQLEwZHbGFuY2UxEDAOBgNVBAMT BzAuMC4wLjAwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXpUkQN6pu avo+gz3o1K4krVdPl1m7NjNJDyD/+ZH0EGNcEN7iag1qPE7JsjqGPNZsQK1dMoXb Sz+OSi9qvNeJnBcfwUx5qTAtwyAb9AxGkwuMafIU+lWbsclo+dPGsja01ywbXTCZ bF32iqnpOMYhfxWUdoQYiBkhxxhW9eMPKLS/KkP8/bx+Vaa2XJiAebqkd9nrksAA BeGc9mlafYBEmiChPdJEPw+1ePA4QVq9aPepDsqAKtGN8JLpmoC3BdxQQTbbwL3Q 8fTXK4tCNUaVk4AbDy/McFq6y0ocQoBPJjihOY35mWG/OLtcI99yPOpWGnps/5aG /64DDJ2D67Fnaj6gKHV+6TXFO8KZxlnxtgtiZDJBZkneTBt9ArSOv+l6NBsumRz0 iEJ4o4H1S2TSMnprAvX7WnGtc6Xi9gXahYcDHEelwwYzqAiTBv6hxSp4MZ2dNXa+ KzOitC7ZbV2qsg0au0wjfE/oSQ3NvsvUr8nOmfutJTvHRAwbC1v4G/tuAsO7O0w2 0u2B3u+pG06m5+rnEqp+rB9hmukRYTfgEFRRsVIvpFl/cwvPXKRcX03UIMx+lLr9 Ft+ep7YooBhY3wY2kwCxD4lRYNmbwsCIVywZt40f/4ad98TkufR9NhsfycxGeqbr mTMFlZ8TTlmP82iohekKCOvoyEuTIWL2+wIDAQABMA0GCSqGSIb3DQEBBQUAA4IC AQBMUBgV0R+Qltf4Du7u/8IFmGAoKR/mktB7R1gRRAqsvecUt7kIwBexGdavGg1y 0pU0+lgUZjJ20N1SlPD8gkNHfXE1fL6fmMjWz4dtYJjzRVhpufHPeBW4tl8DgHPN rBGAYQ+drDSXaEjiPQifuzKx8WS+DGA3ki4co5mPjVnVH1xvLIdFsk89z3b3YD1k yCJ/a9K36x6Z/c67JK7s6MWtrdRF9+MVnRKJ2PK4xznd1kBz16V+RA466wBDdARY vFbtkafbEqOb96QTonIZB7+fAldKDPZYnwPqasreLmaGOaM8sxtlPYAJ5bjDONbc AaXG8BMRQyO4FyH237otDKlxPyHOFV66BaffF5S8OlwIMiZoIvq+IcTZOdtDUSW2 KHNLfe5QEDZdKjWCBrfqAfvNuG13m03WqfmcMHl3o/KiPJlx8l9Z4QEzZ9xcyQGL cncgeHM9wJtzi2cD/rTDNFsx/gxvoyutRmno7I3NRbKmpsXF4StZioU3USRspB07 hYXOVnG3pS+PjVby7ThT3gvFHSocguOsxClx1epdUJAmJUbmM7NmOp5WVBVtMtC2 Su4NG/xJciXitKzw+btb7C7RjO6OEqv/1X/oBDzKBWQAwxUC+lqmnM7W6oqWJFEM YfTLnrjs7Hj6ThMGcEnfvc46dWK3dz0RjsQzUxugPuEkLA== -----END CERTIFICATE----- manila-2.0.0/manila/tests/var/privatekey.key0000664000567000056710000000625312701407107022210 0ustar jenkinsjenkins00000000000000-----BEGIN RSA PRIVATE KEY----- MIIJKAIBAAKCAgEA16VJEDeqbmr6PoM96NSuJK1XT5dZuzYzSQ8g//mR9BBjXBDe 4moNajxOybI6hjzWbECtXTKF20s/jkovarzXiZwXH8FMeakwLcMgG/QMRpMLjGny FPpVm7HJaPnTxrI2tNcsG10wmWxd9oqp6TjGIX8VlHaEGIgZIccYVvXjDyi0vypD /P28flWmtlyYgHm6pHfZ65LAAAXhnPZpWn2ARJogoT3SRD8PtXjwOEFavWj3qQ7K gCrRjfCS6ZqAtwXcUEE228C90PH01yuLQjVGlZOAGw8vzHBaustKHEKATyY4oTmN +Zlhvzi7XCPfcjzqVhp6bP+Whv+uAwydg+uxZ2o+oCh1fuk1xTvCmcZZ8bYLYmQy QWZJ3kwbfQK0jr/pejQbLpkc9IhCeKOB9Utk0jJ6awL1+1pxrXOl4vYF2oWHAxxH pcMGM6gIkwb+ocUqeDGdnTV2viszorQu2W1dqrINGrtMI3xP6EkNzb7L1K/Jzpn7 
rSU7x0QMGwtb+Bv7bgLDuztMNtLtgd7vqRtOpufq5xKqfqwfYZrpEWE34BBUUbFS L6RZf3MLz1ykXF9N1CDMfpS6/Rbfnqe2KKAYWN8GNpMAsQ+JUWDZm8LAiFcsGbeN H/+GnffE5Ln0fTYbH8nMRnqm65kzBZWfE05Zj/NoqIXpCgjr6MhLkyFi9vsCAwEA AQKCAgAA96baQcWr9SLmQOR4NOwLEhQAMWefpWCZhU3amB4FgEVR1mmJjnw868RW t0v36jH0Dl44us9K6o2Ab+jCi9JTtbWM2Osk6JNkwSlVtsSPVH2KxbbmTTExH50N sYE3tPj12rlB7isXpRrOzlRwzWZmJBHOtrFlAsdKFYCQc03vdXlKGkBv1BuSXYP/ 8W5ltSYXMspxehkOZvhaIejbFREMPbzDvGlDER1a7Q320qQ7kUr7ISvbY1XJUzj1 f1HwgEA6w/AhED5Jv6wfgvx+8Yo9hYnflTPbsO1XRS4x7kJxGHTMlFuEsSF1ICYH Bcos0wUiGcBO2N6uAFuhe98BBn+nOwAPZYWwGkmVuK2psm2mXAHx94GT/XqgK/1r VWGSoOV7Fhjauc2Nv8/vJU18DXT3OY5hc4iXVeEBkuZwRb/NVUtnFoHxVO/Mp5Fh /W5KZaLWVrLghzvSQ/KUIM0k4lfKDZpY9ZpOdNgWDyZY8tNrXumUZZimzWdXZ9vR dBssmd8qEKs1AHGFnMDt56IjLGou6j0qnWsLdR1e/WEFsYzGXLVHCv6vXRNkbjqh WFw5nA+2Dw1YAsy+YkTfgx2pOe+exM/wxsVPa7tG9oZ374dywUi1k6VoHw5dkmJw 1hbXqSLZtx2N51G+SpGmNAV4vLUF0y3dy2wnrzFkFT4uxh1w8QKCAQEA+h6LwHTK hgcJx6CQQ6zYRqXo4wdvMooY1FcqJOq7LvJUA2CX5OOLs8qN1TyFrOCuAUTurOrM ABlQ0FpsIaP8TOGz72dHe2eLB+dD6Bqjn10sEFMn54zWd/w9ympQrO9jb5X3ViTh sCcdYyXVS9Hz8nzbbIF+DaKlxF2Hh71uRDxXpMPxRcGbOIuKZXUj6RkTIulzqT6o uawlegWxch05QSgzq/1ASxtjTzo4iuDCAii3N45xqxnB+fV9NXEt4R2oOGquBRPJ LxKcOnaQKBD0YNX4muTq+zPlv/kOb8/ys2WGWDUrNkpyJXqhTve4KONjqM7+iL/U 4WdJuiCjonzk/QKCAQEA3Lc+kNq35FNLxMcnCVcUgkmiCWZ4dyGZZPdqjOPww1+n bbudGPzY1nxOvE60dZM4or/tm6qlXYfb2UU3+OOJrK9s297EQybZ8DTZu2GHyitc NSFV3Gl4cgvKdbieGKkk9X2dV9xSNesNvX9lJEnQxuwHDTeo8ubLHtV88Ml1xokn 7W+IFiyEuUIL4e5/fadbrI3EwMrbCF4+9VcfABx4PTNMzdc8LsncCMXE+jFX8AWp TsT2JezTe5o2WpvBoKMAYhJQNQiaWATn00pDVY/70H1vK3ljomAa1IUdOr/AhAF7 3jL0MYMgXSHzXZOKAtc7yf+QfFWF1Ls8+sen1clJVwKCAQEAp59rB0r+Iz56RmgL 5t7ifs5XujbURemY5E2aN+18DuVmenD0uvfoO1DnJt4NtCNLWhxpXEdq+jH9H/VJ fG4a+ydT4IC1vjVRTrWlo9qeh4H4suQX3S1c2kKY4pvHf25blH/Lp9bFzbkZD8Ze IRcOxxb4MsrBwL+dGnGYD9dbG63ZCtoqSxaKQSX7VS1hKKmeUopj8ivFBdIht5oz JogBQ/J+Vqg9u1gagRFCrYgdXTcOOtRix0lW336vL+6u0ax/fXe5MjvlW3+8Zc3p pIBgVrlvh9ccx8crFTIDg9m4DJRgqaLQV+0ifI2np3WK3RQvSQWYPetZ7sm69ltD bvUGvQKCAQAz5CEhjUqOs8asjOXwnDiGKSmfbCgGWi/mPQUf+rcwN9z1P5a/uTKB utgIDbj/q401Nkp2vrgCNV7KxitSqKxFnTjKuKUL5KZ4gvRtyZBTR751/1BgcauP pJYE91K0GZBG5zGG5pWtd4XTd5Af5/rdycAeq2ddNEWtCiRFuBeohbaNbBtimzTZ GV4R0DDJKf+zoeEQMqEsZnwG0mTHceoS+WylOGU92teQeG7HI7K5C5uymTwFzpgq ByegRd5QFgKRDB0vWsZuyzh1xI/wHdnmOpdYcUGre0zTijhFB7ALWQ32P6SJv3ps av78kSNxZ4j3BM7DbJf6W8sKasZazOghAoIBAHekpBcLq9gRv2+NfLYxWN2sTZVB 1ldwioG7rWvk5YQR2akukecI3NRjtC5gG2vverawG852Y4+oLfgRMHxgp0qNStwX juTykzPkCwZn8AyR+avC3mkrtJyM3IigcYOu4/UoaRDFa0xvCC1EfumpnKXIpHag miSQZf2sVbgqb3/LWvHIg/ceOP9oGJve87/HVfQtBoLaIe5RXCWkqB7mcI/exvTS 8ShaW6v2Fe5Bzdvawj7sbsVYRWe93Aq2tmIgSX320D2RVepb6mjD4nr0IUaM3Yed TFT7e2ikWXyDLLgVkDTU4Qe8fr3ZKGfanCIDzvgNw6H1gRi+2WQgOmjilMQ= -----END RSA PRIVATE KEY----- manila-2.0.0/manila/tests/fake_compute.py0000664000567000056710000000576212701407107021543 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
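# The fakes that follow stand in for the Nova compute API in Manila's unit
# tests: FakeServer behaves like a hybrid of an object and a dict, and the API
# class exposes no-op versions of the compute calls the tests expect. A minimal
# usage sketch (the values below are illustrative only):
#
#     server = FakeServer(id='abc', status='BUILD')
#     server['status']             # 'BUILD' -- dict-style access via __getitem__
#     server.get('missing', None)  # None    -- dict-like get() with a default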
from oslo_config import cfg CONF = cfg.CONF class FakeServer(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_id') self.status = kwargs.pop('status', 'ACTIVE') self.networks = kwargs.pop('networks', {'fake_net': 'fake_net_value'}) for key, value in kwargs.items(): setattr(self, key, value) def __getitem__(self, attr): return getattr(self, attr) def __setitem__(self, attr, value): setattr(self, attr, value) def get(self, attr, default): return getattr(self, attr, default) def update(self, *args, **kwargs): pass class FakeKeypair(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_keypair_id') for key, value in kwargs.items(): setattr(self, key, value) class FakeImage(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_image_id') for key, value in kwargs.items(): setattr(self, key, value) class FakeSecurityGroup(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_security_group_id') self.name = kwargs.pop('name', 'fake_security_group_name') for key, value in kwargs.items(): setattr(self, key, value) class API(object): """Fake Compute API.""" def instance_volume_attach(self, ctx, server_id, volume_id, mount_path): pass def instance_volume_detach(self, ctx, server_id, volume_id): pass def instance_volumes_list(self, ctx, server_id): pass def server_create(self, *args, **kwargs): pass def server_delete(self, *args, **kwargs): pass def server_get(self, *args, **kwargs): pass def server_get_by_name_or_id(self, *args, **kwargs): pass def server_reboot(self, *args, **kwargs): pass def keypair_list(self, *args, **kwargs): pass def keypair_import(self, *args, **kwargs): pass def keypair_delete(self, *args, **kwargs): pass def image_list(self, *args, **kwargs): pass def security_group_create(self, *args, **kwargs): pass def security_group_list(self, *args, **kwargs): pass def add_security_group_to_server(self, *args, **kwargs): pass def security_group_rule_create(self, *args, **kwargs): pass manila-2.0.0/manila/tests/integrated/0000775000567000056710000000000012701407265020650 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/integrated/integrated_helpers.py0000664000567000056710000000775612701407107025104 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
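# integrated_helpers backs the "integrated" API tests: the generate_* helpers
# build collision-free random resource names, and _IntegratedTestBase starts
# the share and scheduler services plus an osapi_share WSGI endpoint and wraps
# them with a TestOpenStackClient. A rough sketch of the name helper (the
# suffix is random, so the output shown is only an example):
#
#     generate_new_element(['server1'], 'server')   # -> e.g. 'serverA1B2C3D4'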
""" Provides common functionality for integrated unit tests """ import random import string import uuid from oslo_log import log from manila import service from manila import test # For the flags from manila.tests.integrated.api import client from oslo_config import cfg CONF = cfg.CONF LOG = log.getLogger(__name__) def generate_random_alphanumeric(length): """Creates a random alphanumeric string of specified length.""" return ''.join(random.choice(string.ascii_uppercase + string.digits) for _x in range(length)) def generate_random_numeric(length): """Creates a random numeric string of specified length.""" return ''.join(random.choice(string.digits) for _x in range(length)) def generate_new_element(items, prefix, numeric=False): """Creates a random string with prefix, that is not in 'items' list.""" while True: if numeric: candidate = prefix + generate_random_numeric(8) else: candidate = prefix + generate_random_alphanumeric(8) if candidate not in items: return candidate LOG.debug("Random collision on %s.", candidate) class _IntegratedTestBase(test.TestCase): def setUp(self): super(_IntegratedTestBase, self).setUp() f = self._get_flags() self.flags(**f) self.flags(verbose=True) # set up services self.volume = self.start_service('share') self.scheduler = self.start_service('scheduler') self._start_api_service() self.api = client.TestOpenStackClient('fake', 'fake', self.auth_url) def tearDown(self): self.osapi.stop() super(_IntegratedTestBase, self).tearDown() def _start_api_service(self): self.osapi = service.WSGIService("osapi_share") self.osapi.start() # FIXME(ja): this is not the auth url - this is the service url # FIXME(ja): this needs fixed in nova as well self.auth_url = 'http://%s:%s/v1' % (self.osapi.host, self.osapi.port) LOG.warning(self.auth_url) def _get_flags(self): """An opportunity to setup flags, before the services are started.""" f = {} # Ensure tests only listen on localhost f['osapi_share_listen'] = '127.0.0.1' # Auto-assign ports to allow concurrent tests f['osapi_share_listen_port'] = 0 return f def get_unused_server_name(self): servers = self.api.get_servers() server_names = [server['name'] for server in servers] return generate_new_element(server_names, 'server') def get_invalid_image(self): return str(uuid.uuid4()) def _build_minimal_create_server_request(self): server = {} image = self.api.get_images()[0] LOG.debug("Image: %s.", image) if 'imageRef' in image: image_href = image['imageRef'] else: image_href = image['id'] image_href = 'http://fake.server/%s' % image_href # We now have a valid imageId server['imageRef'] = image_href # Set a valid flavorId flavor = self.api.get_flavors()[0] LOG.debug("Using flavor: %s.", flavor) server['flavorRef'] = 'http://fake.server/%s' % flavor['id'] # Set a valid server name server_name = self.get_unused_server_name() server['name'] = server_name return server manila-2.0.0/manila/tests/integrated/__init__.py0000664000567000056710000000000012701407107022742 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/integrated/test_login.py0000664000567000056710000000177112701407107023372 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from manila.tests.integrated import integrated_helpers LOG = log.getLogger(__name__) class LoginTest(integrated_helpers._IntegratedTestBase): def test_login(self): """Simple check - we list shares - so we know we're logged in.""" shares = self.api.get_shares() for share in shares: LOG.debug("share: %s", share) manila-2.0.0/manila/tests/integrated/api/0000775000567000056710000000000012701407265021421 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/integrated/api/__init__.py0000664000567000056710000000133512701407107023527 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Justin Santa Barbara # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`api` -- OpenStack API client, for testing rather than production ================================= """ manila-2.0.0/manila/tests/integrated/api/client.py0000664000567000056710000001721012701407107023245 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Justin Santa Barbara # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
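# TestOpenStackClient, defined below, is a deliberately small HTTP client: it
# authenticates once with X-Auth-User/X-Auth-Key/X-Auth-Project-Id headers,
# caches the returned auth headers, and layers api_get/api_post/api_put/
# api_delete helpers on top of plain http_client connections. A usage sketch
# (the endpoint URL here is made up purely for illustration):
#
#     client = TestOpenStackClient('fake', 'fake', 'http://127.0.0.1:8080/v1')
#     shares = client.get_shares(detail=False)  # GET <server-management-url>/shares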
from oslo_log import log from oslo_serialization import jsonutils from six.moves import http_client from six.moves.urllib import parse LOG = log.getLogger(__name__) class OpenStackApiException(Exception): def __init__(self, message=None, response=None): self.response = response if not message: message = 'Unspecified error' if response: _status = response.status _body = response.read() message = ('%(message)s\nStatus Code: %(_status)s\n' 'Body: %(_body)s') % { "message": message, "_status": _status, "_body": _body } super(OpenStackApiException, self).__init__(message) class OpenStackApiAuthenticationException(OpenStackApiException): def __init__(self, response=None, message=None): if not message: message = "Authentication error" super(OpenStackApiAuthenticationException, self).__init__(message, response) class OpenStackApiAuthorizationException(OpenStackApiException): def __init__(self, response=None, message=None): if not message: message = "Authorization error" super(OpenStackApiAuthorizationException, self).__init__(message, response) class OpenStackApiNotFoundException(OpenStackApiException): def __init__(self, response=None, message=None): if not message: message = "Item not found" super(OpenStackApiNotFoundException, self).__init__(message, response) class TestOpenStackClient(object): """Simple OpenStack API Client. This is a really basic OpenStack API client that is under our control, so we can make changes / insert hooks for testing """ def __init__(self, auth_user, auth_key, auth_uri): super(TestOpenStackClient, self).__init__() self.auth_result = None self.auth_user = auth_user self.auth_key = auth_key self.auth_uri = auth_uri # default project_id self.project_id = 'openstack' def request(self, url, method='GET', body=None, headers=None): _headers = {'Content-Type': 'application/json'} _headers.update(headers or {}) parsed_url = parse.urlparse(url) port = parsed_url.port hostname = parsed_url.hostname scheme = parsed_url.scheme if scheme == 'http': conn = http_client.HTTPConnection(hostname, port=port) elif scheme == 'https': conn = http_client.HTTPSConnection(hostname, port=port) else: raise OpenStackApiException("Unknown scheme: %s" % url) relative_url = parsed_url.path if parsed_url.query: relative_url = relative_url + "?" 
+ parsed_url.query LOG.info("Doing %(method)s on %(relative_url)s", {"method": method, "relative_url": relative_url}) if body: LOG.info("Body: %s", body) conn.request(method, relative_url, body, _headers) response = conn.getresponse() return response def _authenticate(self): if self.auth_result: return self.auth_result auth_uri = self.auth_uri headers = {'X-Auth-User': self.auth_user, 'X-Auth-Key': self.auth_key, 'X-Auth-Project-Id': self.project_id} response = self.request(auth_uri, headers=headers) http_status = response.status LOG.debug("%(auth_uri)s => code %(http_status)s.", {"auth_uri": auth_uri, "http_status": http_status}) if http_status == 401: raise OpenStackApiAuthenticationException(response=response) auth_headers = {} for k, v in response.getheaders(): auth_headers[k.lower()] = v self.auth_result = auth_headers return self.auth_result def api_request(self, relative_uri, check_response_status=None, **kwargs): auth_result = self._authenticate() base_uri = auth_result['x-server-management-url'] full_uri = '%s/%s' % (base_uri, relative_uri) headers = kwargs.setdefault('headers', {}) headers['X-Auth-Token'] = auth_result['x-auth-token'] response = self.request(full_uri, **kwargs) http_status = response.status LOG.debug("%(relative_uri)s => code %(http_status)s.", {"relative_uri": relative_uri, "http_status": http_status}) if check_response_status: if http_status not in check_response_status: if http_status == 404: raise OpenStackApiNotFoundException(response=response) elif http_status == 401: raise OpenStackApiAuthorizationException(response=response) else: raise OpenStackApiException( message="Unexpected status code", response=response) return response def _decode_json(self, response): body = response.read() LOG.debug("Decoding JSON: %s.", (body)) if body: return jsonutils.loads(body) else: return "" def api_options(self, relative_uri, **kwargs): kwargs['method'] = 'OPTIONS' kwargs.setdefault('check_response_status', [200]) response = self.api_request(relative_uri, **kwargs) return self._decode_json(response) def api_get(self, relative_uri, **kwargs): kwargs.setdefault('check_response_status', [200]) response = self.api_request(relative_uri, **kwargs) return self._decode_json(response) def api_post(self, relative_uri, body, **kwargs): kwargs['method'] = 'POST' if body: headers = kwargs.setdefault('headers', {}) headers['Content-Type'] = 'application/json' kwargs['body'] = jsonutils.dumps(body) kwargs.setdefault('check_response_status', [200, 202]) response = self.api_request(relative_uri, **kwargs) return self._decode_json(response) def api_put(self, relative_uri, body, **kwargs): kwargs['method'] = 'PUT' if body: headers = kwargs.setdefault('headers', {}) headers['Content-Type'] = 'application/json' kwargs['body'] = jsonutils.dumps(body) kwargs.setdefault('check_response_status', [200, 202, 204]) response = self.api_request(relative_uri, **kwargs) return self._decode_json(response) def api_delete(self, relative_uri, **kwargs): kwargs['method'] = 'DELETE' kwargs.setdefault('check_response_status', [200, 202, 204]) return self.api_request(relative_uri, **kwargs) def get_shares(self, detail=True): rel_url = '/shares/detail' if detail else '/shares' return self.api_get(rel_url)['shares'] manila-2.0.0/manila/tests/integrated/test_extensions.py0000664000567000056710000000264012701407107024455 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log import six from manila.tests.integrated import integrated_helpers CONF = cfg.CONF LOG = log.getLogger(__name__) class ExtensionsTest(integrated_helpers._IntegratedTestBase): def _get_flags(self): f = super(ExtensionsTest, self)._get_flags() f['osapi_share_extension'] = CONF.osapi_share_extension[:] f['osapi_share_extension'].append( 'manila.tests.api.extensions.foxinsocks.Foxinsocks') return f def test_get_foxnsocks(self): """Simple check that fox-n-socks works.""" response = self.api.api_request('/foxnsocks') foxnsocks = response.read() LOG.debug("foxnsocks: %s.", foxnsocks) self.assertEqual(six.b('Try to say this Mr. Knox, sir...'), foxnsocks) manila-2.0.0/manila/tests/scheduler/0000775000567000056710000000000012701407265020500 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/scheduler/test_scheduler_options.py0000664000567000056710000001211612701407107025636 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For scheduler options. """ import datetime from oslo_serialization import jsonutils import six from manila.scheduler import scheduler_options from manila import test class FakeSchedulerOptions(scheduler_options.SchedulerOptions): def __init__(self, last_checked, now, file_old, file_now, data, filedata): super(FakeSchedulerOptions, self).__init__() # Change internals ... self.last_modified = file_old self.last_checked = last_checked self.data = data # For overrides ... 
self._time_now = now self._file_now = file_now self._file_data = six.b(filedata) self.file_was_loaded = False def _get_file_timestamp(self, filename): return self._file_now def _get_file_handle(self, filename): self.file_was_loaded = True if six.PY2: import StringIO return StringIO.StringIO(self._file_data) else: import io return io.BytesIO(self._file_data) def _get_time_now(self): return self._time_now class SchedulerOptionsTestCase(test.TestCase): def test_get_configuration_first_time_no_flag(self): last_checked = None now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = None file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) data = dict(a=1, b=2, c=3) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, {}, jdata) self.assertEqual({}, fake.get_configuration()) self.assertFalse(fake.file_was_loaded) def test_get_configuration_first_time_empty_file(self): last_checked = None now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = None file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) jdata = "" fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, {}, jdata) self.assertEqual({}, fake.get_configuration('foo.json')) self.assertTrue(fake.file_was_loaded) def test_get_configuration_first_time_happy_day(self): last_checked = None now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = None file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) data = dict(a=1, b=2, c=3) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, {}, jdata) self.assertEqual(data, fake.get_configuration('foo.json')) self.assertTrue(fake.file_was_loaded) def test_get_configuration_second_time_no_change(self): last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) data = dict(a=1, b=2, c=3) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, data, jdata) self.assertEqual(data, fake.get_configuration('foo.json')) self.assertFalse(fake.file_was_loaded) def test_get_configuration_second_time_too_fast(self): last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) now = datetime.datetime(2011, 1, 1, 1, 1, 2) file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) file_now = datetime.datetime(2013, 1, 1, 1, 1, 1) old_data = dict(a=1, b=2, c=3) data = dict(a=11, b=12, c=13) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, old_data, jdata) self.assertEqual(old_data, fake.get_configuration('foo.json')) self.assertFalse(fake.file_was_loaded) def test_get_configuration_second_time_change(self): last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) file_now = datetime.datetime(2013, 1, 1, 1, 1, 1) old_data = dict(a=1, b=2, c=3) data = dict(a=11, b=12, c=13) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, old_data, jdata) self.assertEqual(data, fake.get_configuration('foo.json')) self.assertTrue(fake.file_was_loaded) manila-2.0.0/manila/tests/scheduler/__init__.py0000664000567000056710000000000012701407107022572 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/scheduler/drivers/0000775000567000056710000000000012701407265022156 5ustar 
jenkinsjenkins00000000000000manila-2.0.0/manila/tests/scheduler/drivers/__init__.py0000664000567000056710000000000012701407107024250 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/scheduler/drivers/test_filter.py0000664000567000056710000005775112701407107025066 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Filter Scheduler. """ import ddt import mock from oslo_utils import strutils from manila.common import constants from manila import context from manila import exception from manila.scheduler.drivers import base from manila.scheduler.drivers import filter from manila.scheduler import host_manager from manila.tests.scheduler.drivers import test_base from manila.tests.scheduler import fakes SNAPSHOT_SUPPORT = constants.ExtraSpecs.SNAPSHOT_SUPPORT REPLICATION_TYPE_SPEC = constants.ExtraSpecs.REPLICATION_TYPE_SPEC @ddt.ddt class FilterSchedulerTestCase(test_base.SchedulerTestCase): """Test case for Filter Scheduler.""" driver_cls = filter.FilterScheduler def test___format_filter_properties_active_replica_host_is_provided(self): sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project') request_spec = { 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {}, 'share_type': {'name': 'NFS'}, 'share_id': ['fake-id1'], 'active_replica_host': 'fake_ar_host', } hosts = [fakes.FakeHostState(host, {'replication_domain': 'xyzzy'}) for host in ('fake_ar_host', 'fake_host_2')] self.mock_object(sched.host_manager, 'get_all_host_states_share', mock.Mock(return_value=hosts)) self.mock_object(sched, 'populate_filter_properties_share') retval = sched._format_filter_properties( fake_context, {}, request_spec) self.assertTrue('replication_domain' in retval[0]) def test_create_share_no_hosts(self): # Ensure empty hosts/child_zones result in NoValidHosts exception. sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project') request_spec = { 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {}, 'share_type': {'name': 'NFS'}, 'share_id': ['fake-id1'], } self.assertRaises(exception.NoValidHost, sched.schedule_create_share, fake_context, request_spec, {}) @mock.patch('manila.scheduler.host_manager.HostManager.' 'get_all_host_states_share') def test_create_share_non_admin(self, _mock_get_all_host_states): # Test creating a volume locally using create_volume, passing # a non-admin context. DB actions should work. self.was_admin = False def fake_get(context, *args, **kwargs): # Make sure this is called with admin context, even though # we're using user context below. 
self.was_admin = context.is_admin return {} sched = fakes.FakeFilterScheduler() _mock_get_all_host_states.side_effect = fake_get fake_context = context.RequestContext('user', 'project') request_spec = { 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {}, 'share_type': {'name': 'NFS'}, 'share_id': ['fake-id1'], } self.assertRaises(exception.NoValidHost, sched.schedule_create_share, fake_context, request_spec, {}) self.assertTrue(self.was_admin) @ddt.data( {'name': 'foo'}, {'name': 'foo', 'extra_specs': {}}, *[{'name': 'foo', 'extra_specs': {SNAPSHOT_SUPPORT: v}} for v in ('True', ' True', 'true', '1')] ) @mock.patch('manila.db.service_get_all_by_topic') def test__schedule_share_with_snapshot_support( self, share_type, _mock_service_get_all_by_topic): sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project', is_admin=True) fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) request_spec = { 'share_type': share_type, 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {}, } weighed_host = sched._schedule_share(fake_context, request_spec, {}) self.assertIsNotNone(weighed_host) self.assertIsNotNone(weighed_host.obj) self.assertTrue(hasattr(weighed_host.obj, SNAPSHOT_SUPPORT)) expected_snapshot_support = strutils.bool_from_string( share_type.get('extra_specs', {}).get( SNAPSHOT_SUPPORT, 'True').split()[-1]) self.assertEqual( expected_snapshot_support, getattr(weighed_host.obj, SNAPSHOT_SUPPORT)) self.assertTrue(_mock_service_get_all_by_topic.called) @ddt.data( *[{'name': 'foo', 'extra_specs': {SNAPSHOT_SUPPORT: v}} for v in ('False', ' False', 'false', '0')] ) @mock.patch('manila.db.service_get_all_by_topic') def test__schedule_share_without_snapshot_support( self, share_type, _mock_service_get_all_by_topic): sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project', is_admin=True) fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) request_spec = { 'share_type': share_type, 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {'project_id': 1, 'size': 1}, } weighed_host = sched._schedule_share(fake_context, request_spec, {}) self.assertIsNone(weighed_host) self.assertTrue(_mock_service_get_all_by_topic.called) @ddt.data( *[{'name': 'foo', 'extra_specs': { SNAPSHOT_SUPPORT: 'True', REPLICATION_TYPE_SPEC: v }} for v in ('writable', 'readable', 'dr')] ) @mock.patch('manila.db.service_get_all_by_topic') def test__schedule_share_with_valid_replication_spec( self, share_type, _mock_service_get_all_by_topic): sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project', is_admin=True) fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) request_spec = { 'share_type': share_type, 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {'project_id': 1, 'size': 1}, } weighed_host = sched._schedule_share(fake_context, request_spec, {}) self.assertIsNotNone(weighed_host) self.assertIsNotNone(weighed_host.obj) self.assertTrue(hasattr(weighed_host.obj, REPLICATION_TYPE_SPEC)) expected_replication_type_support = ( share_type.get('extra_specs', {}).get(REPLICATION_TYPE_SPEC)) self.assertEqual( expected_replication_type_support, getattr(weighed_host.obj, REPLICATION_TYPE_SPEC)) self.assertTrue(_mock_service_get_all_by_topic.called) 
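# The ddt-driven snapshot_support tests above accept several spellings of a
# boolean extra spec ('True', ' True', 'true', '1', and their false
# counterparts) and derive the expected value with
# strutils.bool_from_string(value.split()[-1]), i.e. only the last
# whitespace-separated token is considered. A couple of illustrative values:
#
#     strutils.bool_from_string(' True'.split()[-1])   # True
#     strutils.bool_from_string('false'.split()[-1])   # False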
@ddt.data( *[{'name': 'foo', 'extra_specs': { SNAPSHOT_SUPPORT: 'True', REPLICATION_TYPE_SPEC: v }} for v in ('None', 'readwrite', 'activesync')] ) @mock.patch('manila.db.service_get_all_by_topic') def test__schedule_share_with_invalid_replication_type_spec( self, share_type, _mock_service_get_all_by_topic): sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project', is_admin=True) fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) request_spec = { 'share_type': share_type, 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {'project_id': 1, 'size': 1}, } weighed_host = sched._schedule_share(fake_context, request_spec, {}) self.assertIsNone(weighed_host) self.assertTrue(_mock_service_get_all_by_topic.called) @mock.patch('manila.db.service_get_all_by_topic') def test_schedule_share_with_cg_pool_support( self, _mock_service_get_all_by_topic): sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project', is_admin=True) fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) request_spec = { 'share_type': { 'name': 'NFS', 'extra_specs': {'consistency_group_support': 'pool'} }, 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {'project_id': 1, 'size': 1}, 'consistency_group': { 'id': 'fake-cg-id', 'host': 'host5#_pool0', } } weighed_host = sched._schedule_share(fake_context, request_spec, {}) self.assertIsNotNone(weighed_host) self.assertIsNotNone(weighed_host.obj) self.assertEqual('host5#_pool0', weighed_host.obj.host) self.assertTrue(_mock_service_get_all_by_topic.called) def _setup_dedupe_fakes(self, extra_specs): sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project', is_admin=True) share_type = {'name': 'foo', 'extra_specs': extra_specs} request_spec = { 'share_type': share_type, 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {'project_id': 1, 'size': 1}, } return sched, fake_context, request_spec @mock.patch('manila.db.service_get_all_by_topic') def test__schedule_share_with_default_dedupe_value( self, _mock_service_get_all_by_topic): sched, fake_context, request_spec = self._setup_dedupe_fakes( {'capabilities:dedupe': ' False'}) fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) weighed_host = sched._schedule_share(fake_context, request_spec, {}) self.assertIsNotNone(weighed_host) self.assertIsNotNone(weighed_host.obj) self.assertTrue(hasattr(weighed_host.obj, 'dedupe')) self.assertFalse(weighed_host.obj.dedupe) self.assertTrue(_mock_service_get_all_by_topic.called) @ddt.data('True', ' True') @mock.patch('manila.db.service_get_all_by_topic') def test__schedule_share_with_default_dedupe_value_fail( self, capability, _mock_service_get_all_by_topic): sched, fake_context, request_spec = self._setup_dedupe_fakes( {'capabilities:dedupe': capability}) fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) weighed_host = sched._schedule_share(fake_context, request_spec, {}) self.assertIsNone(weighed_host) self.assertTrue(_mock_service_get_all_by_topic.called) def test_schedule_share_type_is_none(self): sched = fakes.FakeFilterScheduler() request_spec = { 'share_type': None, 'share_properties': {'project_id': 1, 'size': 1}, } self.assertRaises(exception.InvalidParameterValue, sched._schedule_share, self.context, request_spec) 
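# The request_spec dicts assembled throughout this class follow one minimal
# shape; the sketch below only reflects the keys these tests populate, not the
# full specification built by the share API:
#
#     request_spec = {
#         'share_type': {'name': ..., 'extra_specs': {...}},  # None raises InvalidParameterValue
#         'share_properties': {'project_id': ..., 'size': ...},
#         'share_instance_properties': {...},                 # e.g. availability_zone_id, id
#         'share_id': ...,                                     # present in some tests
#     }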
@mock.patch('manila.db.service_get_all_by_topic') def test_schedule_share_with_instance_properties( self, _mock_service_get_all_by_topic): sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project', is_admin=True) fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) share_type = {'name': 'foo'} request_spec = { 'share_type': share_type, 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {'availability_zone_id': "fake_az"}, } weighed_host = sched._schedule_share(fake_context, request_spec, {}) self.assertIsNone(weighed_host) self.assertTrue(_mock_service_get_all_by_topic.called) def test_max_attempts(self): self.flags(scheduler_max_attempts=4) sched = fakes.FakeFilterScheduler() self.assertEqual(4, sched._max_attempts()) def test_invalid_max_attempts(self): self.flags(scheduler_max_attempts=0) self.assertRaises(exception.InvalidParameterValue, fakes.FakeFilterScheduler) def test_retry_disabled(self): # Retry info should not get populated when re-scheduling is off. self.flags(scheduler_max_attempts=1) sched = fakes.FakeFilterScheduler() request_spec = { 'share_type': {'name': 'iSCSI'}, 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {}, } filter_properties = {} sched._schedule_share(self.context, request_spec, filter_properties=filter_properties) # Should not have retry info in the populated filter properties. self.assertNotIn("retry", filter_properties) def test_retry_attempt_one(self): # Test retry logic on initial scheduling attempt. self.flags(scheduler_max_attempts=2) sched = fakes.FakeFilterScheduler() request_spec = { 'share_type': {'name': 'iSCSI'}, 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {}, } filter_properties = {} sched._schedule_share(self.context, request_spec, filter_properties=filter_properties) num_attempts = filter_properties['retry']['num_attempts'] self.assertEqual(1, num_attempts) def test_retry_attempt_two(self): # Test retry logic when re-scheduling. self.flags(scheduler_max_attempts=2) sched = fakes.FakeFilterScheduler() request_spec = { 'share_type': {'name': 'iSCSI'}, 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {}, } retry = dict(num_attempts=1) filter_properties = dict(retry=retry) sched._schedule_share(self.context, request_spec, filter_properties=filter_properties) num_attempts = filter_properties['retry']['num_attempts'] self.assertEqual(2, num_attempts) def test_retry_exceeded_max_attempts(self): # Test for necessary explosion when max retries is exceeded. self.flags(scheduler_max_attempts=2) sched = fakes.FakeFilterScheduler() request_spec = { 'share_type': {'name': 'iSCSI'}, 'share_properties': {'project_id': 1, 'size': 1}, } retry = dict(num_attempts=2) filter_properties = dict(retry=retry) self.assertRaises(exception.NoValidHost, sched._schedule_share, self.context, request_spec, filter_properties=filter_properties) def test_add_retry_host(self): retry = dict(num_attempts=1, hosts=[]) filter_properties = dict(retry=retry) host = "fakehost" sched = fakes.FakeFilterScheduler() sched._add_retry_host(filter_properties, host) hosts = filter_properties['retry']['hosts'] self.assertEqual(1, len(hosts)) self.assertEqual(host, hosts[0]) def test_post_select_populate(self): # Test addition of certain filter props after a node is selected. 
retry = {'hosts': [], 'num_attempts': 1} filter_properties = {'retry': retry} sched = fakes.FakeFilterScheduler() host_state = host_manager.HostState('host') host_state.total_capacity_gb = 1024 sched._post_select_populate_filter_properties(filter_properties, host_state) self.assertEqual('host', filter_properties['retry']['hosts'][0]) self.assertEqual(1024, host_state.total_capacity_gb) def test_schedule_create_consistency_group(self): # Ensure empty hosts/child_zones result in NoValidHosts exception. sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project') fake_host = 'fake_host' request_spec = {'share_types': [{'id': 'NFS'}]} self.mock_object(sched, "_get_best_host_for_consistency_group", mock.Mock(return_value=fake_host)) fake_updated_group = mock.Mock() self.mock_object(base, "cg_update_db", mock.Mock( return_value=fake_updated_group)) self.mock_object(sched.share_rpcapi, "create_consistency_group") sched.schedule_create_consistency_group(fake_context, 'fake_id', request_spec, {}) sched._get_best_host_for_consistency_group.assert_called_once_with( fake_context, request_spec) base.cg_update_db.assert_called_once_with( fake_context, 'fake_id', fake_host) sched.share_rpcapi.create_consistency_group.assert_called_once_with( fake_context, fake_updated_group, fake_host) def test_create_cg_no_hosts(self): # Ensure empty hosts/child_zones result in NoValidHosts exception. sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project') request_spec = {'share_types': [{'id': 'NFS'}]} self.assertRaises(exception.NoValidHost, sched.schedule_create_consistency_group, fake_context, 'fake_id', request_spec, {}) @mock.patch('manila.db.service_get_all_by_topic') def test_get_weighted_candidates_for_consistency_group( self, _mock_service_get_all_by_topic): sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project') fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) request_spec = {'share_types': [{'name': 'NFS', 'extra_specs': { SNAPSHOT_SUPPORT: True, }}]} hosts = sched._get_weighted_candidates_cg(fake_context, request_spec) self.assertTrue(hosts) @mock.patch('manila.db.service_get_all_by_topic') def test_get_weighted_candidates_for_consistency_group_no_hosts( self, _mock_service_get_all_by_topic): sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project') fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) request_spec = {'share_types': [{'name': 'NFS', 'extra_specs': { SNAPSHOT_SUPPORT: False }}]} hosts = sched._get_weighted_candidates_cg(fake_context, request_spec) self.assertEqual([], hosts) @mock.patch('manila.db.service_get_all_by_topic') def test_get_weighted_candidates_for_consistency_group_many_hosts( self, _mock_service_get_all_by_topic): sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project') fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) request_spec = {'share_types': [{'name': 'NFS', 'extra_specs': { SNAPSHOT_SUPPORT: True }}]} hosts = sched._get_weighted_candidates_cg(fake_context, request_spec) self.assertEqual(2, len(hosts)) def _host_passes_filters_setup(self, mock_obj): sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project', is_admin=True) 
fakes.mock_host_manager_db_calls(mock_obj) return (sched, fake_context) @mock.patch('manila.db.service_get_all_by_topic') def test_host_passes_filters_happy_day(self, _mock_service_get_topic): sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) request_spec = {'share_id': 1, 'share_type': {'name': 'fake_type'}, 'share_instance_properties': {}, 'share_properties': {'project_id': 1, 'size': 1}} ret_host = sched.host_passes_filters(ctx, 'host1#_pool0', request_spec, {}) self.assertEqual('host1#_pool0', ret_host.host) self.assertTrue(_mock_service_get_topic.called) @mock.patch('manila.db.service_get_all_by_topic') def test_host_passes_filters_no_capacity(self, _mock_service_get_topic): sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) request_spec = {'share_id': 1, 'share_type': {'name': 'fake_type'}, 'share_instance_properties': {}, 'share_properties': {'project_id': 1, 'size': 1024}} self.assertRaises(exception.NoValidHost, sched.host_passes_filters, ctx, 'host3#_pool0', request_spec, {}) self.assertTrue(_mock_service_get_topic.called) def test_schedule_create_replica_no_host(self): sched = fakes.FakeFilterScheduler() request_spec = fakes.fake_replica_request_spec() self.mock_object(self.driver_cls, '_schedule_share', mock.Mock(return_value=None)) self.assertRaises(exception.NoValidHost, sched.schedule_create_replica, self.context, request_spec, {}) def test_schedule_create_replica(self): sched = fakes.FakeFilterScheduler() request_spec = fakes.fake_replica_request_spec() host = 'fake_host' replica_id = request_spec['share_instance_properties']['id'] mock_update_db_call = self.mock_object( base, 'share_replica_update_db', mock.Mock(return_value='replica')) mock_share_rpcapi_call = self.mock_object( sched.share_rpcapi, 'create_share_replica') self.mock_object( self.driver_cls, '_schedule_share', mock.Mock(return_value=fakes.get_fake_host(host_name=host))) retval = sched.schedule_create_replica( self.context, fakes.fake_replica_request_spec(), {}) self.assertIsNone(retval) mock_update_db_call.assert_called_once_with( self.context, replica_id, host) mock_share_rpcapi_call.assert_called_once_with( self.context, 'replica', host, request_spec=request_spec, filter_properties={}) manila-2.0.0/manila/tests/scheduler/drivers/test_base.py0000664000567000056710000000754412701407107024506 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Base Scheduler """ import mock from oslo_config import cfg from oslo_utils import timeutils from manila import context from manila import db from manila.scheduler.drivers import base from manila import test from manila import utils CONF = cfg.CONF class SchedulerTestCase(test.TestCase): """Test case for base scheduler driver class.""" # So we can subclass this test and re-use tests if we need. 
driver_cls = base.Scheduler def setUp(self): super(SchedulerTestCase, self).setUp() self.driver = self.driver_cls() self.context = context.RequestContext('fake_user', 'fake_project') self.topic = 'fake_topic' def test_update_service_capabilities(self): service_name = 'fake_service' host = 'fake_host' capabilities = {'fake_capability': 'fake_value'} with mock.patch.object(self.driver.host_manager, 'update_service_capabilities', mock.Mock()): self.driver.update_service_capabilities( service_name, host, capabilities) self.driver.host_manager.update_service_capabilities.\ assert_called_once_with(service_name, host, capabilities) def test_hosts_up(self): service1 = {'host': 'host1'} service2 = {'host': 'host2'} services = [service1, service2] def fake_service_is_up(*args, **kwargs): if args[0]['host'] == 'host1': return False return True with mock.patch.object(db, 'service_get_all_by_topic', mock.Mock(return_value=services)): with mock.patch.object(utils, 'service_is_up', mock.Mock(side_effect=fake_service_is_up)): result = self.driver.hosts_up(self.context, self.topic) self.assertEqual(['host2'], result) db.service_get_all_by_topic.assert_called_once_with( self.context, self.topic) class SchedulerDriverBaseTestCase(SchedulerTestCase): """Test cases for base scheduler driver class methods. These can't fail if the driver is changed. """ def test_unimplemented_schedule(self): fake_args = (1, 2, 3) fake_kwargs = {'cat': 'meow'} self.assertRaises(NotImplementedError, self.driver.schedule, self.context, self.topic, 'schedule_something', *fake_args, **fake_kwargs) class SchedulerDriverModuleTestCase(test.TestCase): """Test case for scheduler driver module methods.""" def setUp(self): super(SchedulerDriverModuleTestCase, self).setUp() self.context = context.RequestContext('fake_user', 'fake_project') @mock.patch.object(db, 'share_update', mock.Mock()) def test_share_host_update_db(self): with mock.patch.object(timeutils, 'utcnow', mock.Mock(return_value='fake-now')): base.share_update_db(self.context, 31337, 'fake_host') db.share_update.assert_called_once_with( self.context, 31337, {'host': 'fake_host', 'scheduled_at': 'fake-now'}) manila-2.0.0/manila/tests/scheduler/drivers/test_simple.py0000664000567000056710000001607312701407107025062 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
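# The test cases that follow drive simple.SimpleScheduler.schedule_create_share()
# against mocked db.service_get_all_share_sorted() and utils.service_is_up()
# results: the share is expected to land on the first enabled, up and
# AZ-compatible host from the sorted list, while NoValidHost is expected when
# no service qualifies or the requested size exceeds the max-gigabytes limit.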
""" Tests For Simple Scheduler """ import mock from oslo_config import cfg from manila import context from manila import db from manila import exception from manila.scheduler.drivers import base from manila.scheduler.drivers import simple from manila.share import rpcapi as share_rpcapi from manila import test from manila.tests import db_utils from manila import utils CONF = cfg.CONF class SimpleSchedulerSharesTestCase(test.TestCase): """Test case for simple scheduler create share method.""" def setUp(self): super(SimpleSchedulerSharesTestCase, self).setUp() self.mock_object(share_rpcapi, 'ShareAPI') self.driver = simple.SimpleScheduler() self.context = context.RequestContext('fake_user', 'fake_project') self.admin_context = context.RequestContext('fake_admin_user', 'fake_project') self.admin_context.is_admin = True @mock.patch.object(utils, 'service_is_up', mock.Mock(return_value=True)) def test_create_share_if_two_services_up(self): share_id = 'fake' fake_share = {'id': share_id, 'size': 1} fake_service_1 = {'disabled': False, 'host': 'fake_host1'} fake_service_2 = {'disabled': False, 'host': 'fake_host2'} fake_result = [(fake_service_1, 2), (fake_service_2, 1)] fake_request_spec = { 'share_id': share_id, 'share_properties': fake_share, } self.mock_object(db, 'service_get_all_share_sorted', mock.Mock(return_value=fake_result)) self.mock_object(base, 'share_update_db', mock.Mock(return_value=db_utils.create_share())) self.driver.schedule_create_share(self.context, fake_request_spec, {}) utils.service_is_up.assert_called_once_with(utils.IsAMatcher(dict)) db.service_get_all_share_sorted.assert_called_once_with( utils.IsAMatcher(context.RequestContext)) base.share_update_db.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_id, 'fake_host1') def test_create_share_if_services_not_available(self): share_id = 'fake' fake_share = {'id': share_id, 'size': 1} fake_result = [] fake_request_spec = { 'share_id': share_id, 'share_properties': fake_share, } with mock.patch.object(db, 'service_get_all_share_sorted', mock.Mock(return_value=fake_result)): self.assertRaises(exception.NoValidHost, self.driver.schedule_create_share, self.context, fake_request_spec, {}) db.service_get_all_share_sorted.assert_called_once_with( utils.IsAMatcher(context.RequestContext)) def test_create_share_if_max_gigabytes_exceeded(self): share_id = 'fake' fake_share = {'id': share_id, 'size': 10001} fake_service_1 = {'disabled': False, 'host': 'fake_host1'} fake_service_2 = {'disabled': False, 'host': 'fake_host2'} fake_result = [(fake_service_1, 5), (fake_service_2, 7)] fake_request_spec = { 'share_id': share_id, 'share_properties': fake_share, } with mock.patch.object(db, 'service_get_all_share_sorted', mock.Mock(return_value=fake_result)): self.assertRaises(exception.NoValidHost, self.driver.schedule_create_share, self.context, fake_request_spec, {}) db.service_get_all_share_sorted.assert_called_once_with( utils.IsAMatcher(context.RequestContext)) @mock.patch.object(utils, 'service_is_up', mock.Mock(return_value=True)) def test_create_share_availability_zone(self): share_id = 'fake' fake_share = { 'id': share_id, 'size': 1, } fake_instance = { 'availability_zone_id': 'fake', } fake_service_1 = { 'disabled': False, 'host': 'fake_host1', 'availability_zone_id': 'fake', } fake_service_2 = { 'disabled': False, 'host': 'fake_host2', 'availability_zone_id': 'super_fake', } fake_result = [(fake_service_1, 0), (fake_service_2, 1)] fake_request_spec = { 'share_id': share_id, 'share_properties': fake_share, 
'share_instance_properties': fake_instance, } self.mock_object(db, 'service_get_all_share_sorted', mock.Mock(return_value=fake_result)) self.mock_object(base, 'share_update_db', mock.Mock(return_value=db_utils.create_share())) self.driver.schedule_create_share(self.context, fake_request_spec, {}) utils.service_is_up.assert_called_once_with(fake_service_1) base.share_update_db.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_id, fake_service_1['host']) db.service_get_all_share_sorted.assert_called_once_with( utils.IsAMatcher(context.RequestContext)) @mock.patch.object(utils, 'service_is_up', mock.Mock(return_value=True)) def test_create_share_availability_zone_on_host(self): share_id = 'fake' fake_share = { 'id': share_id, 'availability_zone': 'fake:fake', 'size': 1, } fake_service = {'disabled': False, 'host': 'fake'} fake_request_spec = { 'share_id': share_id, 'share_properties': fake_share, } self.mock_object(db, 'service_get_all_share_sorted', mock.Mock(return_value=[(fake_service, 1)])) self.mock_object(base, 'share_update_db', mock.Mock(return_value=db_utils.create_share())) self.driver.schedule_create_share(self.admin_context, fake_request_spec, {}) utils.service_is_up.assert_called_once_with(fake_service) db.service_get_all_share_sorted.assert_called_once_with( utils.IsAMatcher(context.RequestContext)) base.share_update_db.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_id, 'fake') manila-2.0.0/manila/tests/scheduler/test_host_manager.py0000664000567000056710000010125312701407107024555 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack, LLC # Copyright (c) 2015 Rushil Chugh # Copyright (c) 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Tests For HostManager """ import copy import ddt import mock from oslo_config import cfg from oslo_utils import timeutils from six import moves from manila import db from manila import exception from manila.scheduler.filters import base_host from manila.scheduler import host_manager from manila import test from manila.tests.scheduler import fakes from manila import utils CONF = cfg.CONF class FakeFilterClass1(base_host.BaseHostFilter): def host_passes(self, host_state, filter_properties): pass class FakeFilterClass2(base_host.BaseHostFilter): def host_passes(self, host_state, filter_properties): pass @ddt.ddt class HostManagerTestCase(test.TestCase): """Test case for HostManager class.""" def setUp(self): super(HostManagerTestCase, self).setUp() self.host_manager = host_manager.HostManager() self.fake_hosts = [host_manager.HostState('fake_host%s' % x) for x in moves.range(1, 5)] def test_choose_host_filters_not_found(self): self.flags(scheduler_default_filters='FakeFilterClass3') self.host_manager.filter_classes = [FakeFilterClass1, FakeFilterClass2] self.assertRaises(exception.SchedulerHostFilterNotFound, self.host_manager._choose_host_filters, None) def test_choose_host_filters(self): self.flags(scheduler_default_filters=['FakeFilterClass2']) self.host_manager.filter_classes = [FakeFilterClass1, FakeFilterClass2] # Test 'share' returns 1 correct function filter_classes = self.host_manager._choose_host_filters(None) self.assertEqual(1, len(filter_classes)) self.assertEqual('FakeFilterClass2', filter_classes[0].__name__) def _verify_result(self, info, result): for x in info['got_fprops']: self.assertEqual(info['expected_fprops'], x) self.assertEqual(set(info['expected_objs']), set(info['got_objs'])) self.assertEqual(set(info['got_objs']), set(result)) def test_get_filtered_hosts(self): fake_properties = {'moo': 1, 'cow': 2} info = { 'expected_objs': self.fake_hosts, 'expected_fprops': fake_properties, } with mock.patch.object(self.host_manager, '_choose_host_filters', mock.Mock(return_value=[FakeFilterClass1])): info['got_objs'] = [] info['got_fprops'] = [] def fake_filter_one(_self, obj, filter_props): info['got_objs'].append(obj) info['got_fprops'].append(filter_props) return True self.mock_object(FakeFilterClass1, '_filter_one', fake_filter_one) result = self.host_manager.get_filtered_hosts(self.fake_hosts, fake_properties) self._verify_result(info, result) self.host_manager._choose_host_filters.assert_called_once_with( mock.ANY) def test_update_service_capabilities_for_shares(self): service_states = self.host_manager.service_states self.assertDictMatch(service_states, {}) host1_share_capabs = dict(free_capacity_gb=4321, timestamp=1) host2_share_capabs = dict(free_capacity_gb=5432, timestamp=1) host3_share_capabs = dict(free_capacity_gb=6543, timestamp=1) service_name = 'share' with mock.patch.object(timeutils, 'utcnow', mock.Mock(return_value=31337)): self.host_manager.update_service_capabilities( service_name, 'host1', host1_share_capabs) timeutils.utcnow.assert_called_once_with() with mock.patch.object(timeutils, 'utcnow', mock.Mock(return_value=31338)): self.host_manager.update_service_capabilities( service_name, 'host2', host2_share_capabs) timeutils.utcnow.assert_called_once_with() with mock.patch.object(timeutils, 'utcnow', mock.Mock(return_value=31339)): self.host_manager.update_service_capabilities( service_name, 'host3', host3_share_capabs) timeutils.utcnow.assert_called_once_with() # Make sure dictionary isn't re-assigned self.assertEqual(service_states, 
self.host_manager.service_states) # Make sure original dictionary wasn't copied self.assertEqual(1, host1_share_capabs['timestamp']) host1_share_capabs['timestamp'] = 31337 host2_share_capabs['timestamp'] = 31338 host3_share_capabs['timestamp'] = 31339 expected = { 'host1': host1_share_capabs, 'host2': host2_share_capabs, 'host3': host3_share_capabs, } self.assertDictMatch(service_states, expected) def test_get_all_host_states_share(self): context = 'fake_context' topic = CONF.share_topic tmp_pools = copy.deepcopy(fakes.SHARE_SERVICES_WITH_POOLS) tmp_enable_pools = tmp_pools[:-2] self.mock_object( db, 'service_get_all_by_topic', mock.Mock(return_value=tmp_enable_pools)) self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True)) with mock.patch.dict(self.host_manager.service_states, fakes.SHARE_SERVICE_STATES_WITH_POOLS): # Get service self.host_manager.get_all_host_states_share(context) # Disabled one service tmp_enable_pools.pop() self.mock_object( db, 'service_get_all_by_topic', mock.Mock(return_value=tmp_enable_pools)) # Get service again self.host_manager.get_all_host_states_share(context) host_state_map = self.host_manager.host_state_map self.assertEqual(3, len(host_state_map)) # Check that service is up for i in moves.range(3): share_node = fakes.SHARE_SERVICES_WITH_POOLS[i] host = share_node['host'] self.assertEqual(share_node, host_state_map[host].service) db.service_get_all_by_topic.assert_called_once_with(context, topic) def test_get_pools_no_pools(self): context = 'fake_context' self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True)) self.mock_object( db, 'service_get_all_by_topic', mock.Mock(return_value=fakes.SHARE_SERVICES_NO_POOLS)) host_manager.LOG.warning = mock.Mock() with mock.patch.dict(self.host_manager.service_states, fakes.SERVICE_STATES_NO_POOLS): res = self.host_manager.get_pools(context) expected = [ { 'name': 'host1#AAA', 'host': 'host1', 'backend': None, 'pool': 'AAA', 'capabilities': { 'timestamp': None, 'share_backend_name': 'AAA', 'free_capacity_gb': 200, 'driver_version': None, 'total_capacity_gb': 512, 'reserved_percentage': 0, 'provisioned_capacity_gb': 312, 'max_over_subscription_ratio': 1.0, 'thin_provisioning': False, 'vendor_name': None, 'storage_protocol': None, 'driver_handles_share_servers': False, 'snapshot_support': False, 'consistency_group_support': False, 'dedupe': False, 'compression': False, 'replication_type': None, 'replication_domain': None, }, }, { 'name': 'host2@back1#BBB', 'host': 'host2', 'backend': 'back1', 'pool': 'BBB', 'capabilities': { 'timestamp': None, 'share_backend_name': 'BBB', 'free_capacity_gb': 100, 'driver_version': None, 'total_capacity_gb': 256, 'reserved_percentage': 0, 'provisioned_capacity_gb': 400, 'max_over_subscription_ratio': 2.0, 'thin_provisioning': True, 'vendor_name': None, 'storage_protocol': None, 'driver_handles_share_servers': False, 'snapshot_support': True, 'consistency_group_support': False, 'dedupe': False, 'compression': False, 'replication_type': None, 'replication_domain': None, }, }, { 'name': 'host2@back2#CCC', 'host': 'host2', 'backend': 'back2', 'pool': 'CCC', 'capabilities': { 'timestamp': None, 'share_backend_name': 'CCC', 'free_capacity_gb': 700, 'driver_version': None, 'total_capacity_gb': 10000, 'reserved_percentage': 0, 'provisioned_capacity_gb': 50000, 'max_over_subscription_ratio': 20.0, 'thin_provisioning': True, 'vendor_name': None, 'storage_protocol': None, 'driver_handles_share_servers': False, 'snapshot_support': True, 'consistency_group_support': False, 
'dedupe': False, 'compression': False, 'replication_type': None, 'replication_domain': None, }, }, ] self.assertIsInstance(res, list) self.assertEqual(len(expected), len(res)) for pool in expected: self.assertIn(pool, res) def test_get_pools(self): context = 'fake_context' self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True)) self.mock_object( db, 'service_get_all_by_topic', mock.Mock(return_value=fakes.SHARE_SERVICES_WITH_POOLS)) host_manager.LOG.warning = mock.Mock() with mock.patch.dict(self.host_manager.service_states, fakes.SHARE_SERVICE_STATES_WITH_POOLS): res = self.host_manager.get_pools(context) expected = [ { 'name': 'host1@AAA#pool1', 'host': 'host1', 'backend': 'AAA', 'pool': 'pool1', 'capabilities': { 'pool_name': 'pool1', 'timestamp': None, 'share_backend_name': 'AAA', 'free_capacity_gb': 41, 'driver_version': None, 'total_capacity_gb': 51, 'reserved_percentage': 0, 'provisioned_capacity_gb': 10, 'max_over_subscription_ratio': 1.0, 'thin_provisioning': False, 'vendor_name': None, 'storage_protocol': None, 'driver_handles_share_servers': False, 'snapshot_support': True, 'consistency_group_support': False, 'dedupe': False, 'compression': False, 'replication_type': None, 'replication_domain': None, }, }, { 'name': 'host2@BBB#pool2', 'host': 'host2', 'backend': 'BBB', 'pool': 'pool2', 'capabilities': { 'pool_name': 'pool2', 'timestamp': None, 'share_backend_name': 'BBB', 'free_capacity_gb': 42, 'driver_version': None, 'total_capacity_gb': 52, 'reserved_percentage': 0, 'provisioned_capacity_gb': 60, 'max_over_subscription_ratio': 2.0, 'thin_provisioning': True, 'vendor_name': None, 'storage_protocol': None, 'driver_handles_share_servers': False, 'snapshot_support': True, 'consistency_group_support': False, 'dedupe': False, 'compression': False, 'replication_type': None, 'replication_domain': None, }, }, { 'name': 'host3@CCC#pool3', 'host': 'host3', 'backend': 'CCC', 'pool': 'pool3', 'capabilities': { 'pool_name': 'pool3', 'timestamp': None, 'share_backend_name': 'CCC', 'free_capacity_gb': 43, 'driver_version': None, 'total_capacity_gb': 53, 'reserved_percentage': 0, 'provisioned_capacity_gb': 100, 'max_over_subscription_ratio': 20.0, 'thin_provisioning': True, 'vendor_name': None, 'storage_protocol': None, 'driver_handles_share_servers': False, 'snapshot_support': True, 'consistency_group_support': 'pool', 'dedupe': False, 'compression': False, 'replication_type': None, 'replication_domain': None, }, }, { 'name': 'host4@DDD#pool4a', 'host': 'host4', 'backend': 'DDD', 'pool': 'pool4a', 'capabilities': { 'pool_name': 'pool4a', 'timestamp': None, 'share_backend_name': 'DDD', 'free_capacity_gb': 441, 'driver_version': None, 'total_capacity_gb': 541, 'reserved_percentage': 0, 'provisioned_capacity_gb': 800, 'max_over_subscription_ratio': 2.0, 'thin_provisioning': True, 'vendor_name': None, 'storage_protocol': None, 'driver_handles_share_servers': False, 'snapshot_support': True, 'consistency_group_support': 'host', 'dedupe': False, 'compression': False, 'replication_type': None, 'replication_domain': None, }, }, { 'name': 'host4@DDD#pool4b', 'host': 'host4', 'backend': 'DDD', 'pool': 'pool4b', 'capabilities': { 'pool_name': 'pool4b', 'timestamp': None, 'share_backend_name': 'DDD', 'free_capacity_gb': 442, 'driver_version': None, 'total_capacity_gb': 542, 'reserved_percentage': 0, 'provisioned_capacity_gb': 2000, 'max_over_subscription_ratio': 10.0, 'thin_provisioning': True, 'vendor_name': None, 'storage_protocol': None, 'driver_handles_share_servers': False, 
'snapshot_support': True, 'consistency_group_support': 'host', 'dedupe': False, 'compression': False, 'replication_type': None, 'replication_domain': None, }, }, ] self.assertIsInstance(res, list) self.assertIsInstance(self.host_manager.host_state_map, dict) self.assertEqual(len(expected), len(res)) for pool in expected: self.assertIn(pool, res) def test_get_pools_host_down(self): context = 'fake_context' mock_service_is_up = self.mock_object(utils, 'service_is_up') self.mock_object( db, 'service_get_all_by_topic', mock.Mock(return_value=fakes.SHARE_SERVICES_NO_POOLS)) host_manager.LOG.warning = mock.Mock() with mock.patch.dict(self.host_manager.service_states, fakes.SERVICE_STATES_NO_POOLS): # Initialize host data with all services present mock_service_is_up.side_effect = [True, True, True] # Call once to update the host state map self.host_manager.get_pools(context) self.assertEqual(len(fakes.SHARE_SERVICES_NO_POOLS), len(self.host_manager.host_state_map)) # Then mock one host as down mock_service_is_up.side_effect = [True, True, False] res = self.host_manager.get_pools(context) expected = [ { 'name': 'host1#AAA', 'host': 'host1', 'backend': None, 'pool': 'AAA', 'capabilities': { 'timestamp': None, 'driver_handles_share_servers': False, 'snapshot_support': False, 'share_backend_name': 'AAA', 'free_capacity_gb': 200, 'driver_version': None, 'total_capacity_gb': 512, 'reserved_percentage': 0, 'vendor_name': None, 'storage_protocol': None, 'provisioned_capacity_gb': 312, 'max_over_subscription_ratio': 1.0, 'thin_provisioning': False, 'consistency_group_support': False, 'dedupe': False, 'compression': False, 'replication_type': None, 'replication_domain': None, }, }, { 'name': 'host2@back1#BBB', 'host': 'host2', 'backend': 'back1', 'pool': 'BBB', 'capabilities': { 'timestamp': None, 'driver_handles_share_servers': False, 'snapshot_support': True, 'share_backend_name': 'BBB', 'free_capacity_gb': 100, 'driver_version': None, 'total_capacity_gb': 256, 'reserved_percentage': 0, 'vendor_name': None, 'storage_protocol': None, 'provisioned_capacity_gb': 400, 'max_over_subscription_ratio': 2.0, 'thin_provisioning': True, 'consistency_group_support': False, 'dedupe': False, 'compression': False, 'replication_type': None, 'replication_domain': None, }, }, ] self.assertIsInstance(res, list) self.assertIsInstance(self.host_manager.host_state_map, dict) self.assertEqual(len(expected), len(res)) self.assertEqual(len(expected), len(self.host_manager.host_state_map)) for pool in expected: self.assertIn(pool, res) def test_get_pools_with_filters(self): context = 'fake_context' self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True)) self.mock_object( db, 'service_get_all_by_topic', mock.Mock(return_value=fakes.SHARE_SERVICES_WITH_POOLS)) host_manager.LOG.warning = mock.Mock() with mock.patch.dict(self.host_manager.service_states, fakes.SHARE_SERVICE_STATES_WITH_POOLS): res = self.host_manager.get_pools( context, filters={'host': 'host2', 'pool': 'pool*'}) expected = [ { 'name': 'host2@BBB#pool2', 'host': 'host2', 'backend': 'BBB', 'pool': 'pool2', 'capabilities': { 'pool_name': 'pool2', 'timestamp': None, 'driver_handles_share_servers': False, 'snapshot_support': True, 'share_backend_name': 'BBB', 'free_capacity_gb': 42, 'driver_version': None, 'total_capacity_gb': 52, 'reserved_percentage': 0, 'provisioned_capacity_gb': 60, 'max_over_subscription_ratio': 2.0, 'thin_provisioning': True, 'vendor_name': None, 'storage_protocol': None, 'consistency_group_support': False, 'dedupe': False, 
'compression': False, 'replication_type': None, 'replication_domain': None, }, }, ] self.assertEqual(len(expected), len(res)) self.assertEqual(sorted(expected), sorted(res)) @ddt.data( None, {}, {'key1': 'value1'}, {'key1': 'value1', 'key2': 'value*'}, {'key1': '.*', 'key2': '.*'}, ) def test_passes_filters_true(self, filter): data = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'} self.assertTrue(self.host_manager._passes_filters(data, filter)) @ddt.data( {'key1': 'value$'}, {'key4': 'value'}, {'key1': 'value1.+', 'key2': 'value*'}, ) def test_passes_filters_false(self, filter): data = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'} self.assertFalse(self.host_manager._passes_filters(data, filter)) class HostStateTestCase(test.TestCase): """Test case for HostState class.""" def test_update_from_share_capability_nopool(self): share_capability = {'total_capacity_gb': 0, 'free_capacity_gb': 100, 'reserved_percentage': 0, 'timestamp': None} fake_host = host_manager.HostState('host1', share_capability) self.assertIsNone(fake_host.free_capacity_gb) fake_host.update_from_share_capability(share_capability) # Backend level stats remain uninitialized self.assertEqual(0, fake_host.total_capacity_gb) self.assertIsNone(fake_host.free_capacity_gb) # Pool stats has been updated self.assertEqual(0, fake_host.pools['_pool0'].total_capacity_gb) self.assertEqual(100, fake_host.pools['_pool0'].free_capacity_gb) # Test update for existing host state share_capability.update(dict(total_capacity_gb=1000)) fake_host.update_from_share_capability(share_capability) self.assertEqual(1000, fake_host.pools['_pool0'].total_capacity_gb) # Test update for existing host state with different backend name share_capability.update(dict(share_backend_name='magic')) fake_host.update_from_share_capability(share_capability) self.assertEqual(1000, fake_host.pools['magic'].total_capacity_gb) self.assertEqual(100, fake_host.pools['magic'].free_capacity_gb) # 'pool0' becomes nonactive pool, and is deleted self.assertRaises(KeyError, lambda: fake_host.pools['pool0']) def test_update_from_share_capability_with_pools(self): fake_host = host_manager.HostState('host1#pool1') self.assertIsNone(fake_host.free_capacity_gb) capability = { 'share_backend_name': 'Backend1', 'vendor_name': 'OpenStack', 'driver_version': '1.1', 'storage_protocol': 'NFS_CIFS', 'pools': [ {'pool_name': 'pool1', 'total_capacity_gb': 500, 'free_capacity_gb': 230, 'allocated_capacity_gb': 270, 'qos': 'False', 'reserved_percentage': 0, 'dying_disks': 100, 'super_hero_1': 'spider-man', 'super_hero_2': 'flash', 'super_hero_3': 'neoncat', }, {'pool_name': 'pool2', 'total_capacity_gb': 1024, 'free_capacity_gb': 1024, 'allocated_capacity_gb': 0, 'qos': 'False', 'reserved_percentage': 0, 'dying_disks': 200, 'super_hero_1': 'superman', 'super_hero_2': 'Hulk', } ], 'timestamp': None, } fake_host.update_from_share_capability(capability) self.assertEqual('Backend1', fake_host.share_backend_name) self.assertEqual('NFS_CIFS', fake_host.storage_protocol) self.assertEqual('OpenStack', fake_host.vendor_name) self.assertEqual('1.1', fake_host.driver_version) # Backend level stats remain uninitialized self.assertEqual(0, fake_host.total_capacity_gb) self.assertIsNone(fake_host.free_capacity_gb) # Pool stats has been updated self.assertEqual(2, len(fake_host.pools)) self.assertEqual(500, fake_host.pools['pool1'].total_capacity_gb) self.assertEqual(230, fake_host.pools['pool1'].free_capacity_gb) self.assertEqual(1024, fake_host.pools['pool2'].total_capacity_gb) 
self.assertEqual(1024, fake_host.pools['pool2'].free_capacity_gb) capability = { 'share_backend_name': 'Backend1', 'vendor_name': 'OpenStack', 'driver_version': '1.0', 'storage_protocol': 'NFS_CIFS', 'pools': [ {'pool_name': 'pool3', 'total_capacity_gb': 10000, 'free_capacity_gb': 10000, 'allocated_capacity_gb': 0, 'qos': 'False', 'reserved_percentage': 0, }, ], 'timestamp': None, } # test update HostState Record fake_host.update_from_share_capability(capability) self.assertEqual('1.0', fake_host.driver_version) # Non-active pool stats has been removed self.assertEqual(1, len(fake_host.pools)) self.assertRaises(KeyError, lambda: fake_host.pools['pool1']) self.assertRaises(KeyError, lambda: fake_host.pools['pool2']) self.assertEqual(10000, fake_host.pools['pool3'].total_capacity_gb) self.assertEqual(10000, fake_host.pools['pool3'].free_capacity_gb) def test_update_from_share_unknown_capability(self): share_capability = { 'total_capacity_gb': 'unknown', 'free_capacity_gb': 'unknown', 'reserved_percentage': 0, 'timestamp': None } fake_host = host_manager.HostState('host1#_pool0') self.assertIsNone(fake_host.free_capacity_gb) fake_host.update_from_share_capability(share_capability) # Backend level stats remain uninitialized self.assertEqual(fake_host.total_capacity_gb, 0) self.assertIsNone(fake_host.free_capacity_gb) # Pool stats has been updated self.assertEqual(fake_host.pools['_pool0'].total_capacity_gb, 'unknown') self.assertEqual(fake_host.pools['_pool0'].free_capacity_gb, 'unknown') def test_consume_from_share_capability(self): share_size = 10 free_capacity = 100 fake_share = {'id': 'foo', 'size': share_size} share_capability = { 'total_capacity_gb': free_capacity * 2, 'free_capacity_gb': free_capacity, 'reserved_percentage': 0, 'timestamp': None } fake_host = host_manager.PoolState('host1', share_capability, '_pool0') fake_host.update_from_share_capability(share_capability) fake_host.consume_from_share(fake_share) self.assertEqual(fake_host.free_capacity_gb, free_capacity - share_size) def test_consume_from_share_unknown_capability(self): share_capability = { 'total_capacity_gb': 'unknown', 'free_capacity_gb': 'unknown', 'reserved_percentage': 0, 'timestamp': None } fake_host = host_manager.PoolState('host1', share_capability, '_pool0') share_size = 1000 fake_share = {'id': 'foo', 'size': share_size} fake_host.update_from_share_capability(share_capability) fake_host.consume_from_share(fake_share) self.assertEqual(fake_host.total_capacity_gb, 'unknown') self.assertEqual(fake_host.free_capacity_gb, 'unknown') def test_consume_from_share_invalid_capacity(self): fake_host = host_manager.PoolState('host1', {}, '_pool0') fake_host.free_capacity_gb = 'invalid_foo_string' self.assertRaises(exception.InvalidCapacity, fake_host.consume_from_share, 'fake') def test_repr(self): capability = { 'share_backend_name': 'Backend1', 'vendor_name': 'OpenStack', 'driver_version': '1.0', 'storage_protocol': 'NFS_CIFS', 'total_capacity_gb': 20000, 'free_capacity_gb': 15000, 'allocated_capacity_gb': 5000, 'timestamp': None, 'reserved_percentage': 0, } fake_host = host_manager.HostState('host1') fake_host.update_from_share_capability(capability) result = fake_host.__repr__() expected = "host: 'host1', free_capacity_gb: None, " \ "pools: {'Backend1': host: 'host1#Backend1', " \ "free_capacity_gb: 15000, pools: None}" self.assertEqual(expected, result) class PoolStateTestCase(test.TestCase): """Test case for PoolState class.""" def test_update_from_share_capability(self): share_capability = { 'total_capacity_gb': 
1024, 'free_capacity_gb': 512, 'reserved_percentage': 0, 'timestamp': None, 'cap1': 'val1', 'cap2': 'val2' } fake_pool = host_manager.PoolState('host1', None, 'pool0') self.assertIsNone(fake_pool.free_capacity_gb) fake_pool.update_from_share_capability(share_capability) self.assertEqual(fake_pool.host, 'host1#pool0') self.assertEqual(fake_pool.pool_name, 'pool0') self.assertEqual(fake_pool.total_capacity_gb, 1024) self.assertEqual(fake_pool.free_capacity_gb, 512) self.assertDictMatch(fake_pool.capabilities, share_capability) manila-2.0.0/manila/tests/scheduler/test_manager.py0000664000567000056710000003250712701407107023525 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Scheduler Manager """ import ddt import mock from oslo_config import cfg from manila.common import constants from manila import context from manila import db from manila import exception from manila.scheduler.drivers import base from manila.scheduler.drivers import filter from manila.scheduler import manager from manila.share import rpcapi as share_rpcapi from manila import test from manila.tests import db_utils from manila.tests import fake_share as fakes CONF = cfg.CONF @ddt.ddt class SchedulerManagerTestCase(test.TestCase): """Test case for scheduler manager.""" manager_cls = manager.SchedulerManager driver_cls = base.Scheduler driver_cls_name = 'manila.scheduler.drivers.base.Scheduler' def setUp(self): super(SchedulerManagerTestCase, self).setUp() self.flags(scheduler_driver=self.driver_cls_name) self.manager = self.manager_cls() self.context = context.RequestContext('fake_user', 'fake_project') self.topic = 'fake_topic' self.fake_args = (1, 2, 3) self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'} def raise_no_valid_host(*args, **kwargs): raise exception.NoValidHost(reason="") def test_1_correct_init(self): # Correct scheduler driver manager = self.manager self.assertIsInstance(manager.driver, self.driver_cls) @ddt.data('manila.scheduler.filter_scheduler.FilterScheduler', 'manila.scheduler.drivers.filter.FilterScheduler') def test_scheduler_driver_mapper(self, driver_class): test_manager = manager.SchedulerManager(scheduler_driver=driver_class) self.assertIsInstance(test_manager.driver, filter.FilterScheduler) def test_init_host(self): self.mock_object(context, 'get_admin_context', mock.Mock(return_value='fake_admin_context')) self.mock_object(self.manager, 'request_service_capabilities') self.manager.init_host() self.manager.request_service_capabilities.assert_called_once_with( 'fake_admin_context') def test_get_host_list(self): self.mock_object(self.manager.driver, 'get_host_list') self.manager.get_host_list(context) self.manager.driver.get_host_list.assert_called_once_with() def test_get_service_capabilities(self): self.mock_object(self.manager.driver, 'get_service_capabilities') self.manager.get_service_capabilities(context) 
self.manager.driver.get_service_capabilities.assert_called_once_with() def test_update_service_capabilities(self): service_name = 'fake_service' host = 'fake_host' with mock.patch.object(self.manager.driver, 'update_service_capabilities', mock.Mock()): self.manager.update_service_capabilities( self.context, service_name=service_name, host=host) (self.manager.driver.update_service_capabilities. assert_called_once_with(service_name, host, {})) with mock.patch.object(self.manager.driver, 'update_service_capabilities', mock.Mock()): capabilities = {'fake_capability': 'fake_value'} self.manager.update_service_capabilities( self.context, service_name=service_name, host=host, capabilities=capabilities) (self.manager.driver.update_service_capabilities. assert_called_once_with(service_name, host, capabilities)) @mock.patch.object(db, 'share_update', mock.Mock()) def test_create_share_exception_puts_share_in_error_state(self): """Test NoValidHost exception for create_share. Puts the share in 'error' state and eats the exception. """ fake_share_id = 1 request_spec = {'share_id': fake_share_id} with mock.patch.object( self.manager.driver, 'schedule_create_share', mock.Mock(side_effect=self.raise_no_valid_host)): self.mock_object(manager.LOG, 'error') self.manager.create_share_instance( self.context, request_spec=request_spec, filter_properties={}) db.share_update.assert_called_once_with( self.context, fake_share_id, {'status': 'error'}) (self.manager.driver.schedule_create_share. assert_called_once_with(self.context, request_spec, {})) manager.LOG.error.assert_called_once_with(mock.ANY, mock.ANY) @mock.patch.object(db, 'share_update', mock.Mock()) def test_create_share_other_exception_puts_share_in_error_state(self): """Test any exception except NoValidHost for create_share. Puts the share in 'error' state and re-raises the exception. """ fake_share_id = 1 request_spec = {'share_id': fake_share_id} with mock.patch.object(self.manager.driver, 'schedule_create_share', mock.Mock(side_effect=exception.QuotaError)): self.mock_object(manager.LOG, 'error') self.assertRaises(exception.QuotaError, self.manager.create_share_instance, self.context, request_spec=request_spec, filter_properties={}) db.share_update.assert_called_once_with( self.context, fake_share_id, {'status': 'error'}) (self.manager.driver.schedule_create_share. assert_called_once_with(self.context, request_spec, {})) manager.LOG.error.assert_called_once_with(mock.ANY, mock.ANY) def test_get_pools(self): """Ensure get_pools exists and calls base_scheduler.get_pools.""" mock_get_pools = self.mock_object(self.manager.driver, 'get_pools', mock.Mock(return_value='fake_pools')) result = self.manager.get_pools(self.context, filters='fake_filters') mock_get_pools.assert_called_once_with(self.context, 'fake_filters') self.assertEqual('fake_pools', result) @mock.patch.object(db, 'consistency_group_update', mock.Mock()) def test_create_cg_no_valid_host_puts_cg_in_error_state(self): """Test NoValidHost exception for create_consistency_group. Puts the consistency group in 'error' state and eats the exception. 
""" fake_cg_id = 1 cg_id = fake_cg_id request_spec = {"consistency_group_id": cg_id} with mock.patch.object( self.manager.driver, 'schedule_create_consistency_group', mock.Mock(side_effect=self.raise_no_valid_host)): self.manager.create_consistency_group(self.context, fake_cg_id, request_spec=request_spec, filter_properties={}) db.consistency_group_update.assert_called_once_with( self.context, fake_cg_id, {'status': 'error'}) (self.manager.driver.schedule_create_consistency_group. assert_called_once_with(self.context, cg_id, request_spec, {})) @mock.patch.object(db, 'consistency_group_update', mock.Mock()) def test_create_cg_exception_puts_cg_in_error_state(self): """Test any exception except NoValidHost for create_consistency_group. Puts the consistency group in 'error' state and re-raises the exception. """ fake_cg_id = 1 cg_id = fake_cg_id request_spec = {"consistency_group_id": cg_id} with mock.patch.object(self.manager.driver, 'schedule_create_consistency_group', mock.Mock(side_effect=exception.NotFound)): self.assertRaises(exception.NotFound, self.manager.create_consistency_group, self.context, fake_cg_id, request_spec=request_spec, filter_properties={}) def test_migrate_share_to_host(self): share = db_utils.create_share() host = 'fake@backend#pool' self.mock_object(db, 'share_get', mock.Mock(return_value=share)) self.mock_object(share_rpcapi.ShareAPI, 'migration_start') self.mock_object(base.Scheduler, 'host_passes_filters', mock.Mock(return_value=host)) self.manager.migrate_share_to_host(self.context, share['id'], host, False, True, {}, None) def test_migrate_share_to_host_no_valid_host(self): share = db_utils.create_share() host = 'fake@backend#pool' self.mock_object( base.Scheduler, 'host_passes_filters', mock.Mock(side_effect=[exception.NoValidHost('fake')])) self.assertRaises( exception.NoValidHost, self.manager.migrate_share_to_host, self.context, share['id'], host, False, True, {}, None) def test_manage_share(self): share = db_utils.create_share() self.mock_object(db, 'share_get', mock.Mock(return_value=share)) self.mock_object(share_rpcapi.ShareAPI, 'manage_share') self.mock_object(base.Scheduler, 'host_passes_filters') self.manager.manage_share(self.context, share['id'], 'driver_options', {}, None) def test_manage_share_exception(self): share = db_utils.create_share() self.mock_object( base.Scheduler, 'host_passes_filters', mock.Mock(side_effect=exception.NoValidHost('fake'))) self.assertRaises( exception.NoValidHost, self.manager.manage_share, self.context, share['id'], 'driver_options', {}, None) def test_create_share_replica_exception_path(self): """Test 'raisable' exceptions for create_share_replica.""" db_update = self.mock_object(db, 'share_replica_update') self.mock_object(db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=[{'id': '123'}])) snap_update = self.mock_object(db, 'share_snapshot_instance_update') request_spec = fakes.fake_replica_request_spec() replica_id = request_spec.get('share_instance_properties').get('id') expected_updates = { 'status': constants.STATUS_ERROR, 'replica_state': constants.STATUS_ERROR, } with mock.patch.object(self.manager.driver, 'schedule_create_replica', mock.Mock(side_effect=exception.NotFound)): self.assertRaises(exception.NotFound, self.manager.create_share_replica, self.context, request_spec=request_spec, filter_properties={}) db_update.assert_called_once_with( self.context, replica_id, expected_updates) snap_update.assert_called_once_with( self.context, '123', {'status': constants.STATUS_ERROR}) def 
test_create_share_replica_no_valid_host(self): """Test the NoValidHost exception for create_share_replica.""" db_update = self.mock_object(db, 'share_replica_update') request_spec = fakes.fake_replica_request_spec() replica_id = request_spec.get('share_instance_properties').get('id') expected_updates = { 'status': constants.STATUS_ERROR, 'replica_state': constants.STATUS_ERROR, } with mock.patch.object( self.manager.driver, 'schedule_create_replica', mock.Mock(side_effect=self.raise_no_valid_host)): retval = self.manager.create_share_replica( self.context, request_spec=request_spec, filter_properties={}) self.assertIsNone(retval) db_update.assert_called_once_with( self.context, replica_id, expected_updates) def test_create_share_replica(self): """Test happy path for create_share_replica.""" db_update = self.mock_object(db, 'share_replica_update') mock_scheduler_driver_call = self.mock_object( self.manager.driver, 'schedule_create_replica') request_spec = fakes.fake_replica_request_spec() retval = self.manager.create_share_replica( self.context, request_spec=request_spec, filter_properties={}) mock_scheduler_driver_call.assert_called_once_with( self.context, request_spec, {}) self.assertFalse(db_update.called) self.assertIsNone(retval) manila-2.0.0/manila/tests/scheduler/test_rpcapi.py0000664000567000056710000001202412701407112023355 0ustar jenkinsjenkins00000000000000# Copyright 2012, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Unit Tests for manila.scheduler.rpcapi """ import copy import mock from oslo_config import cfg from manila import context from manila.scheduler import rpcapi as scheduler_rpcapi from manila import test CONF = cfg.CONF class SchedulerRpcAPITestCase(test.TestCase): def setUp(self): super(SchedulerRpcAPITestCase, self).setUp() def tearDown(self): super(SchedulerRpcAPITestCase, self).tearDown() def _test_scheduler_api(self, method, rpc_method, fanout=False, **kwargs): ctxt = context.RequestContext('fake_user', 'fake_project') rpcapi = scheduler_rpcapi.SchedulerAPI() expected_retval = 'foo' if method == 'call' else None target = { "fanout": fanout, "version": kwargs.pop('version', '1.0'), } expected_msg = copy.deepcopy(kwargs) self.fake_args = None self.fake_kwargs = None def _fake_prepare_method(*args, **kwds): for kwd in kwds: self.assertEqual(target[kwd], kwds[kwd]) return rpcapi.client def _fake_rpc_method(*args, **kwargs): self.fake_args = args self.fake_kwargs = kwargs if expected_retval: return expected_retval with mock.patch.object(rpcapi.client, "prepare") as mock_prepared: mock_prepared.side_effect = _fake_prepare_method with mock.patch.object(rpcapi.client, rpc_method) as mock_method: mock_method.side_effect = _fake_rpc_method retval = getattr(rpcapi, method)(ctxt, **kwargs) self.assertEqual(expected_retval, retval) expected_args = [ctxt, method, expected_msg] for arg, expected_arg in zip(self.fake_args, expected_args): self.assertEqual(expected_arg, arg) def test_update_service_capabilities(self): self._test_scheduler_api('update_service_capabilities', rpc_method='cast', service_name='fake_name', host='fake_host', capabilities='fake_capabilities', fanout=True) def test_create_share_instance(self): self._test_scheduler_api('create_share_instance', rpc_method='cast', request_spec='fake_request_spec', filter_properties='filter_properties', version='1.2') def test_get_pools(self): self._test_scheduler_api('get_pools', rpc_method='call', filters=None, version='1.1') def test_create_consistency_group(self): self._test_scheduler_api('create_consistency_group', rpc_method='cast', cg_id='cg_id', request_spec='fake_request_spec', filter_properties='filter_properties', version='1.3') def test_migrate_share_to_host(self): self._test_scheduler_api('migrate_share_to_host', rpc_method='call', share_id='share_id', host='host', force_host_copy=True, notify=True, request_spec='fake_request_spec', filter_properties='filter_properties', version='1.4') def test_create_share_replica(self): self._test_scheduler_api('create_share_replica', rpc_method='cast', request_spec='fake_request_spec', filter_properties='filter_properties', version='1.5') def test_manage_share(self): self._test_scheduler_api('manage_share', rpc_method='call', share_id='share_id', driver_options='fake_driver_options', request_spec='fake_request_spec', filter_properties='filter_properties', version='1.6') manila-2.0.0/manila/tests/scheduler/filters/0000775000567000056710000000000012701407265022150 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/scheduler/filters/test_share_replication.py0000664000567000056710000001166112701407107027254 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for the ShareReplicationFilter. """ import ddt from oslo_context import context from manila.scheduler.filters import share_replication from manila import test from manila.tests.scheduler import fakes @ddt.ddt class ShareReplicationFilterTestCase(test.TestCase): """Test case for ShareReplicationFilter.""" def setUp(self): super(ShareReplicationFilterTestCase, self).setUp() self.filter = share_replication.ShareReplicationFilter() self.debug_log = self.mock_object(share_replication.LOG, 'debug') @staticmethod def _create_replica_request(replication_domain='kashyyyk', replication_type='dr', active_replica_host=fakes.FAKE_HOST_STRING_1, all_replica_hosts=fakes.FAKE_HOST_STRING_1, is_admin=False): ctxt = context.RequestContext('fake', 'fake', is_admin=is_admin) return { 'context': ctxt, 'request_spec': { 'active_replica_host': active_replica_host, 'all_replica_hosts': all_replica_hosts, }, 'resource_type': { 'extra_specs': { 'replication_type': replication_type, }, }, 'replication_domain': replication_domain, } @ddt.data('tatooine', '') def test_share_replication_filter_fails_incompatible_domain(self, domain): request = self._create_replica_request() host = fakes.FakeHostState('host1', { 'replication_domain': domain, }) self.assertFalse(self.filter.host_passes(host, request)) self.assertTrue(self.debug_log.called) def test_share_replication_filter_fails_no_replication_domain(self): request = self._create_replica_request() host = fakes.FakeHostState('host1', { 'replication_domain': None, }) self.assertFalse(self.filter.host_passes(host, request)) self.assertTrue(self.debug_log.called) def test_share_replication_filter_fails_host_has_replicas(self): all_replica_hosts = ','.join(['host1', fakes.FAKE_HOST_STRING_1]) request = self._create_replica_request( all_replica_hosts=all_replica_hosts) host = fakes.FakeHostState('host1', { 'replication_domain': 'kashyyyk', }) self.assertFalse(self.filter.host_passes(host, request)) self.assertTrue(self.debug_log.called) def test_share_replication_filter_passes_no_replication_type(self): request = self._create_replica_request(replication_type=None) host = fakes.FakeHostState('host1', { 'replication_domain': 'tatooine', }) self.assertTrue(self.filter.host_passes(host, request)) def test_share_replication_filter_passes_no_active_replica_host(self): request = self._create_replica_request(active_replica_host=None) host = fakes.FakeHostState('host1', { 'replication_domain': 'tatooine', }) self.assertTrue(self.filter.host_passes(host, request)) def test_share_replication_filter_passes_happy_day(self): all_replica_hosts = ','.join(['host1', fakes.FAKE_HOST_STRING_1]) request = self._create_replica_request( all_replica_hosts=all_replica_hosts) host = fakes.FakeHostState('host2', { 'replication_domain': 'kashyyyk', }) self.assertTrue(self.filter.host_passes(host, request)) def test_share_replication_filter_empty(self): request = {} host = fakes.FakeHostState('host1', { 'replication_domain': 'naboo', }) self.assertTrue(self.filter.host_passes(host, request)) manila-2.0.0/manila/tests/scheduler/filters/__init__.py0000664000567000056710000000000012701407107024242 
0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/scheduler/filters/test_retry.py0000664000567000056710000000355612701407107024732 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For RetryFilter. """ from manila import context from manila.scheduler.filters import retry from manila import test from manila.tests.scheduler import fakes class HostFiltersTestCase(test.TestCase): """Test case for RetryFilter.""" def setUp(self): super(HostFiltersTestCase, self).setUp() self.context = context.RequestContext('fake', 'fake') self.filter = retry.RetryFilter() def test_retry_filter_disabled(self): # Test case where retry/re-scheduling is disabled. host = fakes.FakeHostState('host1', {}) filter_properties = {} self.assertTrue(self.filter.host_passes(host, filter_properties)) def test_retry_filter_pass(self): # Node not previously tried. host = fakes.FakeHostState('host1', {}) retry = dict(num_attempts=2, hosts=['host2']) filter_properties = dict(retry=retry) self.assertTrue(self.filter.host_passes(host, filter_properties)) def test_retry_filter_fail(self): # Node was already tried. host = fakes.FakeHostState('host1', {}) retry = dict(num_attempts=1, hosts=['host1']) filter_properties = dict(retry=retry) self.assertFalse(self.filter.host_passes(host, filter_properties)) manila-2.0.0/manila/tests/scheduler/filters/test_base.py0000664000567000056710000001226512701407107024474 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from manila.scheduler.filters import base from manila import test class TestBaseFilter(test.TestCase): def setUp(self): super(TestBaseFilter, self).setUp() self.filter = base.BaseFilter() def test_filter_one_is_called(self): filters = [1, 2, 3, 4] filter_properties = {'x': 'y'} side_effect = lambda value, props: value in [2, 3] self.mock_object(self.filter, '_filter_one', mock.Mock(side_effect=side_effect)) result = list(self.filter.filter_all(filters, filter_properties)) self.assertEqual([2, 3], result) class FakeExtension(object): def __init__(self, plugin): self.plugin = plugin class BaseFakeFilter(base.BaseFilter): pass class FakeFilter1(BaseFakeFilter): """Derives from BaseFakeFilter and has a fake entry point defined. Entry point is returned by fake ExtensionManager. Should be included in the output of all_classes. """ class FakeFilter2(BaseFakeFilter): """Derives from BaseFakeFilter but has no entry point. 
Should be not included in all_classes. """ class FakeFilter3(base.BaseFilter): """Does not derive from BaseFakeFilter. Should not be included. """ class FakeFilter4(BaseFakeFilter): """Derives from BaseFakeFilter and has an entry point. Should be included. """ class FakeFilter5(BaseFakeFilter): """Derives from BaseFakeFilter but has no entry point. Should not be included. """ run_filter_once_per_request = True class FakeExtensionManager(list): def __init__(self, namespace): classes = [FakeFilter1, FakeFilter3, FakeFilter4] exts = map(FakeExtension, classes) super(FakeExtensionManager, self).__init__(exts) self.namespace = namespace class TestBaseFilterHandler(test.TestCase): def setUp(self): super(TestBaseFilterHandler, self).setUp() self.mock_object(base.base_handler.extension, 'ExtensionManager', FakeExtensionManager) self.handler = base.BaseFilterHandler(BaseFakeFilter, 'fake_filters') def test_get_all_classes(self): # In order for a FakeFilter to be returned by get_all_classes, it has # to comply with these rules: # * It must be derived from BaseFakeFilter # AND # * It must have a python entrypoint assigned (returned by # FakeExtensionManager) expected = [FakeFilter1, FakeFilter4] result = self.handler.get_all_classes() self.assertEqual(expected, result) def _get_filtered_objects(self, filter_classes, index=0): filter_objs_initial = [1, 2, 3, 4] filter_properties = {'x': 'y'} return self.handler.get_filtered_objects(filter_classes, filter_objs_initial, filter_properties, index) @mock.patch.object(FakeFilter4, 'filter_all') @mock.patch.object(FakeFilter3, 'filter_all', return_value=None) def test_get_filtered_objects_return_none(self, fake3_filter_all, fake4_filter_all): filter_classes = [FakeFilter1, FakeFilter2, FakeFilter3, FakeFilter4] result = self._get_filtered_objects(filter_classes) self.assertIsNone(result) self.assertFalse(fake4_filter_all.called) def test_get_filtered_objects(self): filter_objs_expected = [1, 2, 3, 4] filter_classes = [FakeFilter1, FakeFilter2, FakeFilter3, FakeFilter4] result = self._get_filtered_objects(filter_classes) self.assertEqual(filter_objs_expected, result) def test_get_filtered_objects_with_filter_run_once(self): filter_objs_expected = [1, 2, 3, 4] filter_classes = [FakeFilter5] with mock.patch.object(FakeFilter5, 'filter_all', return_value=filter_objs_expected ) as fake5_filter_all: result = self._get_filtered_objects(filter_classes) self.assertEqual(filter_objs_expected, result) self.assertEqual(1, fake5_filter_all.call_count) result = self._get_filtered_objects(filter_classes, index=1) self.assertEqual(filter_objs_expected, result) self.assertEqual(1, fake5_filter_all.call_count) result = self._get_filtered_objects(filter_classes, index=2) self.assertEqual(filter_objs_expected, result) self.assertEqual(1, fake5_filter_all.call_count) manila-2.0.0/manila/tests/scheduler/filters/test_capacity.py0000664000567000056710000002256412701407107025362 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Tests For CapacityFilter. """ import ddt from manila import context from manila.scheduler.filters import capacity from manila import test from manila.tests.scheduler import fakes from manila import utils @ddt.ddt class HostFiltersTestCase(test.TestCase): """Test case CapacityFilter.""" def setUp(self): super(HostFiltersTestCase, self).setUp() self.context = context.RequestContext('fake', 'fake') self.filter = capacity.CapacityFilter() def _stub_service_is_up(self, ret_value): def fake_service_is_up(service): return ret_value self.mock_object(utils, 'service_is_up', fake_service_is_up) @ddt.data( {'size': 100, 'share_on': None, 'host': 'host1'}, {'size': 100, 'share_on': 'host1#pool1', 'host': 'host1#pools1'}) @ddt.unpack def test_capacity_filter_passes(self, size, share_on, host): self._stub_service_is_up(True) filter_properties = {'size': size, 'share_exists_on': share_on} service = {'disabled': False} host = fakes.FakeHostState(host, {'total_capacity_gb': 500, 'free_capacity_gb': 200, 'updated_at': None, 'service': service}) self.assertTrue(self.filter.host_passes(host, filter_properties)) @ddt.data( {'free_capacity': 120, 'total_capacity': 200, 'reserved': 20}, {'free_capacity': None, 'total_capacity': None, 'reserved': None}) @ddt.unpack def test_capacity_filter_fails(self, free_capacity, total_capacity, reserved): self._stub_service_is_up(True) filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': total_capacity, 'free_capacity_gb': free_capacity, 'reserved_percentage': reserved, 'updated_at': None, 'service': service}) self.assertFalse(self.filter.host_passes(host, filter_properties)) def test_capacity_filter_passes_unknown(self): free = 'unknown' self._stub_service_is_up(True) filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_capacity_gb': free, 'updated_at': None, 'service': service}) self.assertTrue(self.filter.host_passes(host, filter_properties)) @ddt.data( {'free_capacity': 'unknown', 'total_capacity': 'unknown'}, {'free_capacity': 200, 'total_capacity': 'unknown'}) @ddt.unpack def test_capacity_filter_passes_total(self, free_capacity, total_capacity): self._stub_service_is_up(True) filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_capacity_gb': free_capacity, 'total_capacity_gb': total_capacity, 'reserved_percentage': 0, 'updated_at': None, 'service': service}) self.assertTrue(self.filter.host_passes(host, filter_properties)) @ddt.data( {'free': 200, 'total': 'unknown', 'reserved': 5}, {'free': 50, 'total': 'unknown', 'reserved': 0}, {'free': 200, 'total': 0, 'reserved': 0}) @ddt.unpack def test_capacity_filter_fails_total(self, free, total, reserved): self._stub_service_is_up(True) filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_capacity_gb': free, 'total_capacity_gb': total, 'reserved_percentage': reserved, 'updated_at': None, 'service': service}) self.assertFalse(self.filter.host_passes(host, filter_properties)) @ddt.data( {'size': 100, 'cap_thin': ' True', 'total': 500, 'free': 200, 'provisioned': 500, 'max_ratio': 2.0, 'reserved': 5, 'thin_prov': True}, {'size': 3000, 'cap_thin': ' True', 'total': 500, 'free': 200, 'provisioned': 7000, 'max_ratio': 20, 'reserved': 5, 'thin_prov': True}, {'size': 100, 'cap_thin': ' False', 'total': 
500, 'free': 200, 'provisioned': 300, 'max_ratio': 1.0, 'reserved': 5, 'thin_prov': False}, {'size': 100, 'cap_thin': ' True', 'total': 500, 'free': 200, 'provisioned': 400, 'max_ratio': 1.0, 'reserved': 5, 'thin_prov': True}, {'size': 100, 'cap_thin': ' True', 'total': 500, 'free': 125, 'provisioned': 400, 'max_ratio': 2.0, 'reserved': 5, 'thin_prov': True}, {'size': 100, 'cap_thin': ' True', 'total': 500, 'free': 80, 'provisioned': 600, 'max_ratio': 2.0, 'reserved': 5, 'thin_prov': True}, {'size': 100, 'cap_thin': ' True', 'total': 500, 'free': 100, 'provisioned': 400, 'max_ratio': 2.0, 'reserved': 0, 'thin_prov': True}) @ddt.unpack def test_filter_thin_passes(self, size, cap_thin, total, free, provisioned, max_ratio, reserved, thin_prov): self._stub_service_is_up(True) filter_properties = {'size': size, 'capabilities:thin_provisioning': cap_thin} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': total, 'free_capacity_gb': free, 'provisioned_capacity_gb': provisioned, 'max_over_subscription_ratio': max_ratio, 'reserved_percentage': reserved, 'thin_provisioning': thin_prov, 'updated_at': None, 'service': service}) self.assertTrue(self.filter.host_passes(host, filter_properties)) @ddt.data( {'size': 200, 'cap_thin': ' True', 'total': 500, 'free': 100, 'provisioned': 400, 'max_ratio': 0.8, 'reserved': 0, 'thin_prov': True}, {'size': 100, 'cap_thin': ' True', 'total': 500, 'free': 200, 'provisioned': 700, 'max_ratio': 1.5, 'reserved': 5, 'thin_prov': True}, {'size': 2000, 'cap_thin': ' True', 'total': 500, 'free': 30, 'provisioned': 9000, 'max_ratio': 20.0, 'reserved': 0, 'thin_prov': True}, {'size': 100, 'cap_thin': ' True', 'total': 500, 'free': 100, 'provisioned': 1000, 'max_ratio': 2.0, 'reserved': 5, 'thin_prov': True}, {'size': 100, 'cap_thin': ' False', 'total': 500, 'free': 100, 'provisioned': 400, 'max_ratio': 1.0, 'reserved': 5, 'thin_prov': False}, {'size': 100, 'cap_thin': ' True', 'total': 500, 'free': 0, 'provisioned': 800, 'max_ratio': 2.0, 'reserved': 5, 'thin_prov': True}, {'size': 100, 'cap_thin': ' True', 'total': 500, 'free': 99, 'provisioned': 1000, 'max_ratio': 2.0, 'reserved': 5, 'thin_prov': True}, {'size': 400, 'cap_thin': ' True', 'total': 500, 'free': 200, 'provisioned': 600, 'max_ratio': 2.0, 'reserved': 5, 'thin_prov': True}) @ddt.unpack def test_filter_thin_fails(self, size, cap_thin, total, free, provisioned, max_ratio, reserved, thin_prov): self._stub_service_is_up(True) filter_properties = {'size': size, 'capabilities:thin_provisioning': cap_thin} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': total, 'free_capacity_gb': free, 'provisioned_capacity_gb': provisioned, 'max_over_subscription_ratio': max_ratio, 'reserved_percentage': reserved, 'thin_provisioning': thin_prov, 'updated_at': None, 'service': service}) self.assertFalse(self.filter.host_passes(host, filter_properties)) manila-2.0.0/manila/tests/scheduler/filters/test_capabilities.py0000664000567000056710000001465412701407107026217 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For CapabilitiesFilter. """ import ddt from oslo_context import context from manila.scheduler.filters import capabilities from manila import test from manila.tests.scheduler import fakes @ddt.ddt class HostFiltersTestCase(test.TestCase): """Test case for CapabilitiesFilter.""" def setUp(self): super(HostFiltersTestCase, self).setUp() self.context = context.RequestContext('fake', 'fake') self.filter = capabilities.CapabilitiesFilter() def _do_test_type_filter_extra_specs(self, ecaps, especs, passes): capabilities = {'enabled': True} capabilities.update(ecaps) service = {'disabled': False} filter_properties = {'resource_type': {'name': 'fake_type', 'extra_specs': especs}} host = fakes.FakeHostState('host1', {'free_capacity_gb': 1024, 'capabilities': capabilities, 'service': service}) assertion = self.assertTrue if passes else self.assertFalse assertion(self.filter.host_passes(host, filter_properties)) def test_capability_filter_passes_extra_specs_simple(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': '1', 'opt2': '2'}, especs={'opt1': '1', 'opt2': '2'}, passes=True) def test_capability_filter_fails_extra_specs_simple(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': '1', 'opt2': '2'}, especs={'opt1': '1', 'opt2': '222'}, passes=False) def test_capability_filter_passes_extra_specs_complex(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': 10, 'opt2': 5}, especs={'opt1': '>= 2', 'opt2': '<= 8'}, passes=True) def test_capability_filter_fails_extra_specs_complex(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': 10, 'opt2': 5}, especs={'opt1': '>= 2', 'opt2': '>= 8'}, passes=False) def test_capability_filter_passes_extra_specs_list_simple(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': ['1', '2'], 'opt2': '2'}, especs={'opt1': '1', 'opt2': '2'}, passes=True) @ddt.data(' True', ' False') def test_capability_filter_passes_extra_specs_list_complex(self, opt1): self._do_test_type_filter_extra_specs( ecaps={'opt1': [True, False], 'opt2': ['1', '2']}, especs={'opt1': opt1, 'opt2': '<= 8'}, passes=True) def test_capability_filter_fails_extra_specs_list_simple(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': ['1', '2'], 'opt2': ['2']}, especs={'opt1': '3', 'opt2': '2'}, passes=False) def test_capability_filter_fails_extra_specs_list_complex(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': [True, False], 'opt2': ['1', '2']}, especs={'opt1': 'fake', 'opt2': '<= 8'}, passes=False) def test_capability_filter_passes_scope_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv1': {'opt1': 10}}, especs={'capabilities:scope_lv1:opt1': '>= 2'}, passes=True) def test_capability_filter_passes_fakescope_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv1': {'opt1': 10}, 'opt2': 5}, especs={'scope_lv1:opt1': '= 2', 'opt2': '>= 3'}, passes=True) def test_capability_filter_fails_scope_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv1': {'opt1': 10}}, especs={'capabilities:scope_lv1:opt1': '<= 2'}, passes=False) def 
test_capability_filter_passes_multi_level_scope_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv0': {'scope_lv1': {'scope_lv2': {'opt1': 10}}}}, especs={'capabilities:scope_lv0:scope_lv1:scope_lv2:opt1': '>= 2'}, passes=True) def test_capability_filter_fails_wrong_scope_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv0': {'opt1': 10}}, especs={'capabilities:scope_lv1:opt1': '>= 2'}, passes=False) def test_capability_filter_passes_multi_level_scope_extra_specs_list(self): self._do_test_type_filter_extra_specs( ecaps={ 'scope_lv0': { 'scope_lv1': { 'scope_lv2': { 'opt1': [True, False], }, }, }, }, especs={ 'capabilities:scope_lv0:scope_lv1:scope_lv2:opt1': ' True', }, passes=True) def test_capability_filter_fails_multi_level_scope_extra_specs_list(self): self._do_test_type_filter_extra_specs( ecaps={ 'scope_lv0': { 'scope_lv1': { 'scope_lv2': { 'opt1': [True, False], 'opt2': ['1', '2'], }, }, }, }, especs={ 'capabilities:scope_lv0:scope_lv1:scope_lv2:opt1': ' True', 'capabilities:scope_lv0:scope_lv1:scope_lv2:opt2': '3', }, passes=False) def test_capability_filter_fails_wrong_scope_extra_specs_list(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv0': {'opt1': [True, False]}}, especs={'capabilities:scope_lv1:opt1': ' True'}, passes=False) manila-2.0.0/manila/tests/scheduler/filters/test_ignore_attempted_hosts.py0000664000567000056710000000401012701407107030321 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For IgnoreAttemptedHost filter. """ from oslo_context import context from manila.scheduler.filters import ignore_attempted_hosts from manila import test from manila.tests.scheduler import fakes class HostFiltersTestCase(test.TestCase): """Test case for IgnoreAttemptedHost filter.""" def setUp(self): super(HostFiltersTestCase, self).setUp() self.context = context.RequestContext('fake', 'fake') self.filter = ignore_attempted_hosts.IgnoreAttemptedHostsFilter() def test_ignore_attempted_hosts_filter_disabled(self): # Test case where re-scheduling is disabled. host = fakes.FakeHostState('host1', {}) filter_properties = {} self.assertTrue(self.filter.host_passes(host, filter_properties)) def test_ignore_attempted_hosts_filter_pass(self): # Node not previously tried. host = fakes.FakeHostState('host1', {}) attempted = dict(num_attempts=2, hosts=['host2']) filter_properties = dict(retry=attempted) self.assertTrue(self.filter.host_passes(host, filter_properties)) def test_ignore_attempted_hosts_filter_fail(self): # Node was already tried. host = fakes.FakeHostState('host1', {}) attempted = dict(num_attempts=2, hosts=['host1']) filter_properties = dict(retry=attempted) self.assertFalse(self.filter.host_passes(host, filter_properties)) manila-2.0.0/manila/tests/scheduler/filters/test_availability_zone.py0000664000567000056710000000454212701407107027266 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. 
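# NOTE: the helper below is an illustrative, standalone paraphrase of the retry
# bookkeeping that the IgnoreAttemptedHostsFilter tests above rely on; it is not
# the filter class itself, and the function name is made up for this sketch.
def _ignore_attempted_sketch(host_name, filter_properties):
    retry = filter_properties.get('retry')
    if not retry:
        # Re-scheduling is not enabled, so every host passes.
        return True
    # Skip hosts that a previous scheduling attempt has already tried.
    return host_name not in retry.get('hosts', [])

assert _ignore_attempted_sketch('host1', {})
assert _ignore_attempted_sketch('host1', {'retry': {'num_attempts': 2, 'hosts': ['host2']}})
assert not _ignore_attempted_sketch('host1', {'retry': {'num_attempts': 2, 'hosts': ['host1']}})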
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For AvailabilityZoneFilter. """ from oslo_context import context from manila.scheduler.filters import availability_zone from manila import test from manila.tests.scheduler import fakes class HostFiltersTestCase(test.TestCase): """Test case for AvailabilityZoneFilter.""" def setUp(self): super(HostFiltersTestCase, self).setUp() self.context = context.RequestContext('fake', 'fake') self.filter = availability_zone.AvailabilityZoneFilter() @staticmethod def _make_zone_request(zone, is_admin=False): ctxt = context.RequestContext('fake', 'fake', is_admin=is_admin) return { 'context': ctxt, 'request_spec': { 'resource_properties': { 'availability_zone_id': zone } } } def test_availability_zone_filter_same(self): service = {'availability_zone_id': 'nova'} request = self._make_zone_request('nova') host = fakes.FakeHostState('host1', {'service': service}) self.assertTrue(self.filter.host_passes(host, request)) def test_availability_zone_filter_different(self): service = {'availability_zone_id': 'nova'} request = self._make_zone_request('bad') host = fakes.FakeHostState('host1', {'service': service}) self.assertFalse(self.filter.host_passes(host, request)) def test_availability_zone_filter_empty(self): service = {'availability_zone_id': 'nova'} request = {} host = fakes.FakeHostState('host1', {'service': service}) self.assertTrue(self.filter.host_passes(host, request)) manila-2.0.0/manila/tests/scheduler/filters/test_extra_specs_ops.py0000664000567000056710000000474712701407107026771 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Scheduler Host Filters. 
""" import ddt from manila.scheduler.filters import extra_specs_ops from manila import test @ddt.ddt class ExtraSpecsOpsTestCase(test.TestCase): def _do_extra_specs_ops_test(self, value, req, matches): assertion = self.assertTrue if matches else self.assertFalse assertion(extra_specs_ops.match(value, req)) @ddt.unpack @ddt.data( ('1', '1', True), ('', '1', False), ('3', '1', False), ('222', '2', False), ('4', '> 2', False), ('123', '= 123', True), ('124', '= 123', True), ('34', '=234', False), ('34', '=', False), ('123', 's== 123', True), ('1234', 's== 123', False), ('1234', 's!= 123', True), ('123', 's!= 123', False), ('1000', 's>= 234', False), ('1234', 's<= 1000', False), ('2', 's< 12', False), ('12', 's> 2', False), ('12311321', ' 11', True), ('12311321', ' 12311321', True), ('12311321', ' 12311321 ', True), ('12310321', ' 11', False), ('12310321', ' 11 ', False), (True, 'True', True), (True, ' True', True), (True, ' False', False), (False, 'False', True), (False, ' False', True), (False, ' True', False), (False, 'Nonsense', False), (False, ' Nonsense', True), (True, 'False', False), (False, 'True', False), ('12', ' 11 12', True), ('13', ' 11 12', False), ('13', ' 11 12 ', False), ('2', '<= 10', True), ('3', '<= 2', False), ('3', '>= 1', True), ('2', '>= 3', False), ) def test_extra_specs_matches_simple(self, value, req, matches): self._do_extra_specs_ops_test( value, req, matches) manila-2.0.0/manila/tests/scheduler/filters/test_json.py0000664000567000056710000003271512701407107024535 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For JsonFilter. 
""" from oslo_context import context from oslo_serialization import jsonutils from manila.scheduler.filters import json from manila import test from manila.tests.scheduler import fakes class HostFiltersTestCase(test.TestCase): """Test case for JsonFilter.""" def setUp(self): super(HostFiltersTestCase, self).setUp() self.context = context.RequestContext('fake', 'fake') self.json_query = jsonutils.dumps( ['and', ['>=', '$free_ram_mb', 1024], ['>=', '$free_disk_mb', 200 * 1024]]) self.filter = json.JsonFilter() def test_json_filter_passes(self): filter_properties = {'resource_type': {'memory_mb': 1024, 'root_gb': 200, 'ephemeral_gb': 0}, 'scheduler_hints': {'query': self.json_query}} capabilities = {'enabled': True} host = fakes.FakeHostState('host1', {'free_ram_mb': 1024, 'free_disk_mb': 200 * 1024, 'capabilities': capabilities}) self.assertTrue(self.filter.host_passes(host, filter_properties)) def test_json_filter_passes_with_no_query(self): filter_properties = {'resource_type': {'memory_mb': 1024, 'root_gb': 200, 'ephemeral_gb': 0}} capabilities = {'enabled': True} host = fakes.FakeHostState('host1', {'free_ram_mb': 0, 'free_disk_mb': 0, 'capabilities': capabilities}) self.assertTrue(self.filter.host_passes(host, filter_properties)) def test_json_filter_fails_on_memory(self): filter_properties = {'resource_type': {'memory_mb': 1024, 'root_gb': 200, 'ephemeral_gb': 0}, 'scheduler_hints': {'query': self.json_query}} capabilities = {'enabled': True} host = fakes.FakeHostState('host1', {'free_ram_mb': 1023, 'free_disk_mb': 200 * 1024, 'capabilities': capabilities}) self.assertFalse(self.filter.host_passes(host, filter_properties)) def test_json_filter_fails_on_disk(self): filter_properties = {'resource_type': {'memory_mb': 1024, 'root_gb': 200, 'ephemeral_gb': 0}, 'scheduler_hints': {'query': self.json_query}} capabilities = {'enabled': True} host = fakes.FakeHostState('host1', {'free_ram_mb': 1024, 'free_disk_mb': (200 * 1024) - 1, 'capabilities': capabilities}) self.assertFalse(self.filter.host_passes(host, filter_properties)) def test_json_filter_fails_on_caps_disabled(self): json_query = jsonutils.dumps( ['and', ['>=', '$free_ram_mb', 1024], ['>=', '$free_disk_mb', 200 * 1024], '$capabilities.enabled']) filter_properties = {'resource_type': {'memory_mb': 1024, 'root_gb': 200, 'ephemeral_gb': 0}, 'scheduler_hints': {'query': json_query}} capabilities = {'enabled': False} host = fakes.FakeHostState('host1', {'free_ram_mb': 1024, 'free_disk_mb': 200 * 1024, 'capabilities': capabilities}) self.assertFalse(self.filter.host_passes(host, filter_properties)) def test_json_filter_fails_on_service_disabled(self): json_query = jsonutils.dumps( ['and', ['>=', '$free_ram_mb', 1024], ['>=', '$free_disk_mb', 200 * 1024], ['not', '$service.disabled']]) filter_properties = {'resource_type': {'memory_mb': 1024, 'local_gb': 200}, 'scheduler_hints': {'query': json_query}} capabilities = {'enabled': True} host = fakes.FakeHostState('host1', {'free_ram_mb': 1024, 'free_disk_mb': 200 * 1024, 'capabilities': capabilities}) self.assertFalse(self.filter.host_passes(host, filter_properties)) def test_json_filter_happy_day(self): """Test json filter more thoroughly.""" raw = ['and', '$capabilities.enabled', ['=', '$capabilities.opt1', 'match'], ['or', ['and', ['<', '$free_ram_mb', 30], ['<', '$free_disk_mb', 300]], ['and', ['>', '$free_ram_mb', 30], ['>', '$free_disk_mb', 300]]]] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } # Passes capabilities = {'enabled': True, 'opt1': 'match'} 
service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_ram_mb': 10, 'free_disk_mb': 200, 'capabilities': capabilities, 'service': service}) self.assertTrue(self.filter.host_passes(host, filter_properties)) # Passes capabilities = {'enabled': True, 'opt1': 'match'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_ram_mb': 40, 'free_disk_mb': 400, 'capabilities': capabilities, 'service': service}) self.assertTrue(self.filter.host_passes(host, filter_properties)) # Fails due to capabilities being disabled capabilities = {'enabled': False, 'opt1': 'match'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_ram_mb': 40, 'free_disk_mb': 400, 'capabilities': capabilities, 'service': service}) self.assertFalse(self.filter.host_passes(host, filter_properties)) # Fails due to being exact memory/disk we don't want capabilities = {'enabled': True, 'opt1': 'match'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_ram_mb': 30, 'free_disk_mb': 300, 'capabilities': capabilities, 'service': service}) self.assertFalse(self.filter.host_passes(host, filter_properties)) # Fails due to memory lower but disk higher capabilities = {'enabled': True, 'opt1': 'match'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_ram_mb': 20, 'free_disk_mb': 400, 'capabilities': capabilities, 'service': service}) self.assertFalse(self.filter.host_passes(host, filter_properties)) # Fails due to capabilities 'opt1' not equal capabilities = {'enabled': True, 'opt1': 'no-match'} service = {'enabled': True} host = fakes.FakeHostState('host1', {'free_ram_mb': 20, 'free_disk_mb': 400, 'capabilities': capabilities, 'service': service}) self.assertFalse(self.filter.host_passes(host, filter_properties)) def test_json_filter_basic_operators(self): host = fakes.FakeHostState('host1', {'capabilities': {'enabled': True}}) # (operator, arguments, expected_result) ops_to_test = [ ['=', [1, 1], True], ['=', [1, 2], False], ['<', [1, 2], True], ['<', [1, 1], False], ['<', [2, 1], False], ['>', [2, 1], True], ['>', [2, 2], False], ['>', [2, 3], False], ['<=', [1, 2], True], ['<=', [1, 1], True], ['<=', [2, 1], False], ['>=', [2, 1], True], ['>=', [2, 2], True], ['>=', [2, 3], False], ['in', [1, 1], True], ['in', [1, 1, 2, 3], True], ['in', [4, 1, 2, 3], False], ['not', [True], False], ['not', [False], True], ['or', [True, False], True], ['or', [False, False], False], ['and', [True, True], True], ['and', [False, False], False], ['and', [True, False], False], # Nested ((True or False) and (2 > 1)) == Passes ['and', [['or', True, False], ['>', 2, 1]], True]] for (op, args, expected) in ops_to_test: raw = [op] + args filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertEqual(expected, self.filter.host_passes(host, filter_properties)) # This results in [False, True, False, True] and if any are True # then it passes... 
raw = ['not', True, False, True, False] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertTrue(self.filter.host_passes(host, filter_properties)) # This results in [False, False, False] and if any are True # then it passes...which this doesn't raw = ['not', True, True, True] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertFalse(self.filter.host_passes(host, filter_properties)) def test_json_filter_unknown_operator_raises(self): raw = ['!=', 1, 2] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } host = fakes.FakeHostState('host1', {'capabilities': {'enabled': True}}) self.assertRaises(KeyError, self.filter.host_passes, host, filter_properties) def test_json_filter_empty_filters_pass(self): host = fakes.FakeHostState('host1', {'capabilities': {'enabled': True}}) raw = [] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertTrue(self.filter.host_passes(host, filter_properties)) raw = {} filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertTrue(self.filter.host_passes(host, filter_properties)) def test_json_filter_invalid_num_arguments_fails(self): host = fakes.FakeHostState('host1', {'capabilities': {'enabled': True}}) raw = ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertFalse(self.filter.host_passes(host, filter_properties)) raw = ['>', 1] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertFalse(self.filter.host_passes(host, filter_properties)) def test_json_filter_unknown_variable_ignored(self): host = fakes.FakeHostState('host1', {'capabilities': {'enabled': True}}) raw = ['=', '$........', 1, 1] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertTrue(self.filter.host_passes(host, filter_properties)) raw = ['=', '$foo', 2, 2] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertTrue(self.filter.host_passes(host, filter_properties)) manila-2.0.0/manila/tests/scheduler/filters/test_base_host.py0000664000567000056710000000352712701407107025532 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Scheduler Host Filters. 
""" from oslo_context import context from oslo_serialization import jsonutils from manila.scheduler.filters import base_host from manila import test class TestFilter(test.TestCase): pass class TestBogusFilter(object): """Class that doesn't inherit from BaseHostFilter.""" pass class HostFiltersTestCase(test.TestCase): """Test case for host filters.""" def setUp(self): super(HostFiltersTestCase, self).setUp() self.context = context.RequestContext('fake', 'fake') self.json_query = jsonutils.dumps( ['and', ['>=', '$free_ram_mb', 1024], ['>=', '$free_disk_mb', 200 * 1024]]) namespace = 'manila.scheduler.filters' filter_handler = base_host.HostFilterHandler(namespace) classes = filter_handler.get_all_classes() self.class_map = {} for cls in classes: self.class_map[cls.__name__] = cls def test_all_filters(self): # Double check at least a couple of known filters exist self.assertTrue('JsonFilter' in self.class_map) self.assertTrue('CapabilitiesFilter' in self.class_map) self.assertTrue('AvailabilityZoneFilter' in self.class_map) manila-2.0.0/manila/tests/scheduler/fakes.py0000664000567000056710000003653612701407107022153 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Fakes For Scheduler tests. 
""" from oslo_utils import timeutils from manila.scheduler.drivers import filter from manila.scheduler import host_manager from manila.scheduler.weighers import base_host as base_host_weigher SHARE_SERVICES_NO_POOLS = [ dict(id=1, host='host1', topic='share', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow()), dict(id=2, host='host2@back1', topic='share', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow()), dict(id=3, host='host2@back2', topic='share', disabled=False, availability_zone='zone2', updated_at=timeutils.utcnow()), ] SERVICE_STATES_NO_POOLS = { 'host1': dict(share_backend_name='AAA', total_capacity_gb=512, free_capacity_gb=200, timestamp=None, reserved_percentage=0, provisioned_capacity_gb=312, max_over_subscription_ratio=1.0, thin_provisioning=False, snapshot_support=False, driver_handles_share_servers=False), 'host2@back1': dict(share_backend_name='BBB', total_capacity_gb=256, free_capacity_gb=100, timestamp=None, reserved_percentage=0, provisioned_capacity_gb=400, max_over_subscription_ratio=2.0, thin_provisioning=True, snapshot_support=True, driver_handles_share_servers=False), 'host2@back2': dict(share_backend_name='CCC', total_capacity_gb=10000, free_capacity_gb=700, timestamp=None, reserved_percentage=0, provisioned_capacity_gb=50000, max_over_subscription_ratio=20.0, thin_provisioning=True, snapshot_support=True, driver_handles_share_servers=False), } SHARE_SERVICES_WITH_POOLS = [ dict(id=1, host='host1@AAA', topic='share', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow()), dict(id=2, host='host2@BBB', topic='share', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow()), dict(id=3, host='host3@CCC', topic='share', disabled=False, availability_zone='zone2', updated_at=timeutils.utcnow()), dict(id=4, host='host4@DDD', topic='share', disabled=False, availability_zone='zone3', updated_at=timeutils.utcnow()), # service on host5 is disabled dict(id=5, host='host5@EEE', topic='share', disabled=True, availability_zone='zone4', updated_at=timeutils.utcnow()), dict(id=5, host='host6@FFF', topic='share', disabled=True, availability_zone='zone5', updated_at=timeutils.utcnow()), ] SHARE_SERVICE_STATES_WITH_POOLS = { 'host1@AAA': dict(share_backend_name='AAA', timestamp=None, reserved_percentage=0, driver_handles_share_servers=False, snapshot_support=True, replication_type=None, pools=[dict(pool_name='pool1', total_capacity_gb=51, free_capacity_gb=41, reserved_percentage=0, provisioned_capacity_gb=10, max_over_subscription_ratio=1.0, thin_provisioning=False)]), 'host2@BBB': dict(share_backend_name='BBB', timestamp=None, reserved_percentage=0, driver_handles_share_servers=False, snapshot_support=True, replication_type=None, pools=[dict(pool_name='pool2', total_capacity_gb=52, free_capacity_gb=42, reserved_percentage=0, provisioned_capacity_gb=60, max_over_subscription_ratio=2.0, thin_provisioning=True)]), 'host3@CCC': dict(share_backend_name='CCC', timestamp=None, reserved_percentage=0, driver_handles_share_servers=False, snapshot_support=True, replication_type=None, pools=[dict(pool_name='pool3', total_capacity_gb=53, free_capacity_gb=43, reserved_percentage=0, provisioned_capacity_gb=100, max_over_subscription_ratio=20.0, thin_provisioning=True, consistency_group_support='pool')]), 'host4@DDD': dict(share_backend_name='DDD', timestamp=None, reserved_percentage=0, driver_handles_share_servers=False, snapshot_support=True, replication_type=None, pools=[dict(pool_name='pool4a', 
total_capacity_gb=541, free_capacity_gb=441, reserved_percentage=0, provisioned_capacity_gb=800, max_over_subscription_ratio=2.0, thin_provisioning=True, consistency_group_support='host'), dict(pool_name='pool4b', total_capacity_gb=542, free_capacity_gb=442, reserved_percentage=0, provisioned_capacity_gb=2000, max_over_subscription_ratio=10.0, thin_provisioning=True, consistency_group_support='host')]), 'host5@EEE': dict(share_backend_name='EEE', timestamp=None, reserved_percentage=0, driver_handles_share_servers=False, snapshot_support=True, replication_type=None, pools=[dict(pool_name='pool5a', total_capacity_gb=551, free_capacity_gb=451, reserved_percentage=0, provisioned_capacity_gb=100, max_over_subscription_ratio=1.0, thin_provisioning=False), dict(pool_name='pool5b', total_capacity_gb=552, free_capacity_gb=452, reserved_percentage=0, provisioned_capacity_gb=100, max_over_subscription_ratio=1.0, thin_provisioning=False)]), 'host6@FFF': dict(share_backend_name='FFF', timestamp=None, reserved_percentage=0, driver_handles_share_servers=False, snapshot_support=True, replication_type=None, pools=[dict(pool_name='pool6a', total_capacity_gb='unknown', free_capacity_gb='unknown', reserved_percentage=0, provisioned_capacity_gb=100, max_over_subscription_ratio=1.0, thin_provisioning=False), dict(pool_name='pool6b', total_capacity_gb='unknown', free_capacity_gb='unknown', reserved_percentage=0, provisioned_capacity_gb=100, max_over_subscription_ratio=1.0, thin_provisioning=False)]), } class FakeFilterScheduler(filter.FilterScheduler): def __init__(self, *args, **kwargs): super(FakeFilterScheduler, self).__init__(*args, **kwargs) self.host_manager = host_manager.HostManager() class FakeHostManager(host_manager.HostManager): def __init__(self): super(FakeHostManager, self).__init__() self.service_states = { 'host1': {'total_capacity_gb': 1024, 'free_capacity_gb': 1024, 'allocated_capacity_gb': 0, 'thin_provisioning': False, 'reserved_percentage': 10, 'timestamp': None, 'snapshot_support': True, 'replication_type': 'writable', 'replication_domain': 'endor', }, 'host2': {'total_capacity_gb': 2048, 'free_capacity_gb': 300, 'allocated_capacity_gb': 1748, 'provisioned_capacity_gb': 1748, 'max_over_subscription_ratio': 2.0, 'thin_provisioning': True, 'reserved_percentage': 10, 'timestamp': None, 'snapshot_support': True, 'replication_type': 'readable', 'replication_domain': 'kashyyyk', }, 'host3': {'total_capacity_gb': 512, 'free_capacity_gb': 256, 'allocated_capacity_gb': 256, 'provisioned_capacity_gb': 256, 'max_over_subscription_ratio': 2.0, 'thin_provisioning': False, 'consistency_group_support': 'host', 'reserved_percentage': 0, 'snapshot_support': True, 'timestamp': None, }, 'host4': {'total_capacity_gb': 2048, 'free_capacity_gb': 200, 'allocated_capacity_gb': 1848, 'provisioned_capacity_gb': 1848, 'max_over_subscription_ratio': 1.0, 'thin_provisioning': True, 'reserved_percentage': 5, 'timestamp': None, 'snapshot_support': True, 'replication_type': 'dr', 'replication_domain': 'naboo', }, 'host5': {'total_capacity_gb': 2048, 'free_capacity_gb': 500, 'allocated_capacity_gb': 1548, 'provisioned_capacity_gb': 1548, 'max_over_subscription_ratio': 1.5, 'thin_provisioning': True, 'reserved_percentage': 5, 'timestamp': None, 'snapshot_support': True, 'consistency_group_support': 'pool', 'replication_type': None, }, 'host6': {'total_capacity_gb': 'unknown', 'free_capacity_gb': 'unknown', 'allocated_capacity_gb': 1548, 'thin_provisioning': False, 'reserved_percentage': 5, 'snapshot_support': True, 
'timestamp': None, }, } class FakeHostState(host_manager.HostState): def __init__(self, host, attribute_dict): super(FakeHostState, self).__init__(host) for (key, val) in attribute_dict.items(): setattr(self, key, val) FAKE_HOST_STRING_1 = 'openstack@BackendA#PoolX' FAKE_HOST_STRING_2 = 'openstack@BackendB#PoolY' FAKE_HOST_STRING_3 = 'openstack@BackendC#PoolZ' def mock_host_manager_db_calls(mock_obj, disabled=None): services = [ dict(id=1, host='host1', topic='share', disabled=False, availability_zone_id='zone1', updated_at=timeutils.utcnow()), dict(id=2, host='host2', topic='share', disabled=False, availability_zone_id='zone1', updated_at=timeutils.utcnow()), dict(id=3, host='host3', topic='share', disabled=False, availability_zone_id='zone2', updated_at=timeutils.utcnow()), dict(id=4, host='host4', topic='share', disabled=False, availability_zone_id='zone3', updated_at=timeutils.utcnow()), dict(id=5, host='host5', topic='share', disabled=False, availability_zone_id='zone3', updated_at=timeutils.utcnow()), dict(id=6, host='host6', topic='share', disabled=False, availability_zone_id='zone4', updated_at=timeutils.utcnow()), ] if disabled is None: mock_obj.return_value = services else: mock_obj.return_value = [service for service in services if service['disabled'] == disabled] class FakeWeigher1(base_host_weigher.BaseHostWeigher): def __init__(self): pass class FakeWeigher2(base_host_weigher.BaseHostWeigher): def __init__(self): pass class FakeClass(object): def __init__(self): pass def fake_replica_request_spec(**kwargs): request_spec = { 'share_properties': { 'id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'name': 'fakename', 'size': 1, 'share_network_id': '4ccd5318-65f1-11e5-9d70-feff819cdc9f', 'availability_zone': 'fake_az', 'replication_type': 'dr', }, 'share_instance_properties': { 'id': '8d5566df-1e83-4373-84b8-6f8153a0ac41', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'host': 'openstack@BackendZ#PoolA', 'status': 'available', 'availability_zone_id': 'f6e146d0-65f0-11e5-9d70-feff819cdc9f', 'share_network_id': '4ccd5318-65f1-11e5-9d70-feff819cdc9f', 'share_server_id': '53099868-65f1-11e5-9d70-feff819cdc9f', }, 'share_proto': 'nfs', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'snapshot_id': None, 'share_type': 'fake_share_type', 'consistency_group': None, } request_spec.update(kwargs) return request_spec def get_fake_host(host_name=None): class FakeHost(object): def __init__(self, host_name=None): self.host = host_name or 'openstack@BackendZ#PoolA' class FakeWeightedHost(object): def __init__(self, host_name=None): self.obj = FakeHost(host_name=host_name) return FakeWeightedHost(host_name=host_name) manila-2.0.0/manila/tests/scheduler/weighers/0000775000567000056710000000000012701407265022315 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/scheduler/weighers/__init__.py0000664000567000056710000000000012701407107024407 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/scheduler/weighers/test_base.py0000664000567000056710000000442012701407107024633 0ustar jenkinsjenkins00000000000000# Copyright 2011-2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
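# NOTE: small standalone helper illustrating the 'host@backend#pool' naming
# convention used by the FAKE_HOST_STRING_* values above. This is a reader's
# sketch, not manila.share.utils.extract_host, and the helper name is invented.
def _split_host_string(host_string):
    backend_part, _, pool = host_string.partition('#')
    host, _, backend = backend_part.partition('@')
    return {'host': host, 'backend': backend or None, 'pool': pool or None}

assert _split_host_string('openstack@BackendA#PoolX') == {
    'host': 'openstack', 'backend': 'BackendA', 'pool': 'PoolX'}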
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Scheduler weighers. """ from manila.scheduler.weighers import base from manila import test from manila.tests.scheduler import fakes class TestWeightHandler(test.TestCase): def test_get_all_classes(self): namespace = "manila.tests.scheduler.fakes" handler = base.BaseWeightHandler( base.BaseWeigher, namespace) classes = handler.get_all_classes() self.assertTrue(fakes.FakeWeigher1 in classes) self.assertTrue(fakes.FakeWeigher2 in classes) self.assertFalse(fakes.FakeClass in classes) def test_no_multiplier(self): class FakeWeigher(base.BaseWeigher): def _weigh_object(self, *args, **kwargs): pass self.assertEqual(1.0, FakeWeigher().weight_multiplier()) def test_no_weight_object(self): class FakeWeigher(base.BaseWeigher): def weight_multiplier(self, *args, **kwargs): pass self.assertRaises(TypeError, FakeWeigher) def test_normalization(self): # weight_list, expected_result, minval, maxval map_ = ( ((), (), None, None), ((0.0, 0.0), (0.0, 0.0), None, None), ((1.0, 1.0), (0.0, 0.0), None, None), ((20.0, 50.0), (0.0, 1.0), None, None), ((20.0, 50.0), (0.0, 0.375), None, 100.0), ((20.0, 50.0), (0.4, 1.0), 0.0, None), ((20.0, 50.0), (0.2, 0.5), 0.0, 100.0), ) for seq, result, minval, maxval in map_: ret = base.normalize(seq, minval=minval, maxval=maxval) self.assertEqual(result, tuple(ret)) manila-2.0.0/manila/tests/scheduler/weighers/test_capacity.py0000664000567000056710000001703312701407107025522 0ustar jenkinsjenkins00000000000000# Copyright 2011-2012 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Capacity Weigher. 
""" import mock from oslo_config import cfg from manila import context from manila.scheduler.weighers import base_host from manila.scheduler.weighers import capacity from manila.share import utils from manila import test from manila.tests.scheduler import fakes CONF = cfg.CONF class CapacityWeigherTestCase(test.TestCase): def setUp(self): super(CapacityWeigherTestCase, self).setUp() self.host_manager = fakes.FakeHostManager() self.weight_handler = base_host.HostWeightHandler( 'manila.scheduler.weighers') def _get_weighed_host(self, hosts, weight_properties=None, index=0): if weight_properties is None: weight_properties = {'size': 1} return self.weight_handler.get_weighed_objects( [capacity.CapacityWeigher], hosts, weight_properties)[index] @mock.patch('manila.db.api.IMPL.service_get_all_by_topic') def _get_all_hosts(self, _mock_service_get_all_by_topic, disabled=False): ctxt = context.get_admin_context() fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic, disabled=disabled) host_states = self.host_manager.get_all_host_states_share(ctxt) _mock_service_get_all_by_topic.assert_called_once_with( ctxt, CONF.share_topic) return host_states # NOTE(xyang): If thin_provisioning = True and # max_over_subscription_ratio >= 1, use the following formula: # free = math.floor(total * host_state.max_over_subscription_ratio # - host_state.provisioned_capacity_gb # - total * reserved) # Otherwise, use the following formula: # free = math.floor(free_space - total * reserved) def test_default_of_spreading_first(self): hostinfo_list = self._get_all_hosts() # host1: thin_provisioning = False # free_capacity_gb = 1024 # free = math.floor(1024 - 1024 * 0.1) = 921.0 # weight = 0.40 # host2: thin_provisioning = True # max_over_subscription_ratio = 2.0 # free_capacity_gb = 300 # free = math.floor(2048 * 2.0 - 1748 - 2048 * 0.1)=2143.0 # weight = 1.0 # host3: thin_provisioning = False # free_capacity_gb = 512 # free = math.floor(256 - 512 * 0)=256.0 # weight = 0.08 # host4: thin_provisioning = True # max_over_subscription_ratio = 1.0 # free_capacity_gb = 200 # free = math.floor(2048 * 1.0 - 1848 - 2048 * 0.05) = 97.0 # weight = 0.0 # host5: thin_provisioning = True # max_over_subscription_ratio = 1.5 # free_capacity_gb = 500 # free = math.floor(2048 * 1.5 - 1548 - 2048 * 0.05) = 1421.0 # weight = 0.65 # host6: thin_provisioning = False # free = inf # weight = 0.0 # so, host2 should win: weighed_host = self._get_weighed_host(hostinfo_list) self.assertEqual(1.0, weighed_host.weight) self.assertEqual( 'host2', utils.extract_host(weighed_host.obj.host)) def test_unknown_is_last(self): hostinfo_list = self._get_all_hosts() last_host = self._get_weighed_host(hostinfo_list, index=-1) self.assertEqual( 'host6', utils.extract_host(last_host.obj.host)) self.assertEqual(0.0, last_host.weight) def test_capacity_weight_multiplier_negative_1(self): self.flags(capacity_weight_multiplier=-1.0) hostinfo_list = self._get_all_hosts() # host1: thin_provisioning = False # free_capacity_gb = 1024 # free = math.floor(1024 - 1024 * 0.1) = 921.0 # free * (-1) = -921.0 # weight = -0.40 # host2: thin_provisioning = True # max_over_subscription_ratio = 2.0 # free_capacity_gb = 300 # free = math.floor(2048 * 2.0-1748-2048 * 0.1) = 2143.0 # free * (-1) = -2143.0 # weight = -1.0 # host3: thin_provisioning = False # free_capacity_gb = 512 # free = math.floor(256 - 512 * 0) = 256.0 # free * (-1) = -256.0 # weight = -0.08 # host4: thin_provisioning = True # max_over_subscription_ratio = 1.0 # free_capacity_gb = 200 # free = 
math.floor(2048 * 1.0 - 1848 - 2048 * 0.05) = 97.0 # free * (-1) = -97.0 # weight = 0.0 # host5: thin_provisioning = True # max_over_subscription_ratio = 1.5 # free_capacity_gb = 500 # free = math.floor(2048 * 1.5 - 1548 - 2048 * 0.05) = 1421.0 # free * (-1) = -1421.0 # weight = -0.65 # host6: thin_provisioning = False # free = inf # free * (-1) = -inf # weight = 0.0 # so, host4 should win: weighed_host = self._get_weighed_host(hostinfo_list) self.assertEqual(0.0, weighed_host.weight) self.assertEqual( 'host4', utils.extract_host(weighed_host.obj.host)) def test_capacity_weight_multiplier_2(self): self.flags(capacity_weight_multiplier=2.0) hostinfo_list = self._get_all_hosts() # host1: thin_provisioning = False # free_capacity_gb = 1024 # free = math.floor(1024-1024*0.1) = 921.0 # free * 2 = 1842.0 # weight = 0.81 # host2: thin_provisioning = True # max_over_subscription_ratio = 2.0 # free_capacity_gb = 300 # free = math.floor(2048 * 2.0 - 1748 - 2048 * 0.1) = 2143.0 # free * 2 = 4286.0 # weight = 2.0 # host3: thin_provisioning = False # free_capacity_gb = 512 # free = math.floor(256 - 512 * 0) = 256.0 # free * 2 = 512.0 # weight = 0.16 # host4: thin_provisioning = True # max_over_subscription_ratio = 1.0 # free_capacity_gb = 200 # free = math.floor(2048 * 1.0 - 1848 - 2048 * 0.05) = 97.0 # free * 2 = 194.0 # weight = 0.0 # host5: thin_provisioning = True # max_over_subscription_ratio = 1.5 # free_capacity_gb = 500 # free = math.floor(2048 * 1.5 - 1548 - 2048 * 0.05) = 1421.0 # free * 2 = 2842.0 # weight = 1.29 # host6: thin_provisioning = False # free = inf # weight = 0.0 # so, host2 should win: weighed_host = self._get_weighed_host(hostinfo_list) self.assertEqual(2.0, weighed_host.weight) self.assertEqual( 'host2', utils.extract_host(weighed_host.obj.host)) manila-2.0.0/manila/tests/scheduler/weighers/test_pool.py0000664000567000056710000001511612701407107024676 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Pool Weigher. 
""" import mock from oslo_config import cfg from oslo_utils import timeutils from manila import context from manila.db import api as db_api from manila.scheduler.weighers import base_host from manila.scheduler.weighers import pool from manila.share import utils from manila import test from manila.tests.scheduler import fakes CONF = cfg.CONF class PoolWeigherTestCase(test.TestCase): def setUp(self): super(PoolWeigherTestCase, self).setUp() self.host_manager = fakes.FakeHostManager() self.weight_handler = base_host.HostWeightHandler( 'manila.scheduler.weighers') share_servers = [ {'id': 'fake_server_id0'}, {'id': 'fake_server_id1'}, {'id': 'fake_server_id2'}, {'id': 'fake_server_id3'}, {'id': 'fake_server_id4'}, ] services = [ dict(id=1, host='host1@AAA', topic='share', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow()), dict(id=2, host='host2@BBB', topic='share', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow()), dict(id=3, host='host3@CCC', topic='share', disabled=False, availability_zone='zone2', updated_at=timeutils.utcnow()), dict(id=4, host='host@DDD', topic='share', disabled=False, availability_zone='zone3', updated_at=timeutils.utcnow()), dict(id=5, host='host5@EEE', topic='share', disabled=False, availability_zone='zone3', updated_at=timeutils.utcnow()), ] self.host_manager.service_states = ( fakes.SHARE_SERVICE_STATES_WITH_POOLS) self.mock_object(db_api, 'share_server_get_all_by_host', mock.Mock(return_value=share_servers)) self.mock_object(db_api.IMPL, 'service_get_all_by_topic', mock.Mock(return_value=services)) def _get_weighed_host(self, hosts, weight_properties=None): if weight_properties is None: weight_properties = { 'server_pools_mapping': { 'fake_server_id2': [{'pool_name': 'pool2'}, ], }, } return self.weight_handler.get_weighed_objects( [pool.PoolWeigher], hosts, weight_properties)[0] def _get_all_hosts(self): ctxt = context.get_admin_context() host_states = self.host_manager.get_all_host_states_share(ctxt) db_api.IMPL.service_get_all_by_topic.assert_called_once_with( ctxt, CONF.share_topic) return host_states def test_no_server_pool_mapping(self): weight_properties = { 'server_pools_mapping': {}, } weighed_host = self._get_weighed_host(self._get_all_hosts(), weight_properties) self.assertEqual(0.0, weighed_host.weight) def test_choose_pool_with_existing_share_server(self): # host1: weight = 0*(1.0) # host2: weight = 1*(1.0) # host3: weight = 0*(1.0) # host4: weight = 0*(1.0) # host5: weight = 0*(1.0) # so, host2 should win: weighed_host = self._get_weighed_host(self._get_all_hosts()) self.assertEqual(1.0, weighed_host.weight) self.assertEqual( 'host2@BBB', utils.extract_host(weighed_host.obj.host)) def test_pool_weight_multiplier_positive(self): self.flags(pool_weight_multiplier=2.0) # host1: weight = 0*(2.0) # host2: weight = 1*(2.0) # host3: weight = 0*(2.0) # host4: weight = 0*(2.0) # host5: weight = 0*(2.0) # so, host2 should win: weighed_host = self._get_weighed_host(self._get_all_hosts()) self.assertEqual(2.0, weighed_host.weight) self.assertEqual( 'host2@BBB', utils.extract_host(weighed_host.obj.host)) def test_pool_weight_multiplier_negative(self): self.flags(pool_weight_multiplier=-1.0) weight_properties = { 'server_pools_mapping': { 'fake_server_id0': [{'pool_name': 'pool1'}], 'fake_server_id2': [{'pool_name': 'pool3'}], 'fake_server_id3': [ {'pool_name': 'pool4a'}, {'pool_name': 'pool4b'}, ], 'fake_server_id4': [ {'pool_name': 'pool5a'}, {'pool_name': 'pool5b'}, ], }, } # host1: weight = 1*(-1.0) # host2: weight 
= 0*(-1.0) # host3: weight = 1*(-1.0) # host4: weight = 1*(-1.0) # host5: weight = 1*(-1.0) # so, host2 should win: weighed_host = self._get_weighed_host(self._get_all_hosts(), weight_properties) self.assertEqual(0.0, weighed_host.weight) self.assertEqual( 'host2@BBB', utils.extract_host(weighed_host.obj.host)) def test_pool_weigher_all_pools_with_share_servers(self): weight_properties = { 'server_pools_mapping': { 'fake_server_id0': [{'pool_name': 'pool1'}], 'fake_server_id1': [{'pool_name': 'pool2'}], 'fake_server_id2': [{'pool_name': 'pool3'}], 'fake_server_id3': [ {'pool_name': 'pool4a'}, {'pool_name': 'pool4b'}, ], 'fake_server_id4': [ {'pool_name': 'pool5a'}, {'pool_name': 'pool5b'}, ], }, } # host1: weight = 1*(1.0) # host2: weight = 1*(1.0) # host3: weight = 1*(1.0) # host4: weight = 1*(1.0) # host5: weight = 1*(1.0) # But after normalization all weighers will be 0 weighed_host = self._get_weighed_host(self._get_all_hosts(), weight_properties) self.assertEqual(0.0, weighed_host.weight) manila-2.0.0/manila/tests/db_utils.py0000664000567000056710000001436612701407107020706 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from manila.common import constants from manila import context from manila import db def _create_db_row(method, default_values, custom_values): override_defaults = custom_values.pop('override_defaults', None) if override_defaults: default_values = custom_values else: default_values.update(copy.deepcopy(custom_values)) return method(context.get_admin_context(), default_values) def create_consistency_group(**kwargs): """Create a consistency group object.""" cg = { 'share_network_id': None, 'share_server_id': None, 'user_id': 'fake', 'project_id': 'fake', 'status': constants.STATUS_CREATING, 'host': 'fake_host' } return _create_db_row(db.consistency_group_create, cg, kwargs) def create_cgsnapshot(cg_id, **kwargs): """Create a cgsnapshot object.""" snapshot = { 'consistency_group_id': cg_id, 'user_id': 'fake', 'project_id': 'fake', 'status': constants.STATUS_CREATING, } return _create_db_row(db.cgsnapshot_create, snapshot, kwargs) def create_cgsnapshot_member(cgsnapshot_id, **kwargs): """Create a cgsnapshot member object.""" member = { 'share_proto': "NFS", 'size': 0, 'share_id': None, 'share_instance_id': None, 'user_id': 'fake', 'project_id': 'fake', 'status': 'creating', 'cgsnapshot_id': cgsnapshot_id, } return _create_db_row(db.cgsnapshot_member_create, member, kwargs) def create_share(**kwargs): """Create a share object.""" share = { 'share_proto': "NFS", 'size': 0, 'snapshot_id': None, 'share_network_id': None, 'share_server_id': None, 'user_id': 'fake', 'project_id': 'fake', 'metadata': {'fake_key': 'fake_value'}, 'availability_zone': 'fake_availability_zone', 'status': constants.STATUS_CREATING, 'host': 'fake_host' } return _create_db_row(db.share_create, share, kwargs) def create_share_instance(**kwargs): """Create a share instance object.""" instance = { 'host': 'fake', 
'status': constants.STATUS_CREATING, } instance.update(kwargs) return db.share_instance_create(context.get_admin_context(), kwargs.pop('share_id'), kwargs) def create_share_replica(**kwargs): """Create a share replica object.""" replica = { 'host': 'fake', 'status': constants.STATUS_CREATING, } replica.update(kwargs) if 'share_id' not in kwargs: share = create_share() kwargs['share_id'] = share['id'] return db.share_instance_create(context.get_admin_context(), kwargs.pop('share_id'), kwargs) def create_snapshot(**kwargs): """Create a snapshot object.""" with_share = kwargs.pop('with_share', False) share = None if with_share: share = create_share(status=constants.STATUS_AVAILABLE, size=kwargs.get('size', 0)) snapshot = { 'share_proto': "NFS", 'size': 0, 'share_id': share['id'] if with_share else None, 'user_id': 'fake', 'project_id': 'fake', 'status': 'creating', 'provider_location': 'fake', } snapshot.update(kwargs) return db.share_snapshot_create(context.get_admin_context(), snapshot) def create_snapshot_instance(snapshot_id, **kwargs): """Create a share snapshot instance object.""" snapshot_instance = { 'provider_location': 'fake_provider_location', 'progress': '0%', 'status': constants.STATUS_CREATING, } snapshot_instance.update(kwargs) return db.share_snapshot_instance_create( context.get_admin_context(), snapshot_id, snapshot_instance) def create_access(**kwargs): """Create a access rule object.""" access = { 'access_type': 'fake_type', 'access_to': 'fake_IP', 'share_id': None, } return _create_db_row(db.share_access_create, access, kwargs) def create_share_server(**kwargs): """Create a share server object.""" backend_details = kwargs.pop('backend_details', {}) srv = { 'host': 'host1', 'share_network_id': 'fake_srv_id', 'status': constants.STATUS_ACTIVE } share_srv = _create_db_row(db.share_server_create, srv, kwargs) if backend_details: db.share_server_backend_details_set( context.get_admin_context(), share_srv['id'], backend_details) return db.share_server_get(context.get_admin_context(), share_srv['id']) def create_share_network(**kwargs): """Create a share network object.""" net = { 'user_id': 'fake', 'project_id': 'fake', 'neutron_net_id': 'fake-neutron-net', 'neutron_subnet_id': 'fake-neutron-subnet', 'status': 'new', 'network_type': 'vlan', 'segmentation_id': 1000, 'cidr': '10.0.0.0/24', 'ip_version': 4, 'name': 'whatever', 'description': 'fake description', } return _create_db_row(db.share_network_create, net, kwargs) def create_security_service(**kwargs): share_network_id = kwargs.pop('share_network_id', None) service = { 'type': "FAKE", 'project_id': 'fake-project-id', } service_ref = _create_db_row(db.security_service_create, service, kwargs) if share_network_id: db.share_network_add_security_service(context.get_admin_context(), share_network_id, service_ref['id']) return service_ref manila-2.0.0/manila/tests/test_test.py0000664000567000056710000000276512701407107021117 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
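# NOTE: standalone sketch of the default-merging rule that _create_db_row applies
# in db_utils above: custom keyword arguments are merged over the defaults unless
# override_defaults is set, in which case they replace them. Illustrative only,
# with made-up values; the helper name is not part of manila.
import copy

def _merge_row_values(default_values, custom_values):
    custom_values = dict(custom_values)
    if custom_values.pop('override_defaults', None):
        # Caller supplies the full row; defaults are discarded.
        return custom_values
    merged = dict(default_values)
    merged.update(copy.deepcopy(custom_values))
    return merged

assert _merge_row_values({'size': 0, 'status': 'creating'}, {'size': 5}) == {
    'size': 5, 'status': 'creating'}
assert _merge_row_values({'size': 0}, {'override_defaults': True, 'host': 'h1'}) == {
    'host': 'h1'}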
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for the testing base code.""" from oslo_config import cfg import oslo_messaging as messaging from manila import rpc from manila import test class IsolationTestCase(test.TestCase): """Ensure that things are cleaned up after failed tests. These tests don't really do much here, but if isolation fails a bunch of other tests should fail. """ def test_service_isolation(self): self.start_service('share') def test_rpc_consumer_isolation(self): class NeverCalled(object): def __getattribute__(*args): assert False, "I should never get called." target = messaging.Target(topic='share', server=cfg.CONF.host) server = rpc.get_server(target=target, endpoints=[NeverCalled()]) server.start() manila-2.0.0/manila/tests/xenapi/0000775000567000056710000000000012701407265020006 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/xenapi/__init__.py0000664000567000056710000000000012701407107022100 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/common/0000775000567000056710000000000012701407265020012 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/common/__init__.py0000664000567000056710000000000012701407107022104 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/common/test_client_auth.py0000664000567000056710000001116412701407107023720 0ustar jenkinsjenkins00000000000000# Copyright 2016 SAP SE # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
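# NOTE: tiny standalone illustration of the "NeverCalled" sentinel pattern used by
# IsolationTestCase above: overriding __getattribute__ makes any attribute access
# (and therefore any RPC dispatch to the endpoint) fail immediately. This is a
# sketch, not the test code itself.
class _NeverCalledSketch(object):
    def __getattribute__(self, name):
        raise AssertionError('I should never get called.')

try:
    _NeverCalledSketch().anything
except AssertionError:
    pass  # expected: any access to the sentinel blows up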
from keystoneauth1 import loading as auth from keystoneauth1.loading._plugins.identity import v2 from oslo_config import cfg import mock from manila.common import client_auth from manila import exception from manila import test from manila.tests import fake_client_exception_class class ClientAuthTestCase(test.TestCase): def setUp(self): super(ClientAuthTestCase, self).setUp() self.context = mock.Mock() self.fake_client = mock.Mock() self.execption_mod = fake_client_exception_class self.auth = client_auth.AuthClientLoader( self.fake_client, self.execption_mod, 'foo_group') def test_get_client_admin_true(self): mock_load_session = self.mock_object(auth, 'load_session_from_conf_options') self.auth.get_client(self.context, admin=True) mock_load_session.assert_called_once_with(client_auth.CONF, 'foo_group') self.fake_client.assert_called_once_with( session=mock_load_session(), auth=auth.load_auth_from_conf_options()) def test_get_client_admin_false(self): self.mock_object(auth, 'load_session_from_conf_options') self.assertRaises(exception.ManilaException, self.auth.get_client, self.context, admin=False) def test_load_auth_plugin_caching(self): self.auth.admin_auth = 'admin obj' result = self.auth._load_auth_plugin() self.assertEqual(self.auth.admin_auth, result) def test_load_auth_plugin_no_auth(self): auth.load_auth_from_conf_options.return_value = None self.assertRaises(fake_client_exception_class.Unauthorized, self.auth._load_auth_plugin) def test_load_auth_plugin_no_auth_deprecated_opts(self): auth.load_auth_from_conf_options.return_value = None self.auth.deprecated_opts_for_v2 = {"username": "foo"} pwd_mock = self.mock_object(v2, 'Password') auth_result = mock.Mock() auth_result.load_from_options = mock.Mock(return_value='foo_auth') pwd_mock.return_value = auth_result result = self.auth._load_auth_plugin() pwd_mock.assert_called_once_with() auth_result.load_from_options.assert_called_once_with(username='foo') self.assertEqual(result, 'foo_auth') @mock.patch.object(auth, 'register_session_conf_options') @mock.patch.object(auth, 'get_auth_common_conf_options') @mock.patch.object(auth, 'get_auth_plugin_conf_options') def test_list_opts(self, auth_conf, common_conf, register): register.return_value = [cfg.StrOpt('username'), cfg.StrOpt('password')] common_conf.return_value = ([cfg.StrOpt('auth_url')]) auth_conf.return_value = [cfg.StrOpt('password')] result = client_auth.AuthClientLoader.list_opts("foo_group") self.assertEqual('foo_group', result[0][0]) for entry in result[0][1]: self.assertIn(entry.name, ['username', 'auth_url', 'password']) common_conf.assert_called_once_with() auth_conf.assert_called_once_with('password') @mock.patch.object(auth, 'register_session_conf_options') @mock.patch.object(auth, 'get_auth_common_conf_options') @mock.patch.object(auth, 'get_auth_plugin_conf_options') def test_list_opts_not_found(self, auth_conf, common_conf, register,): register.return_value = [cfg.StrOpt('username'), cfg.StrOpt('password')] common_conf.return_value = ([cfg.StrOpt('auth_url')]) auth_conf.return_value = [cfg.StrOpt('tenant')] result = client_auth.AuthClientLoader.list_opts("foo_group") self.assertEqual('foo_group', result[0][0]) for entry in result[0][1]: self.assertIn(entry.name, ['username', 'auth_url', 'password', 'tenant']) common_conf.assert_called_once_with() auth_conf.assert_called_once_with('password') manila-2.0.0/manila/tests/common/test_config.py0000664000567000056710000000325012701407107022663 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from manila.common import config from manila.common import constants from manila import exception from manila import test from manila.tests import utils as test_utils VALID_CASES = [proto.lower() for proto in constants.SUPPORTED_SHARE_PROTOCOLS] VALID_CASES.extend([proto.upper() for proto in VALID_CASES]) VALID_CASES.append(','.join(case for case in VALID_CASES)) @ddt.ddt class VerifyConfigShareProtocolsTestCase(test.TestCase): @ddt.data(*VALID_CASES) def test_verify_share_protocols_valid_cases(self, proto): data = dict(DEFAULT=dict(enabled_share_protocols=proto)) with test_utils.create_temp_config_with_opts(data): config.verify_share_protocols() @ddt.data(None, '', 'fake', [], ['fake'], [VALID_CASES[0] + 'fake']) def test_verify_share_protocols_invalid_cases(self, proto): data = dict(DEFAULT=dict(enabled_share_protocols=proto)) with test_utils.create_temp_config_with_opts(data): self.assertRaises( exception.ManilaException, config.verify_share_protocols) manila-2.0.0/manila/tests/runtime_conf.py0000664000567000056710000000153012701407107021556 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg CONF = cfg.CONF CONF.register_opt(cfg.IntOpt('runtime_answer', default=54, help='test flag')) manila-2.0.0/manila/tests/db/0000775000567000056710000000000012701407265017107 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/db/test_migration.py0000664000567000056710000000544412701407107022513 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
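# NOTE: standalone sketch of the share-protocol validation idea exercised by
# VerifyConfigShareProtocolsTestCase above. It is illustrative only, not
# manila.common.config.verify_share_protocols, and the _SUPPORTED set below is
# an assumption made for this example.
_SUPPORTED = {'NFS', 'CIFS', 'GLUSTERFS', 'HDFS'}

def _protocols_look_valid(enabled):
    if not enabled:
        return False
    if isinstance(enabled, str):
        enabled = enabled.split(',')
    # Matching is case-insensitive, mirroring the lower/upper VALID_CASES above.
    return all(proto.upper() in _SUPPORTED for proto in enabled)

assert _protocols_look_valid('nfs,CIFS')
assert not _protocols_look_valid('fake')
assert not _protocols_look_valid([])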
import alembic
import mock

from manila.db import migration
from manila import test


class MigrationTestCase(test.TestCase):
    def setUp(self):
        super(MigrationTestCase, self).setUp()
        self.config_patcher = mock.patch(
            'manila.db.migrations.alembic.migration._alembic_config')
        self.config = self.config_patcher.start()
        self.config.return_value = 'fake_config'
        self.addCleanup(self.config_patcher.stop)

    @mock.patch('alembic.command.upgrade')
    def test_upgrade(self, upgrade):
        migration.upgrade('version_1')
        upgrade.assert_called_once_with('fake_config', 'version_1')

    @mock.patch('alembic.command.upgrade')
    def test_upgrade_none_version(self, upgrade):
        migration.upgrade(None)
        upgrade.assert_called_once_with('fake_config', 'head')

    @mock.patch('alembic.command.downgrade')
    def test_downgrade(self, downgrade):
        migration.downgrade('version_1')
        downgrade.assert_called_once_with('fake_config', 'version_1')

    @mock.patch('alembic.command.downgrade')
    def test_downgrade_none_version(self, downgrade):
        migration.downgrade(None)
        downgrade.assert_called_once_with('fake_config', 'base')

    @mock.patch('alembic.command.stamp')
    def test_stamp(self, stamp):
        migration.stamp('version_1')
        stamp.assert_called_once_with('fake_config', 'version_1')

    @mock.patch('alembic.command.stamp')
    def test_stamp_none_version(self, stamp):
        migration.stamp(None)
        stamp.assert_called_once_with('fake_config', 'head')

    @mock.patch('alembic.command.revision')
    def test_revision(self, revision):
        migration.revision('test_message', 'autogenerate_value')
        revision.assert_called_once_with('fake_config', 'test_message',
                                         'autogenerate_value')

    @mock.patch.object(alembic.migration.MigrationContext, 'configure',
                       mock.Mock())
    def test_version(self):
        context = mock.Mock()
        context.get_current_revision = mock.Mock()
        alembic.migration.MigrationContext.configure.return_value = context
        migration.version()
        context.get_current_revision.assert_called_once_with()

manila-2.0.0/manila/tests/db/__init__.py0000664000567000056710000000126012701407107021212 0ustar jenkinsjenkins00000000000000
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
:mod:`db` -- Stubs for DB API
=============================
"""

manila-2.0.0/manila/tests/db/test_api.py0000664000567000056710000000421612701407107021267 0ustar jenkinsjenkins00000000000000
# Copyright (c) Goutham Pacha Ravi.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
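# The module below checks that manila.db.api stays in sync with the SQLAlchemy
# implementation it delegates to.  A minimal sketch of that shim pattern,
# assuming the usual oslo.db layout (share_get is only an illustrative
# example, not a complete listing of the interface):
#
#     from oslo_config import cfg
#     from oslo_db import api as oslo_db_api
#
#     _BACKEND_MAPPING = {'sqlalchemy': 'manila.db.sqlalchemy.api'}
#     IMPL = oslo_db_api.DBAPI.from_config(cfg.CONF,
#                                          backend_mapping=_BACKEND_MAPPING)
#
#     def share_get(context, share_id):
#         return IMPL.share_get(context, share_id)
#
# Any drift between such a shim and the signature of the real implementation
# is what test_interface_methods below is designed to catch.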
"""Unit Tests for the interface methods in the manila/db/api.py.""" import re from manila.db import api as db_interface from manila.db.sqlalchemy import api as db_api from manila import test class DBInterfaceTestCase(test.TestCase): """Test cases for the DB Interface methods.""" def setUp(self): super(self.__class__, self).setUp() def test_interface_methods(self): """Ensure that implementation methods match interfaces. manila/db/api module is merely shim layer between the database implementation and the other methods using these implementations. Bugs are introduced when the shims go out of sync with the actual implementation. So this test ensures that method names and signatures match between the interface and the implementation. """ members = dir(db_interface) # Ignore private methods for the file and any other members that # need not match. ignore_members = re.compile(r'^_|CONF|IMPL') interfaces = [i for i in members if not ignore_members.match(i)] for interface in interfaces: method = getattr(db_interface, interface) if callable(method): mock_method_call = self.mock_object(db_api, interface) # kwargs always specify defaults, ignore them in the signature. args = filter( lambda x: x != 'kwargs', method.__code__.co_varnames) method(*args) self.assertTrue(mock_method_call.called) manila-2.0.0/manila/tests/db/sqlalchemy/0000775000567000056710000000000012701407265021251 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/db/sqlalchemy/__init__.py0000664000567000056710000000000012701407107023343 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/db/sqlalchemy/test_api.py0000664000567000056710000025757112701407107023447 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright (c) 2014 NetApp, Inc. # Copyright (c) 2015 Rushil Chugh # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Testing of SQLAlchemy backend.""" import ddt from oslo_db import exception as db_exception from oslo_utils import uuidutils import six from manila.common import constants from manila import context from manila.db.sqlalchemy import api as db_api from manila.db.sqlalchemy import models from manila import exception from manila import test from manila.tests import db_utils security_service_dict = { 'id': 'fake id', 'project_id': 'fake project', 'type': 'ldap', 'dns_ip': 'fake dns', 'server': 'fake ldap server', 'domain': 'fake ldap domain', 'user': 'fake user', 'password': 'fake password', 'name': 'whatever', 'description': 'nevermind', } class BaseDatabaseAPITestCase(test.TestCase): def _check_fields(self, expected, actual): for key in expected: self.assertEqual(expected[key], actual[key]) @ddt.ddt class GenericDatabaseAPITestCase(test.TestCase): def setUp(self): """Run before each test.""" super(GenericDatabaseAPITestCase, self).setUp() self.ctxt = context.get_admin_context() @ddt.unpack @ddt.data( {'values': {'test': 'fake'}, 'call_count': 1}, {'values': {'test': 'fake', 'id': 'fake'}, 'call_count': 0}, {'values': {'test': 'fake', 'fooid': 'fake'}, 'call_count': 1}, {'values': {'test': 'fake', 'idfoo': 'fake'}, 'call_count': 1}, ) def test_ensure_model_values_has_id(self, values, call_count): self.mock_object(uuidutils, 'generate_uuid') db_api.ensure_model_dict_has_id(values) self.assertEqual(call_count, uuidutils.generate_uuid.call_count) self.assertIn('id', values) def test_custom_query(self): share = db_utils.create_share() share_access = db_utils.create_access(share_id=share['id']) db_api.share_instance_access_delete( self.ctxt, share_access.instance_mappings[0].id) db_api.share_access_delete(self.ctxt, share_access.id) self.assertRaises(exception.NotFound, db_api.share_access_get, self.ctxt, share_access.id) @ddt.ddt class ShareAccessDatabaseAPITestCase(test.TestCase): def setUp(self): """Run before each test.""" super(ShareAccessDatabaseAPITestCase, self).setUp() self.ctxt = context.get_admin_context() def test_share_instance_update_access_status(self): share = db_utils.create_share() share_instance = db_utils.create_share_instance(share_id=share['id']) db_utils.create_access(share_id=share_instance['share_id']) db_api.share_instance_update_access_status( self.ctxt, share_instance['id'], constants.STATUS_ACTIVE ) result = db_api.share_instance_get(self.ctxt, share_instance['id']) self.assertEqual(constants.STATUS_ACTIVE, result['access_rules_status']) def test_share_instance_update_access_status_invalid(self): share = db_utils.create_share() share_instance = db_utils.create_share_instance(share_id=share['id']) db_utils.create_access(share_id=share_instance['share_id']) self.assertRaises( db_exception.DBError, db_api.share_instance_update_access_status, self.ctxt, share_instance['id'], "fake_status" ) @ddt.ddt class ShareDatabaseAPITestCase(test.TestCase): def setUp(self): """Run before each test.""" super(ShareDatabaseAPITestCase, self).setUp() self.ctxt = context.get_admin_context() def test_share_filter_by_host_with_pools(self): share_instances = [[ db_api.share_create(self.ctxt, {'host': value}).instance for value in ('foo', 'foo#pool0')]] db_utils.create_share() self._assertEqualListsOfObjects(share_instances[0], db_api.share_instances_get_all_by_host( self.ctxt, 'foo'), ignored_keys=['share_type', 'share_type_id', 'export_locations']) def test_share_filter_all_by_host_with_pools_multiple_hosts(self): share_instances = [[ db_api.share_create(self.ctxt, {'host': value}).instance 
for value in ('foo', 'foo#pool0', 'foo', 'foo#pool1')]] db_utils.create_share() self._assertEqualListsOfObjects(share_instances[0], db_api.share_instances_get_all_by_host( self.ctxt, 'foo'), ignored_keys=['share_type', 'share_type_id', 'export_locations']) def test_share_filter_all_by_share_server(self): share_network = db_utils.create_share_network() share_server = db_utils.create_share_server( share_network_id=share_network['id']) share = db_utils.create_share(share_server_id=share_server['id'], share_network_id=share_network['id']) actual_result = db_api.share_get_all_by_share_server( self.ctxt, share_server['id']) self.assertEqual(1, len(actual_result)) self.assertEqual(share['id'], actual_result[0].id) def test_share_filter_all_by_consistency_group(self): cg = db_utils.create_consistency_group() share = db_utils.create_share(consistency_group_id=cg['id']) actual_result = db_api.share_get_all_by_consistency_group_id( self.ctxt, cg['id']) self.assertEqual(1, len(actual_result)) self.assertEqual(share['id'], actual_result[0].id) def test_share_instance_delete_with_share(self): share = db_utils.create_share() db_api.share_instance_delete(self.ctxt, share.instance['id']) self.assertRaises(exception.NotFound, db_api.share_get, self.ctxt, share['id']) def test_share_instance_get(self): share = db_utils.create_share() instance = db_api.share_instance_get(self.ctxt, share.instance['id']) self.assertEqual('share-%s' % instance['id'], instance['name']) def test_share_instance_get_all_by_consistency_group(self): cg = db_utils.create_consistency_group() db_utils.create_share(consistency_group_id=cg['id']) db_utils.create_share() instances = db_api.share_instances_get_all_by_consistency_group_id( self.ctxt, cg['id']) self.assertEqual(1, len(instances)) instance = instances[0] self.assertEqual('share-%s' % instance['id'], instance['name']) @ddt.data('host', 'consistency_group_id') def test_share_get_all_sort_by_share_instance_fields(self, sort_key): shares = [db_utils.create_share(**{sort_key: n, 'size': 1}) for n in ('test1', 'test2')] actual_result = db_api.share_get_all( self.ctxt, sort_key=sort_key, sort_dir='desc') self.assertEqual(2, len(actual_result)) self.assertEqual(shares[0]['id'], actual_result[1]['id']) @ddt.data(None, 'writable') def test_share_get_has_replicas_field(self, replication_type): share = db_utils.create_share(replication_type=replication_type) db_share = db_api.share_get(self.ctxt, share['id']) self.assertTrue('has_replicas' in db_share) @ddt.data({'with_share_data': False, 'with_share_server': False}, {'with_share_data': False, 'with_share_server': True}, {'with_share_data': True, 'with_share_server': False}, {'with_share_data': True, 'with_share_server': True}) @ddt.unpack def test_share_replicas_get_all(self, with_share_data, with_share_server): share_server = db_utils.create_share_server() share_1 = db_utils.create_share() share_2 = db_utils.create_share() db_utils.create_share_replica( replica_state=constants.REPLICA_STATE_ACTIVE, share_id=share_1['id'], share_server_id=share_server['id']) db_utils.create_share_replica( replica_state=constants.REPLICA_STATE_IN_SYNC, share_id=share_1['id'], share_server_id=share_server['id']) db_utils.create_share_replica( replica_state=constants.REPLICA_STATE_OUT_OF_SYNC, share_id=share_2['id'], share_server_id=share_server['id']) db_utils.create_share_replica(share_id=share_2['id']) expected_ss_keys = { 'backend_details', 'host', 'id', 'share_network_id', 'status', } expected_share_keys = { 'project_id', 'share_type_id', 
'display_name', 'name', 'share_proto', 'is_public', 'source_cgsnapshot_member_id', } session = db_api.get_session() with session.begin(): share_replicas = db_api.share_replicas_get_all( self.ctxt, with_share_server=with_share_server, with_share_data=with_share_data, session=session) self.assertEqual(3, len(share_replicas)) for replica in share_replicas: if with_share_server: self.assertTrue(expected_ss_keys.issubset( replica['share_server'].keys())) else: self.assertFalse('share_server' in replica.keys()) self.assertEqual( with_share_data, expected_share_keys.issubset(replica.keys())) @ddt.data({'with_share_data': False, 'with_share_server': False}, {'with_share_data': False, 'with_share_server': True}, {'with_share_data': True, 'with_share_server': False}, {'with_share_data': True, 'with_share_server': True}) @ddt.unpack def test_share_replicas_get_all_by_share(self, with_share_data, with_share_server): share_server = db_utils.create_share_server() share = db_utils.create_share() db_utils.create_share_replica( replica_state=constants.REPLICA_STATE_ACTIVE, share_id=share['id'], share_server_id=share_server['id']) db_utils.create_share_replica( replica_state=constants.REPLICA_STATE_IN_SYNC, share_id=share['id'], share_server_id=share_server['id']) db_utils.create_share_replica( replica_state=constants.REPLICA_STATE_OUT_OF_SYNC, share_id=share['id'], share_server_id=share_server['id']) expected_ss_keys = { 'backend_details', 'host', 'id', 'share_network_id', 'status', } expected_share_keys = { 'project_id', 'share_type_id', 'display_name', 'name', 'share_proto', 'is_public', 'source_cgsnapshot_member_id', } session = db_api.get_session() with session.begin(): share_replicas = db_api.share_replicas_get_all_by_share( self.ctxt, share['id'], with_share_server=with_share_server, with_share_data=with_share_data, session=session) self.assertEqual(3, len(share_replicas)) for replica in share_replicas: if with_share_server: self.assertTrue(expected_ss_keys.issubset( replica['share_server'].keys())) else: self.assertFalse('share_server' in replica.keys()) self.assertEqual(with_share_data, expected_share_keys.issubset(replica.keys())) def test_share_replicas_get_available_active_replica(self): share_server = db_utils.create_share_server() share_1 = db_utils.create_share() share_2 = db_utils.create_share() share_3 = db_utils.create_share() db_utils.create_share_replica( id='Replica1', share_id=share_1['id'], status=constants.STATUS_AVAILABLE, replica_state=constants.REPLICA_STATE_ACTIVE, share_server_id=share_server['id']) db_utils.create_share_replica( id='Replica2', status=constants.STATUS_AVAILABLE, share_id=share_1['id'], replica_state=constants.REPLICA_STATE_ACTIVE, share_server_id=share_server['id']) db_utils.create_share_replica( id='Replica3', status=constants.STATUS_AVAILABLE, share_id=share_2['id'], replica_state=constants.REPLICA_STATE_ACTIVE) db_utils.create_share_replica( id='Replica4', status=constants.STATUS_ERROR, share_id=share_2['id'], replica_state=constants.REPLICA_STATE_ACTIVE) db_utils.create_share_replica( id='Replica5', status=constants.STATUS_AVAILABLE, share_id=share_2['id'], replica_state=constants.REPLICA_STATE_IN_SYNC) db_utils.create_share_replica( id='Replica6', share_id=share_3['id'], status=constants.STATUS_AVAILABLE, replica_state=constants.REPLICA_STATE_IN_SYNC) session = db_api.get_session() expected_ss_keys = { 'backend_details', 'host', 'id', 'share_network_id', 'status', } expected_share_keys = { 'project_id', 'share_type_id', 'display_name', 'name', 
'share_proto', 'is_public', 'source_cgsnapshot_member_id', } with session.begin(): replica_share_1 = ( db_api.share_replicas_get_available_active_replica( self.ctxt, share_1['id'], with_share_server=True, session=session) ) replica_share_2 = ( db_api.share_replicas_get_available_active_replica( self.ctxt, share_2['id'], with_share_data=True, session=session) ) replica_share_3 = ( db_api.share_replicas_get_available_active_replica( self.ctxt, share_3['id'], session=session) ) self.assertIn(replica_share_1.get('id'), ['Replica1', 'Replica2']) self.assertTrue(expected_ss_keys.issubset( replica_share_1['share_server'].keys())) self.assertFalse( expected_share_keys.issubset(replica_share_1.keys())) self.assertEqual(replica_share_2.get('id'), 'Replica3') self.assertFalse(replica_share_2['share_server']) self.assertTrue( expected_share_keys.issubset(replica_share_2.keys())) self.assertIsNone(replica_share_3) def test_share_replicas_get_active_replicas_by_share(self): db_utils.create_share_replica( id='Replica1', share_id='FAKE_SHARE_ID1', status=constants.STATUS_AVAILABLE, replica_state=constants.REPLICA_STATE_ACTIVE) db_utils.create_share_replica( id='Replica2', status=constants.STATUS_AVAILABLE, share_id='FAKE_SHARE_ID1', replica_state=constants.REPLICA_STATE_ACTIVE) db_utils.create_share_replica( id='Replica3', status=constants.STATUS_AVAILABLE, share_id='FAKE_SHARE_ID2', replica_state=constants.REPLICA_STATE_ACTIVE) db_utils.create_share_replica( id='Replica4', status=constants.STATUS_ERROR, share_id='FAKE_SHARE_ID2', replica_state=constants.REPLICA_STATE_ACTIVE) db_utils.create_share_replica( id='Replica5', status=constants.STATUS_AVAILABLE, share_id='FAKE_SHARE_ID2', replica_state=constants.REPLICA_STATE_IN_SYNC) db_utils.create_share_replica( id='Replica6', share_id='FAKE_SHARE_ID3', status=constants.STATUS_AVAILABLE, replica_state=constants.REPLICA_STATE_IN_SYNC) def get_active_replica_ids(share_id): active_replicas = ( db_api.share_replicas_get_active_replicas_by_share( self.ctxt, share_id) ) return [r['id'] for r in active_replicas] active_ids_shr1 = get_active_replica_ids('FAKE_SHARE_ID1') active_ids_shr2 = get_active_replica_ids('FAKE_SHARE_ID2') active_ids_shr3 = get_active_replica_ids('FAKE_SHARE_ID3') self.assertEqual(active_ids_shr1, ['Replica1', 'Replica2']) self.assertEqual(active_ids_shr2, ['Replica3', 'Replica4']) self.assertEqual([], active_ids_shr3) def test_share_replica_get_exception(self): replica = db_utils.create_share_replica(share_id='FAKE_SHARE_ID') self.assertRaises(exception.ShareReplicaNotFound, db_api.share_replica_get, self.ctxt, replica['id']) def test_share_replica_get_without_share_data(self): share = db_utils.create_share() replica = db_utils.create_share_replica( share_id=share['id'], replica_state=constants.REPLICA_STATE_ACTIVE) expected_extra_keys = { 'project_id', 'share_type_id', 'display_name', 'name', 'share_proto', 'is_public', 'source_cgsnapshot_member_id', } share_replica = db_api.share_replica_get(self.ctxt, replica['id']) self.assertIsNotNone(share_replica['replica_state']) self.assertEqual(share['id'], share_replica['share_id']) self.assertFalse(expected_extra_keys.issubset(share_replica.keys())) def test_share_replica_get_with_share_data(self): share = db_utils.create_share() replica = db_utils.create_share_replica( share_id=share['id'], replica_state=constants.REPLICA_STATE_ACTIVE) expected_extra_keys = { 'project_id', 'share_type_id', 'display_name', 'name', 'share_proto', 'is_public', 'source_cgsnapshot_member_id', } share_replica = 
db_api.share_replica_get( self.ctxt, replica['id'], with_share_data=True) self.assertIsNotNone(share_replica['replica_state']) self.assertEqual(share['id'], share_replica['share_id']) self.assertTrue(expected_extra_keys.issubset(share_replica.keys())) def test_share_replica_get_with_share_server(self): session = db_api.get_session() share_server = db_utils.create_share_server() share = db_utils.create_share() replica = db_utils.create_share_replica( share_id=share['id'], replica_state=constants.REPLICA_STATE_ACTIVE, share_server_id=share_server['id'] ) expected_extra_keys = { 'backend_details', 'host', 'id', 'share_network_id', 'status', } with session.begin(): share_replica = db_api.share_replica_get( self.ctxt, replica['id'], with_share_server=True, session=session) self.assertIsNotNone(share_replica['replica_state']) self.assertEqual( share_server['id'], share_replica['share_server_id']) self.assertTrue(expected_extra_keys.issubset( share_replica['share_server'].keys())) def test_share_replica_update(self): share = db_utils.create_share() replica = db_utils.create_share_replica( share_id=share['id'], replica_state=constants.REPLICA_STATE_ACTIVE) updated_replica = db_api.share_replica_update( self.ctxt, replica['id'], {'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC}) self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC, updated_replica['replica_state']) def test_share_replica_delete(self): share = db_utils.create_share() share = db_api.share_get(self.ctxt, share['id']) replica = db_utils.create_share_replica( share_id=share['id'], replica_state=constants.REPLICA_STATE_ACTIVE) self.assertEqual(1, len( db_api.share_replicas_get_all_by_share(self.ctxt, share['id']))) db_api.share_replica_delete(self.ctxt, replica['id']) self.assertEqual( [], db_api.share_replicas_get_all_by_share(self.ctxt, share['id'])) def test_share_instance_access_copy(self): share = db_utils.create_share() rules = [] for i in range(0, 5): rules.append(db_utils.create_access(share_id=share['id'])) instance = db_utils.create_share_instance(share_id=share['id']) share_access_rules = db_api.share_instance_access_copy( self.ctxt, share['id'], instance['id']) share_access_rule_ids = [a['id'] for a in share_access_rules] self.assertEqual(5, len(share_access_rules)) for rule_id in share_access_rule_ids: self.assertIsNotNone( db_api.share_instance_access_get( self.ctxt, rule_id, instance['id'])) @ddt.ddt class ConsistencyGroupDatabaseAPITestCase(test.TestCase): def setUp(self): """Run before each test.""" super(ConsistencyGroupDatabaseAPITestCase, self).setUp() self.ctxt = context.get_admin_context() def test_consistency_group_create_with_share_type(self): fake_share_types = ["fake_share_type"] cg = db_utils.create_consistency_group(share_types=fake_share_types) cg = db_api.consistency_group_get(self.ctxt, cg['id']) self.assertEqual(1, len(cg['share_types'])) def test_consistency_group_get(self): cg = db_utils.create_consistency_group() self.assertDictMatch(dict(cg), dict(db_api.consistency_group_get(self.ctxt, cg['id']))) def test_count_consistency_groups_in_share_network(self): share_network = db_utils.create_share_network() db_utils.create_consistency_group() db_utils.create_consistency_group(share_network_id=share_network['id']) count = db_api.count_consistency_groups_in_share_network( self.ctxt, share_network_id=share_network['id']) self.assertEqual(1, count) def test_consistency_group_get_all(self): expected_cg = db_utils.create_consistency_group() cgs = db_api.consistency_group_get_all(self.ctxt, detailed=False) 
self.assertEqual(1, len(cgs)) cg = cgs[0] self.assertEqual(2, len(dict(cg).keys())) self.assertEqual(expected_cg['id'], cg['id']) self.assertEqual(expected_cg['name'], cg['name']) def test_consistency_group_get_all_with_detail(self): expected_cg = db_utils.create_consistency_group() cgs = db_api.consistency_group_get_all(self.ctxt, detailed=True) self.assertEqual(1, len(cgs)) cg = cgs[0] self.assertDictMatch(dict(expected_cg), dict(cg)) def test_consistency_group_get_all_by_host(self): fake_host = 'my_fake_host' expected_cg = db_utils.create_consistency_group(host=fake_host) db_utils.create_consistency_group() cgs = db_api.consistency_group_get_all_by_host(self.ctxt, fake_host, detailed=False) self.assertEqual(1, len(cgs)) cg = cgs[0] self.assertEqual(2, len(dict(cg).keys())) self.assertEqual(expected_cg['id'], cg['id']) self.assertEqual(expected_cg['name'], cg['name']) def test_consistency_group_get_all_by_host_with_details(self): fake_host = 'my_fake_host' expected_cg = db_utils.create_consistency_group(host=fake_host) db_utils.create_consistency_group() cgs = db_api.consistency_group_get_all_by_host(self.ctxt, fake_host, detailed=True) self.assertEqual(1, len(cgs)) cg = cgs[0] self.assertDictMatch(dict(expected_cg), dict(cg)) self.assertEqual(fake_host, cg['host']) def test_consistency_group_get_all_by_project(self): fake_project = 'fake_project' expected_cg = db_utils.create_consistency_group( project_id=fake_project) db_utils.create_consistency_group() cgs = db_api.consistency_group_get_all_by_project(self.ctxt, fake_project, detailed=False) self.assertEqual(1, len(cgs)) cg = cgs[0] self.assertEqual(2, len(dict(cg).keys())) self.assertEqual(expected_cg['id'], cg['id']) self.assertEqual(expected_cg['name'], cg['name']) def test_consistency_group_get_all_by_share_server(self): fake_server = 123 expected_cg = db_utils.create_consistency_group( share_server_id=fake_server) db_utils.create_consistency_group() cgs = db_api.consistency_group_get_all_by_share_server(self.ctxt, fake_server) self.assertEqual(1, len(cgs)) cg = cgs[0] self.assertEqual(expected_cg['id'], cg['id']) self.assertEqual(expected_cg['name'], cg['name']) def test_consistency_group_get_all_by_project_with_details(self): fake_project = 'fake_project' expected_cg = db_utils.create_consistency_group( project_id=fake_project) db_utils.create_consistency_group() cgs = db_api.consistency_group_get_all_by_project(self.ctxt, fake_project, detailed=True) self.assertEqual(1, len(cgs)) cg = cgs[0] self.assertDictMatch(dict(expected_cg), dict(cg)) self.assertEqual(fake_project, cg['project_id']) def test_consistency_group_update(self): fake_name = "my_fake_name" expected_cg = db_utils.create_consistency_group() expected_cg['name'] = fake_name db_api.consistency_group_update(self.ctxt, expected_cg['id'], {'name': fake_name}) cg = db_api.consistency_group_get(self.ctxt, expected_cg['id']) self.assertEqual(fake_name, cg['name']) def test_consistency_group_destroy(self): cg = db_utils.create_consistency_group() db_api.consistency_group_get(self.ctxt, cg['id']) db_api.consistency_group_destroy(self.ctxt, cg['id']) self.assertRaises(exception.NotFound, db_api.consistency_group_get, self.ctxt, cg['id']) def test_count_shares_in_consistency_group(self): cg = db_utils.create_consistency_group() db_utils.create_share(consistency_group_id=cg['id']) db_utils.create_share() count = db_api.count_shares_in_consistency_group(self.ctxt, cg['id']) self.assertEqual(1, count) def test_count_cgsnapshots_in_consistency_group(self): cg = 
db_utils.create_consistency_group() db_utils.create_cgsnapshot(cg['id']) db_utils.create_cgsnapshot(cg['id']) count = db_api.count_cgsnapshots_in_consistency_group(self.ctxt, cg['id']) self.assertEqual(2, count) def test_cgsnapshot_get(self): cg = db_utils.create_consistency_group() cgsnap = db_utils.create_cgsnapshot(cg['id']) self.assertDictMatch(dict(cgsnap), dict(db_api.cgsnapshot_get(self.ctxt, cgsnap['id']))) def test_cgsnapshot_get_all(self): cg = db_utils.create_consistency_group() expected_cgsnap = db_utils.create_cgsnapshot(cg['id']) snaps = db_api.cgsnapshot_get_all(self.ctxt, detailed=False) self.assertEqual(1, len(snaps)) snap = snaps[0] self.assertEqual(2, len(dict(snap).keys())) self.assertEqual(expected_cgsnap['id'], snap['id']) self.assertEqual(expected_cgsnap['name'], snap['name']) def test_cgsnapshot_get_all_with_detail(self): cg = db_utils.create_consistency_group() expected_cgsnap = db_utils.create_cgsnapshot(cg['id']) snaps = db_api.cgsnapshot_get_all(self.ctxt, detailed=True) self.assertEqual(1, len(snaps)) snap = snaps[0] self.assertDictMatch(dict(expected_cgsnap), dict(snap)) def test_cgsnapshot_get_all_by_project(self): fake_project = 'fake_project' cg = db_utils.create_consistency_group() expected_cgsnap = db_utils.create_cgsnapshot(cg['id'], project_id=fake_project) snaps = db_api.cgsnapshot_get_all_by_project(self.ctxt, fake_project, detailed=False) self.assertEqual(1, len(snaps)) snap = snaps[0] self.assertEqual(2, len(dict(snap).keys())) self.assertEqual(expected_cgsnap['id'], snap['id']) self.assertEqual(expected_cgsnap['name'], snap['name']) def test_cgsnapshot_get_all_by_project_with_details(self): fake_project = 'fake_project' cg = db_utils.create_consistency_group() expected_cgsnap = db_utils.create_cgsnapshot(cg['id'], project_id=fake_project) snaps = db_api.cgsnapshot_get_all_by_project(self.ctxt, fake_project, detailed=True) self.assertEqual(1, len(snaps)) snap = snaps[0] self.assertDictMatch(dict(expected_cgsnap), dict(snap)) self.assertEqual(fake_project, snap['project_id']) def test_cgsnapshot_update(self): fake_name = "my_fake_name" cg = db_utils.create_consistency_group() expected_cgsnap = db_utils.create_cgsnapshot(cg['id']) expected_cgsnap['name'] = fake_name db_api.cgsnapshot_update(self.ctxt, expected_cgsnap['id'], {'name': fake_name}) cgsnap = db_api.cgsnapshot_get(self.ctxt, expected_cgsnap['id']) self.assertEqual(fake_name, cgsnap['name']) def test_cgsnapshot_destroy(self): cg = db_utils.create_consistency_group() cgsnap = db_utils.create_cgsnapshot(cg['id']) db_api.cgsnapshot_get(self.ctxt, cgsnap['id']) db_api.cgsnapshot_destroy(self.ctxt, cgsnap['id']) self.assertRaises(exception.NotFound, db_api.cgsnapshot_get, self.ctxt, cgsnap['id']) def test_cgsnapshot_members_get_all(self): cg = db_utils.create_consistency_group() cgsnap = db_utils.create_cgsnapshot(cg['id']) expected_member = db_utils.create_cgsnapshot_member(cgsnap['id']) members = db_api.cgsnapshot_members_get_all(self.ctxt, cgsnap['id']) self.assertEqual(1, len(members)) member = members[0] self.assertDictMatch(dict(expected_member), dict(member)) def test_count_cgsnapshot_members_in_share(self): share = db_utils.create_share() share2 = db_utils.create_share() cg = db_utils.create_consistency_group() cgsnap = db_utils.create_cgsnapshot(cg['id']) db_utils.create_cgsnapshot_member(cgsnap['id'], share_id=share['id']) db_utils.create_cgsnapshot_member(cgsnap['id'], share_id=share2['id']) count = db_api.count_cgsnapshot_members_in_share( self.ctxt, share['id']) self.assertEqual(1, 
count) def test_cgsnapshot_members_get(self): cg = db_utils.create_consistency_group() cgsnap = db_utils.create_cgsnapshot(cg['id']) expected_member = db_utils.create_cgsnapshot_member(cgsnap['id']) member = db_api.cgsnapshot_member_get(self.ctxt, expected_member['id']) self.assertDictMatch(dict(expected_member), dict(member)) def test_cgsnapshot_members_get_not_found(self): self.assertRaises(exception.CGSnapshotMemberNotFound, db_api.cgsnapshot_member_get, self.ctxt, 'fake_id') def test_cgsnapshot_member_update(self): cg = db_utils.create_consistency_group() cgsnap = db_utils.create_cgsnapshot(cg['id']) expected_member = db_utils.create_cgsnapshot_member(cgsnap['id']) db_api.cgsnapshot_member_update( self.ctxt, expected_member['id'], {'status': constants.STATUS_AVAILABLE}) member = db_api.cgsnapshot_member_get(self.ctxt, expected_member['id']) self.assertEqual(constants.STATUS_AVAILABLE, member['status']) @ddt.ddt class ShareSnapshotDatabaseAPITestCase(test.TestCase): def setUp(self): """Run before each test.""" super(ShareSnapshotDatabaseAPITestCase, self).setUp() self.ctxt = context.get_admin_context() self.share_instances = [ db_utils.create_share_instance( status=constants.STATUS_REPLICATION_CHANGE, share_id='fake_share_id_1'), db_utils.create_share_instance( status=constants.STATUS_AVAILABLE, share_id='fake_share_id_1'), db_utils.create_share_instance( status=constants.STATUS_ERROR_DELETING, share_id='fake_share_id_2'), db_utils.create_share_instance( status=constants.STATUS_MANAGING, share_id='fake_share_id_2'), ] self.share_1 = db_utils.create_share( id='fake_share_id_1', instances=self.share_instances[0:2]) self.share_2 = db_utils.create_share( id='fake_share_id_2', instances=self.share_instances[2:-1]) self.snapshot_instances = [ db_utils.create_snapshot_instance( 'fake_snapshot_id_1', status=constants.STATUS_CREATING, share_instance_id=self.share_instances[0]['id']), db_utils.create_snapshot_instance( 'fake_snapshot_id_1', status=constants.STATUS_ERROR, share_instance_id=self.share_instances[1]['id']), db_utils.create_snapshot_instance( 'fake_snapshot_id_1', status=constants.STATUS_DELETING, share_instance_id=self.share_instances[2]['id']), db_utils.create_snapshot_instance( 'fake_snapshot_id_2', status=constants.STATUS_AVAILABLE, id='fake_snapshot_instance_id', provider_location='hogsmeade:snapshot1', progress='87%', share_instance_id=self.share_instances[3]['id']), ] self.snapshot_1 = db_utils.create_snapshot( id='fake_snapshot_id_1', share_id=self.share_1['id'], instances=self.snapshot_instances[0:3]) self.snapshot_2 = db_utils.create_snapshot( id='fake_snapshot_id_2', share_id=self.share_2['id'], instances=self.snapshot_instances[3:4]) def test_create(self): share = db_utils.create_share(size=1) values = { 'share_id': share['id'], 'size': share['size'], 'user_id': share['user_id'], 'project_id': share['project_id'], 'status': constants.STATUS_CREATING, 'progress': '0%', 'share_size': share['size'], 'display_name': 'fake', 'display_description': 'fake', 'share_proto': share['share_proto'] } actual_result = db_api.share_snapshot_create( self.ctxt, values, create_snapshot_instance=True) self.assertEqual(1, len(actual_result.instances)) self.assertSubDictMatch(values, actual_result.to_dict()) def test_get_instance(self): snapshot = db_utils.create_snapshot(with_share=True) instance = db_api.share_snapshot_instance_get( self.ctxt, snapshot.instance['id'], with_share_data=True) instance_dict = instance.to_dict() self.assertTrue(hasattr(instance, 'name')) 
self.assertTrue(hasattr(instance, 'share_name')) self.assertTrue(hasattr(instance, 'share_id')) self.assertIn('name', instance_dict) self.assertIn('share_name', instance_dict) self.assertIn('share_id', instance_dict) @ddt.data(None, constants.STATUS_ERROR) def test_share_snapshot_instance_get_all_with_filters_some(self, status): expected_status = status or (constants.STATUS_CREATING, constants.STATUS_DELETING) expected_number = 1 if status else 3 filters = { 'snapshot_ids': 'fake_snapshot_id_1', 'statuses': expected_status } instances = db_api.share_snapshot_instance_get_all_with_filters( self.ctxt, filters) for instance in instances: self.assertEqual('fake_snapshot_id_1', instance['snapshot_id']) self.assertTrue(instance['status'] in filters['statuses']) self.assertEqual(expected_number, len(instances)) def test_share_snapshot_instance_get_all_with_filters_all_filters(self): filters = { 'snapshot_ids': 'fake_snapshot_id_2', 'instance_ids': 'fake_snapshot_instance_id', 'statuses': constants.STATUS_AVAILABLE, 'share_instance_ids': self.share_instances[3]['id'], } instances = db_api.share_snapshot_instance_get_all_with_filters( self.ctxt, filters, with_share_data=True) self.assertEqual(1, len(instances)) self.assertEqual('fake_snapshot_instance_id', instances[0]['id']) self.assertEqual( self.share_2['id'], instances[0]['share_instance']['share_id']) def test_share_snapshot_instance_get_all_with_filters_wrong_filters(self): filters = { 'some_key': 'some_value', 'some_other_key': 'some_other_value', } instances = db_api.share_snapshot_instance_get_all_with_filters( self.ctxt, filters) self.assertEqual(6, len(instances)) def test_share_snapshot_instance_create(self): snapshot = db_utils.create_snapshot(with_share=True) share = snapshot['share'] share_instance = db_utils.create_share_instance(share_id=share['id']) values = { 'snapshot_id': snapshot['id'], 'share_instance_id': share_instance['id'], 'status': constants.STATUS_MANAGING, 'progress': '88%', 'provider_location': 'whomping_willow', } actual_result = db_api.share_snapshot_instance_create( self.ctxt, snapshot['id'], values) snapshot = db_api.share_snapshot_get(self.ctxt, snapshot['id']) self.assertSubDictMatch(values, actual_result.to_dict()) self.assertEqual(2, len(snapshot['instances'])) def test_share_snapshot_instance_update(self): snapshot = db_utils.create_snapshot(with_share=True) values = { 'snapshot_id': snapshot['id'], 'status': constants.STATUS_ERROR, 'progress': '18%', 'provider_location': 'godrics_hollow', } actual_result = db_api.share_snapshot_instance_update( self.ctxt, snapshot['instance']['id'], values) self.assertSubDictMatch(values, actual_result.to_dict()) @ddt.data(2, 1) def test_share_snapshot_instance_delete(self, instances): snapshot = db_utils.create_snapshot(with_share=True) first_instance_id = snapshot['instance']['id'] if instances > 1: instance = db_utils.create_snapshot_instance( snapshot['id'], share_instance_id=snapshot['share']['instance']['id']) else: instance = snapshot['instance'] retval = db_api.share_snapshot_instance_delete( self.ctxt, instance['id']) self.assertIsNone(retval) if instances == 1: self.assertRaises(exception.ShareSnapshotNotFound, db_api.share_snapshot_get, self.ctxt, snapshot['id']) else: snapshot = db_api.share_snapshot_get(self.ctxt, snapshot['id']) self.assertEqual(1, len(snapshot['instances'])) self.assertEqual(first_instance_id, snapshot['instance']['id']) def test_share_snapshot_destroy_has_instances(self): snapshot = db_utils.create_snapshot(with_share=True) 
self.assertRaises(exception.InvalidShareSnapshot, db_api.share_snapshot_destroy, context.get_admin_context(), snapshot['id']) class ShareExportLocationsDatabaseAPITestCase(test.TestCase): def setUp(self): super(self.__class__, self).setUp() self.ctxt = context.get_admin_context() def test_update_valid_order(self): share = db_utils.create_share() initial_locations = ['fake1/1/', 'fake2/2', 'fake3/3'] update_locations = ['fake4/4', 'fake2/2', 'fake3/3'] # add initial locations db_api.share_export_locations_update(self.ctxt, share.instance['id'], initial_locations, False) # update locations db_api.share_export_locations_update(self.ctxt, share.instance['id'], update_locations, True) actual_result = db_api.share_export_locations_get(self.ctxt, share['id']) # actual result should contain locations in exact same order self.assertTrue(actual_result == update_locations) def test_update_string(self): share = db_utils.create_share() initial_location = 'fake1/1/' db_api.share_export_locations_update(self.ctxt, share.instance['id'], initial_location, False) actual_result = db_api.share_export_locations_get(self.ctxt, share['id']) self.assertTrue(actual_result == [initial_location]) def test_get_admin_export_locations(self): ctxt_user = context.RequestContext( user_id='fake user', project_id='fake project', is_admin=False) share = db_utils.create_share() locations = [ {'path': 'fake1/1/', 'is_admin_only': True}, {'path': 'fake2/2/', 'is_admin_only': True}, {'path': 'fake3/3/', 'is_admin_only': True}, ] db_api.share_export_locations_update( self.ctxt, share.instance['id'], locations, delete=False) user_result = db_api.share_export_locations_get(ctxt_user, share['id']) self.assertEqual([], user_result) admin_result = db_api.share_export_locations_get( self.ctxt, share['id']) self.assertEqual(3, len(admin_result)) for location in locations: self.assertIn(location['path'], admin_result) def test_get_user_export_locations(self): ctxt_user = context.RequestContext( user_id='fake user', project_id='fake project', is_admin=False) share = db_utils.create_share() locations = [ {'path': 'fake1/1/', 'is_admin_only': False}, {'path': 'fake2/2/', 'is_admin_only': False}, {'path': 'fake3/3/', 'is_admin_only': False}, ] db_api.share_export_locations_update( self.ctxt, share.instance['id'], locations, delete=False) user_result = db_api.share_export_locations_get(ctxt_user, share['id']) self.assertEqual(3, len(user_result)) for location in locations: self.assertIn(location['path'], user_result) admin_result = db_api.share_export_locations_get( self.ctxt, share['id']) self.assertEqual(3, len(admin_result)) for location in locations: self.assertIn(location['path'], admin_result) def test_get_user_export_locations_old_view(self): ctxt_user = context.RequestContext( user_id='fake user', project_id='fake project', is_admin=False) share = db_utils.create_share() locations = ['fake1/1/', 'fake2/2', 'fake3/3'] db_api.share_export_locations_update( self.ctxt, share.instance['id'], locations, delete=False) user_result = db_api.share_export_locations_get(ctxt_user, share['id']) self.assertEqual(locations, user_result) admin_result = db_api.share_export_locations_get( self.ctxt, share['id']) self.assertEqual(locations, admin_result) @ddt.ddt class ShareInstanceExportLocationsMetadataDatabaseAPITestCase(test.TestCase): def setUp(self): super(self.__class__, self).setUp() self.ctxt = context.get_admin_context() self.share = db_utils.create_share() self.initial_locations = ['/fake/foo/', '/fake/bar', '/fake/quuz'] 
db_api.share_export_locations_update( self.ctxt, self.share.instance['id'], self.initial_locations, delete=False) def _get_export_location_uuid_by_path(self, path): els = db_api.share_export_locations_get_by_share_id( self.ctxt, self.share.id) export_location_uuid = None for el in els: if el.path == path: export_location_uuid = el.uuid self.assertFalse(export_location_uuid is None) return export_location_uuid def test_get_export_locations_by_share_id(self): els = db_api.share_export_locations_get_by_share_id( self.ctxt, self.share.id) self.assertEqual(3, len(els)) for path in self.initial_locations: self.assertTrue(any([path in el.path for el in els])) def test_get_export_locations_by_share_instance_id(self): els = db_api.share_export_locations_get_by_share_instance_id( self.ctxt, self.share.instance.id) self.assertEqual(3, len(els)) for path in self.initial_locations: self.assertTrue(any([path in el.path for el in els])) def test_export_location_metadata_update_delete(self): export_location_uuid = self._get_export_location_uuid_by_path( self.initial_locations[0]) metadata = { 'foo_key': 'foo_value', 'bar_key': 'bar_value', 'quuz_key': 'quuz_value', } db_api.export_location_metadata_update( self.ctxt, export_location_uuid, metadata, False) db_api.export_location_metadata_delete( self.ctxt, export_location_uuid, list(metadata.keys())[0:-1]) result = db_api.export_location_metadata_get( self.ctxt, export_location_uuid) key = list(metadata.keys())[-1] self.assertEqual({key: metadata[key]}, result) db_api.export_location_metadata_delete( self.ctxt, export_location_uuid) result = db_api.export_location_metadata_get( self.ctxt, export_location_uuid) self.assertEqual({}, result) def test_export_location_metadata_update_get(self): # Write metadata for target export location export_location_uuid = self._get_export_location_uuid_by_path( self.initial_locations[0]) metadata = {'foo_key': 'foo_value', 'bar_key': 'bar_value'} db_api.export_location_metadata_update( self.ctxt, export_location_uuid, metadata, False) # Write metadata for some concurrent export location other_export_location_uuid = self._get_export_location_uuid_by_path( self.initial_locations[1]) other_metadata = {'key_from_other_el': 'value_of_key_from_other_el'} db_api.export_location_metadata_update( self.ctxt, other_export_location_uuid, other_metadata, False) result = db_api.export_location_metadata_get( self.ctxt, export_location_uuid) self.assertEqual(metadata, result) updated_metadata = { 'foo_key': metadata['foo_key'], 'quuz_key': 'quuz_value', } db_api.export_location_metadata_update( self.ctxt, export_location_uuid, updated_metadata, True) result = db_api.export_location_metadata_get( self.ctxt, export_location_uuid) self.assertEqual(updated_metadata, result) @ddt.data( ("k", "v"), ("k" * 256, "v"), ("k", "v" * 1024), ("k" * 256, "v" * 1024), ) @ddt.unpack def test_set_metadata_with_different_length(self, key, value): export_location_uuid = self._get_export_location_uuid_by_path( self.initial_locations[1]) metadata = {key: value} db_api.export_location_metadata_update( self.ctxt, export_location_uuid, metadata, False) result = db_api.export_location_metadata_get( self.ctxt, export_location_uuid) self.assertEqual(metadata, result) @ddt.ddt class DriverPrivateDataDatabaseAPITestCase(test.TestCase): def setUp(self): """Run before each test.""" super(DriverPrivateDataDatabaseAPITestCase, self).setUp() self.ctxt = context.get_admin_context() def _get_driver_test_data(self): return ("fake@host", uuidutils.generate_uuid()) 
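# The driver_private_data_* tests below treat this API as a per-backend
# key/value store scoped by a (host, entity id) pair.  A minimal sketch of the
# calls as they are used in this class (the host string and keys shown are
# placeholders):
#
#     host, entity_id = "fake@host", uuidutils.generate_uuid()
#     db_api.driver_private_data_update(self.ctxt, host, entity_id,
#                                       {"foo": "bar"})
#     db_api.driver_private_data_get(self.ctxt, host, entity_id)         # -> {'foo': 'bar'}
#     db_api.driver_private_data_get(self.ctxt, host, entity_id, "foo")  # -> 'bar'
#     db_api.driver_private_data_delete(self.ctxt, host, entity_id)      # drop all keys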
@ddt.data({"details": {"foo": "bar", "tee": "too"}, "valid": {"foo": "bar", "tee": "too"}}, {"details": {"foo": "bar", "tee": ["test"]}, "valid": {"foo": "bar", "tee": six.text_type(["test"])}}) @ddt.unpack def test_update(self, details, valid): test_host, test_id = self._get_driver_test_data() initial_data = db_api.driver_private_data_get( self.ctxt, test_host, test_id) db_api.driver_private_data_update(self.ctxt, test_host, test_id, details) actual_data = db_api.driver_private_data_get( self.ctxt, test_host, test_id) self.assertEqual({}, initial_data) self.assertEqual(valid, actual_data) def test_update_with_duplicate(self): test_host, test_id = self._get_driver_test_data() details = {"tee": "too"} db_api.driver_private_data_update(self.ctxt, test_host, test_id, details) db_api.driver_private_data_update(self.ctxt, test_host, test_id, details) actual_result = db_api.driver_private_data_get( self.ctxt, test_host, test_id) self.assertEqual(details, actual_result) def test_update_with_delete_existing(self): test_host, test_id = self._get_driver_test_data() details = {"key1": "val1", "key2": "val2", "key3": "val3"} details_update = {"key1": "val1_upd", "key4": "new_val"} # Create new details db_api.driver_private_data_update(self.ctxt, test_host, test_id, details) db_api.driver_private_data_update(self.ctxt, test_host, test_id, details_update, delete_existing=True) actual_result = db_api.driver_private_data_get( self.ctxt, test_host, test_id) self.assertEqual(details_update, actual_result) def test_get(self): test_host, test_id = self._get_driver_test_data() test_key = "foo" test_keys = [test_key, "tee"] details = {test_keys[0]: "val", test_keys[1]: "val", "mee": "foo"} db_api.driver_private_data_update(self.ctxt, test_host, test_id, details) actual_result_all = db_api.driver_private_data_get( self.ctxt, test_host, test_id) actual_result_single_key = db_api.driver_private_data_get( self.ctxt, test_host, test_id, test_key) actual_result_list = db_api.driver_private_data_get( self.ctxt, test_host, test_id, test_keys) self.assertEqual(details, actual_result_all) self.assertEqual(details[test_key], actual_result_single_key) self.assertEqual(dict.fromkeys(test_keys, "val"), actual_result_list) def test_delete_single(self): test_host, test_id = self._get_driver_test_data() test_key = "foo" details = {test_key: "bar", "tee": "too"} valid_result = {"tee": "too"} db_api.driver_private_data_update(self.ctxt, test_host, test_id, details) db_api.driver_private_data_delete(self.ctxt, test_host, test_id, test_key) actual_result = db_api.driver_private_data_get( self.ctxt, test_host, test_id) self.assertEqual(valid_result, actual_result) def test_delete_all(self): test_host, test_id = self._get_driver_test_data() details = {"foo": "bar", "tee": "too"} db_api.driver_private_data_update(self.ctxt, test_host, test_id, details) db_api.driver_private_data_delete(self.ctxt, test_host, test_id) actual_result = db_api.driver_private_data_get( self.ctxt, test_host, test_id) self.assertEqual({}, actual_result) @ddt.ddt class ShareNetworkDatabaseAPITestCase(BaseDatabaseAPITestCase): def __init__(self, *args, **kwargs): super(ShareNetworkDatabaseAPITestCase, self).__init__(*args, **kwargs) self.fake_context = context.RequestContext(user_id='fake user', project_id='fake project', is_admin=False) def setUp(self): super(ShareNetworkDatabaseAPITestCase, self).setUp() self.share_nw_dict = {'id': 'fake network id', 'neutron_net_id': 'fake net id', 'neutron_subnet_id': 'fake subnet id', 'project_id': 
self.fake_context.project_id, 'user_id': 'fake_user_id', 'network_type': 'vlan', 'segmentation_id': 1000, 'cidr': '10.0.0.0/24', 'ip_version': 4, 'name': 'whatever', 'description': 'fake description'} def test_create_one_network(self): result = db_api.share_network_create(self.fake_context, self.share_nw_dict) self._check_fields(expected=self.share_nw_dict, actual=result) self.assertEqual(0, len(result['share_instances'])) self.assertEqual(0, len(result['security_services'])) def test_create_two_networks_in_different_tenants(self): share_nw_dict2 = self.share_nw_dict.copy() share_nw_dict2['id'] = None share_nw_dict2['project_id'] = 'fake project 2' result1 = db_api.share_network_create(self.fake_context, self.share_nw_dict) result2 = db_api.share_network_create(self.fake_context, share_nw_dict2) self._check_fields(expected=self.share_nw_dict, actual=result1) self._check_fields(expected=share_nw_dict2, actual=result2) def test_create_two_networks_in_one_tenant(self): share_nw_dict2 = self.share_nw_dict.copy() share_nw_dict2['id'] += "suffix" result1 = db_api.share_network_create(self.fake_context, self.share_nw_dict) result2 = db_api.share_network_create(self.fake_context, share_nw_dict2) self._check_fields(expected=self.share_nw_dict, actual=result1) self._check_fields(expected=share_nw_dict2, actual=result2) def test_create_with_duplicated_id(self): db_api.share_network_create(self.fake_context, self.share_nw_dict) self.assertRaises(db_exception.DBDuplicateEntry, db_api.share_network_create, self.fake_context, self.share_nw_dict) def test_get(self): db_api.share_network_create(self.fake_context, self.share_nw_dict) result = db_api.share_network_get(self.fake_context, self.share_nw_dict['id']) self._check_fields(expected=self.share_nw_dict, actual=result) self.assertEqual(0, len(result['share_instances'])) self.assertEqual(0, len(result['security_services'])) @ddt.data([{'id': 'fake share id1'}], [{'id': 'fake share id1'}, {'id': 'fake share id2'}],) def test_get_with_shares(self, shares): db_api.share_network_create(self.fake_context, self.share_nw_dict) share_instances = [] for share in shares: share.update({'share_network_id': self.share_nw_dict['id']}) share_instances.append( db_api.share_create(self.fake_context, share).instance ) result = db_api.share_network_get(self.fake_context, self.share_nw_dict['id']) self.assertEqual(len(shares), len(result['share_instances'])) for index, share_instance in enumerate(share_instances): self.assertEqual( share_instance['share_network_id'], result['share_instances'][index]['share_network_id'] ) @ddt.data([{'id': 'fake security service id1', 'type': 'fake type'}], [{'id': 'fake security service id1', 'type': 'fake type'}, {'id': 'fake security service id2', 'type': 'fake type'}]) def test_get_with_security_services(self, security_services): db_api.share_network_create(self.fake_context, self.share_nw_dict) for service in security_services: service.update({'project_id': self.fake_context.project_id}) db_api.security_service_create(self.fake_context, service) db_api.share_network_add_security_service( self.fake_context, self.share_nw_dict['id'], service['id']) result = db_api.share_network_get(self.fake_context, self.share_nw_dict['id']) self.assertEqual(len(security_services), len(result['security_services'])) for index, service in enumerate(security_services): self._check_fields(expected=service, actual=result['security_services'][index]) def test_get_not_found(self): self.assertRaises(exception.ShareNetworkNotFound, db_api.share_network_get, 
self.fake_context, 'fake id') def test_delete(self): db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.share_network_delete(self.fake_context, self.share_nw_dict['id']) self.assertRaises(exception.ShareNetworkNotFound, db_api.share_network_get, self.fake_context, self.share_nw_dict['id']) def test_delete_not_found(self): self.assertRaises(exception.ShareNetworkNotFound, db_api.share_network_delete, self.fake_context, 'fake id') def test_update(self): new_name = 'fake_new_name' db_api.share_network_create(self.fake_context, self.share_nw_dict) result_update = db_api.share_network_update(self.fake_context, self.share_nw_dict['id'], {'name': new_name}) result_get = db_api.share_network_get(self.fake_context, self.share_nw_dict['id']) self.assertEqual(new_name, result_update['name']) self._check_fields(expected=dict(result_update.items()), actual=dict(result_get.items())) def test_update_not_found(self): self.assertRaises(exception.ShareNetworkNotFound, db_api.share_network_update, self.fake_context, 'fake id', {}) @ddt.data(1, 2) def test_get_all_one_record(self, records_count): index = 0 share_networks = [] while index < records_count: share_network_dict = dict(self.share_nw_dict) fake_id = 'fake_id%s' % index share_network_dict.update({'id': fake_id, 'neutron_subnet_id': fake_id}) share_networks.append(share_network_dict) db_api.share_network_create(self.fake_context, share_network_dict) index += 1 result = db_api.share_network_get_all(self.fake_context) self.assertEqual(len(share_networks), len(result)) for index, net in enumerate(share_networks): self._check_fields(expected=net, actual=result[index]) def test_get_all_by_project(self): share_nw_dict2 = dict(self.share_nw_dict) share_nw_dict2['id'] = 'fake share nw id2' share_nw_dict2['project_id'] = 'fake project 2' share_nw_dict2['neutron_subnet_id'] = 'fake subnet id2' db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.share_network_create(self.fake_context, share_nw_dict2) result = db_api.share_network_get_all_by_project( self.fake_context, share_nw_dict2['project_id']) self.assertEqual(1, len(result)) self._check_fields(expected=share_nw_dict2, actual=result[0]) def test_add_security_service(self): security_dict1 = {'id': 'fake security service id1', 'project_id': self.fake_context.project_id, 'type': 'fake type'} db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.security_service_create(self.fake_context, security_dict1) db_api.share_network_add_security_service(self.fake_context, self.share_nw_dict['id'], security_dict1['id']) result = db_api.model_query( self.fake_context, models.ShareNetworkSecurityServiceAssociation).\ filter_by(security_service_id=security_dict1['id']).\ filter_by(share_network_id=self.share_nw_dict['id']).first() self.assertTrue(result is not None) def test_add_security_service_not_found_01(self): security_service_id = 'unknown security service' db_api.share_network_create(self.fake_context, self.share_nw_dict) self.assertRaises(exception.SecurityServiceNotFound, db_api.share_network_add_security_service, self.fake_context, self.share_nw_dict['id'], security_service_id) def test_add_security_service_not_found_02(self): security_dict1 = {'id': 'fake security service id1', 'project_id': self.fake_context.project_id, 'type': 'fake type'} share_nw_id = 'unknown share network' db_api.security_service_create(self.fake_context, security_dict1) self.assertRaises(exception.ShareNetworkNotFound, db_api.share_network_add_security_service, 
self.fake_context, share_nw_id, security_dict1['id']) def test_add_security_service_association_error_already_associated(self): security_dict1 = {'id': 'fake security service id1', 'project_id': self.fake_context.project_id, 'type': 'fake type'} db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.security_service_create(self.fake_context, security_dict1) db_api.share_network_add_security_service(self.fake_context, self.share_nw_dict['id'], security_dict1['id']) self.assertRaises( exception.ShareNetworkSecurityServiceAssociationError, db_api.share_network_add_security_service, self.fake_context, self.share_nw_dict['id'], security_dict1['id']) def test_remove_security_service(self): security_dict1 = {'id': 'fake security service id1', 'project_id': self.fake_context.project_id, 'type': 'fake type'} db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.security_service_create(self.fake_context, security_dict1) db_api.share_network_add_security_service(self.fake_context, self.share_nw_dict['id'], security_dict1['id']) db_api.share_network_remove_security_service(self.fake_context, self.share_nw_dict['id'], security_dict1['id']) result = db_api.model_query( self.fake_context, models.ShareNetworkSecurityServiceAssociation).\ filter_by(security_service_id=security_dict1['id']).\ filter_by(share_network_id=self.share_nw_dict['id']).first() self.assertTrue(result is None) share_nw_ref = db_api.share_network_get(self.fake_context, self.share_nw_dict['id']) self.assertEqual(0, len(share_nw_ref['security_services'])) def test_remove_security_service_not_found_01(self): security_service_id = 'unknown security service' db_api.share_network_create(self.fake_context, self.share_nw_dict) self.assertRaises(exception.SecurityServiceNotFound, db_api.share_network_remove_security_service, self.fake_context, self.share_nw_dict['id'], security_service_id) def test_remove_security_service_not_found_02(self): security_dict1 = {'id': 'fake security service id1', 'project_id': self.fake_context.project_id, 'type': 'fake type'} share_nw_id = 'unknown share network' db_api.security_service_create(self.fake_context, security_dict1) self.assertRaises(exception.ShareNetworkNotFound, db_api.share_network_remove_security_service, self.fake_context, share_nw_id, security_dict1['id']) def test_remove_security_service_dissociation_error(self): security_dict1 = {'id': 'fake security service id1', 'project_id': self.fake_context.project_id, 'type': 'fake type'} db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.security_service_create(self.fake_context, security_dict1) self.assertRaises( exception.ShareNetworkSecurityServiceDissociationError, db_api.share_network_remove_security_service, self.fake_context, self.share_nw_dict['id'], security_dict1['id']) def test_security_services_relation(self): security_dict1 = {'id': 'fake security service id1', 'project_id': self.fake_context.project_id, 'type': 'fake type'} db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.security_service_create(self.fake_context, security_dict1) result = db_api.share_network_get(self.fake_context, self.share_nw_dict['id']) self.assertEqual(0, len(result['security_services'])) def test_shares_relation(self): share_dict = {'id': 'fake share id1'} db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.share_create(self.fake_context, share_dict) result = db_api.share_network_get(self.fake_context, self.share_nw_dict['id']) self.assertEqual(0, 
len(result['share_instances'])) @ddt.ddt class SecurityServiceDatabaseAPITestCase(BaseDatabaseAPITestCase): def __init__(self, *args, **kwargs): super(SecurityServiceDatabaseAPITestCase, self).__init__(*args, **kwargs) self.fake_context = context.RequestContext(user_id='fake user', project_id='fake project', is_admin=False) def _check_expected_fields(self, result, expected): for key in expected: self.assertEqual(expected[key], result[key]) def test_create(self): result = db_api.security_service_create(self.fake_context, security_service_dict) self._check_expected_fields(result, security_service_dict) def test_create_with_duplicated_id(self): db_api.security_service_create(self.fake_context, security_service_dict) self.assertRaises(db_exception.DBDuplicateEntry, db_api.security_service_create, self.fake_context, security_service_dict) def test_get(self): db_api.security_service_create(self.fake_context, security_service_dict) result = db_api.security_service_get(self.fake_context, security_service_dict['id']) self._check_expected_fields(result, security_service_dict) def test_get_not_found(self): self.assertRaises(exception.SecurityServiceNotFound, db_api.security_service_get, self.fake_context, 'wrong id') def test_delete(self): db_api.security_service_create(self.fake_context, security_service_dict) db_api.security_service_delete(self.fake_context, security_service_dict['id']) self.assertRaises(exception.SecurityServiceNotFound, db_api.security_service_get, self.fake_context, security_service_dict['id']) def test_update(self): update_dict = { 'dns_ip': 'new dns', 'server': 'new ldap server', 'domain': 'new ldap domain', 'user': 'new user', 'password': 'new password', 'name': 'new whatever', 'description': 'new nevermind', } db_api.security_service_create(self.fake_context, security_service_dict) result = db_api.security_service_update(self.fake_context, security_service_dict['id'], update_dict) self._check_expected_fields(result, update_dict) def test_update_no_updates(self): db_api.security_service_create(self.fake_context, security_service_dict) result = db_api.security_service_update(self.fake_context, security_service_dict['id'], {}) self._check_expected_fields(result, security_service_dict) def test_update_not_found(self): self.assertRaises(exception.SecurityServiceNotFound, db_api.security_service_update, self.fake_context, 'wrong id', {}) def test_get_all_no_records(self): result = db_api.security_service_get_all(self.fake_context) self.assertEqual(0, len(result)) @ddt.data(1, 2) def test_get_all(self, records_count): index = 0 services = [] while index < records_count: service_dict = dict(security_service_dict) service_dict.update({'id': 'fake_id%s' % index}) services.append(service_dict) db_api.security_service_create(self.fake_context, service_dict) index += 1 result = db_api.security_service_get_all(self.fake_context) self.assertEqual(len(services), len(result)) for index, service in enumerate(services): self._check_fields(expected=service, actual=result[index]) def test_get_all_two_records(self): dict1 = security_service_dict dict2 = security_service_dict.copy() dict2['id'] = 'fake id 2' db_api.security_service_create(self.fake_context, dict1) db_api.security_service_create(self.fake_context, dict2) result = db_api.security_service_get_all(self.fake_context) self.assertEqual(2, len(result)) def test_get_all_by_project(self): dict1 = security_service_dict dict2 = security_service_dict.copy() dict2['id'] = 'fake id 2' dict2['project_id'] = 'fake project 2' 
db_api.security_service_create(self.fake_context, dict1) db_api.security_service_create(self.fake_context, dict2) result1 = db_api.security_service_get_all_by_project( self.fake_context, dict1['project_id']) self.assertEqual(1, len(result1)) self._check_expected_fields(result1[0], dict1) result2 = db_api.security_service_get_all_by_project( self.fake_context, dict2['project_id']) self.assertEqual(1, len(result2)) self._check_expected_fields(result2[0], dict2) class ShareServerDatabaseAPITestCase(test.TestCase): def setUp(self): super(ShareServerDatabaseAPITestCase, self).setUp() self.ctxt = context.RequestContext(user_id='user_id', project_id='project_id', is_admin=True) def test_share_server_get(self): expected = db_utils.create_share_server() server = db_api.share_server_get(self.ctxt, expected['id']) self.assertEqual(expected['id'], server['id']) self.assertEqual(expected.share_network_id, server.share_network_id) self.assertEqual(expected.host, server.host) self.assertEqual(expected.status, server.status) def test_get_not_found(self): fake_id = 'FAKE_UUID' self.assertRaises(exception.ShareServerNotFound, db_api.share_server_get, self.ctxt, fake_id) def test_create(self): server = db_utils.create_share_server() self.assertTrue(server['id']) self.assertEqual(server.share_network_id, server['share_network_id']) self.assertEqual(server.host, server['host']) self.assertEqual(server.status, server['status']) def test_delete(self): server = db_utils.create_share_server() num_records = len(db_api.share_server_get_all(self.ctxt)) db_api.share_server_delete(self.ctxt, server['id']) self.assertEqual(num_records - 1, len(db_api.share_server_get_all(self.ctxt))) def test_delete_not_found(self): fake_id = 'FAKE_UUID' self.assertRaises(exception.ShareServerNotFound, db_api.share_server_delete, self.ctxt, fake_id) def test_update(self): update = { 'share_network_id': 'update_net', 'host': 'update_host', 'status': constants.STATUS_ACTIVE, } server = db_utils.create_share_server() updated_server = db_api.share_server_update(self.ctxt, server['id'], update) self.assertEqual(server['id'], updated_server['id']) self.assertEqual(update['share_network_id'], updated_server.share_network_id) self.assertEqual(update['host'], updated_server.host) self.assertEqual(update['status'], updated_server.status) def test_update_not_found(self): fake_id = 'FAKE_UUID' self.assertRaises(exception.ShareServerNotFound, db_api.share_server_update, self.ctxt, fake_id, {}) def test_get_all_by_host_and_share_net_valid(self): valid = { 'share_network_id': '1', 'host': 'host1', 'status': constants.STATUS_ACTIVE, } invalid = { 'share_network_id': '1', 'host': 'host1', 'status': constants.STATUS_ERROR, } other = { 'share_network_id': '2', 'host': 'host2', 'status': constants.STATUS_ACTIVE, } valid = db_utils.create_share_server(**valid) db_utils.create_share_server(**invalid) db_utils.create_share_server(**other) servers = db_api.share_server_get_all_by_host_and_share_net_valid( self.ctxt, host='host1', share_net_id='1') self.assertEqual(valid['id'], servers[0]['id']) def test_get_all_by_host_and_share_net_not_found(self): self.assertRaises( exception.ShareServerNotFound, db_api.share_server_get_all_by_host_and_share_net_valid, self.ctxt, host='fake', share_net_id='fake' ) def test_get_all(self): srv1 = { 'share_network_id': '1', 'host': 'host1', 'status': constants.STATUS_ACTIVE, } srv2 = { 'share_network_id': '1', 'host': 'host1', 'status': constants.STATUS_ERROR, } srv3 = { 'share_network_id': '2', 'host': 'host2', 'status': 
constants.STATUS_ACTIVE, } servers = db_api.share_server_get_all(self.ctxt) self.assertEqual(0, len(servers)) to_delete = db_utils.create_share_server(**srv1) db_utils.create_share_server(**srv2) db_utils.create_share_server(**srv3) servers = db_api.share_server_get_all(self.ctxt) self.assertEqual(3, len(servers)) db_api.share_server_delete(self.ctxt, to_delete['id']) servers = db_api.share_server_get_all(self.ctxt) self.assertEqual(2, len(servers)) def test_backend_details_set(self): details = { 'value1': '1', 'value2': '2', } server = db_utils.create_share_server() db_api.share_server_backend_details_set(self.ctxt, server['id'], details) self.assertDictMatch( details, db_api.share_server_get(self.ctxt, server['id'])['backend_details'] ) def test_backend_details_set_not_found(self): fake_id = 'FAKE_UUID' self.assertRaises(exception.ShareServerNotFound, db_api.share_server_backend_details_set, self.ctxt, fake_id, {}) def test_get_with_details(self): values = { 'share_network_id': 'fake-share-net-id', 'host': 'hostname', 'status': constants.STATUS_ACTIVE, } details = { 'value1': '1', 'value2': '2', } srv_id = db_utils.create_share_server(**values)['id'] db_api.share_server_backend_details_set(self.ctxt, srv_id, details) server = db_api.share_server_get(self.ctxt, srv_id) self.assertEqual(srv_id, server['id']) self.assertEqual(values['share_network_id'], server.share_network_id) self.assertEqual(values['host'], server.host) self.assertEqual(values['status'], server.status) self.assertDictMatch(server['backend_details'], details) self.assertTrue('backend_details' in server.to_dict()) def test_delete_with_details(self): server = db_utils.create_share_server(backend_details={ 'value1': '1', 'value2': '2', }) num_records = len(db_api.share_server_get_all(self.ctxt)) db_api.share_server_delete(self.ctxt, server['id']) self.assertEqual(num_records - 1, len(db_api.share_server_get_all(self.ctxt))) class ServiceDatabaseAPITestCase(test.TestCase): def setUp(self): super(ServiceDatabaseAPITestCase, self).setUp() self.ctxt = context.RequestContext(user_id='user_id', project_id='project_id', is_admin=True) self.service_data = {'host': "fake_host", 'binary': "fake_binary", 'topic': "fake_topic", 'report_count': 0, 'availability_zone': "fake_zone"} def test_create(self): service = db_api.service_create(self.ctxt, self.service_data) az = db_api.availability_zone_get(self.ctxt, "fake_zone") self.assertEqual(az.id, service.availability_zone_id) self.assertSubDictMatch(self.service_data, service.to_dict()) def test_update(self): az_name = 'fake_zone2' update_data = {"availability_zone": az_name} service = db_api.service_create(self.ctxt, self.service_data) db_api.service_update(self.ctxt, service['id'], update_data) service = db_api.service_get(self.ctxt, service['id']) az = db_api.availability_zone_get(self.ctxt, az_name) self.assertEqual(az.id, service.availability_zone_id) valid_values = self.service_data valid_values.update(update_data) self.assertSubDictMatch(valid_values, service.to_dict()) @ddt.ddt class AvailabilityZonesDatabaseAPITestCase(test.TestCase): def setUp(self): super(AvailabilityZonesDatabaseAPITestCase, self).setUp() self.ctxt = context.RequestContext(user_id='user_id', project_id='project_id', is_admin=True) @ddt.data({'fake': 'fake'}, {}, {'fakeavailability_zone': 'fake'}, {'availability_zone': None}, {'availability_zone': ''}) def test_ensure_availability_zone_exists_invalid(self, test_values): session = db_api.get_session() self.assertRaises(ValueError, 
db_api.ensure_availability_zone_exists, self.ctxt, test_values, session) def test_az_get(self): az_name = 'test_az' az = db_api.availability_zone_create_if_not_exist(self.ctxt, az_name) az_by_id = db_api.availability_zone_get(self.ctxt, az['id']) az_by_name = db_api.availability_zone_get(self.ctxt, az_name) self.assertEqual(az_name, az_by_id['name']) self.assertEqual(az_name, az_by_name['name']) self.assertEqual(az['id'], az_by_id['id']) self.assertEqual(az['id'], az_by_name['id']) def test_az_get_all(self): db_api.availability_zone_create_if_not_exist(self.ctxt, 'test1') db_api.availability_zone_create_if_not_exist(self.ctxt, 'test2') db_api.availability_zone_create_if_not_exist(self.ctxt, 'test3') db_api.service_create(self.ctxt, {'availability_zone': 'test2'}) actual_result = db_api.availability_zone_get_all(self.ctxt) self.assertEqual(1, len(actual_result)) self.assertEqual('test2', actual_result[0]['name']) @ddt.ddt class NetworkAllocationsDatabaseAPITestCase(test.TestCase): def setUp(self): super(NetworkAllocationsDatabaseAPITestCase, self).setUp() self.user_id = 'user_id' self.project_id = 'project_id' self.share_server_id = 'foo_share_server_id' self.ctxt = context.RequestContext( user_id=self.user_id, project_id=self.project_id, is_admin=True) self.user_network_allocations = [ {'share_server_id': self.share_server_id, 'ip_address': '1.1.1.1', 'status': constants.STATUS_ACTIVE, 'label': None}, {'share_server_id': self.share_server_id, 'ip_address': '2.2.2.2', 'status': constants.STATUS_ACTIVE, 'label': 'user'}, ] self.admin_network_allocations = [ {'share_server_id': self.share_server_id, 'ip_address': '3.3.3.3', 'status': constants.STATUS_ACTIVE, 'label': 'admin'}, {'share_server_id': self.share_server_id, 'ip_address': '4.4.4.4', 'status': constants.STATUS_ACTIVE, 'label': 'admin'}, ] def _setup_network_allocations_get_for_share_server(self): # Create share network share_network_data = { 'id': 'foo_share_network_id', 'user_id': self.user_id, 'project_id': self.project_id, } db_api.share_network_create(self.ctxt, share_network_data) # Create share server share_server_data = { 'id': self.share_server_id, 'share_network_id': share_network_data['id'], 'host': 'fake_host', 'status': 'active', } db_api.share_server_create(self.ctxt, share_server_data) # Create user network allocations for user_network_allocation in self.user_network_allocations: db_api.network_allocation_create( self.ctxt, user_network_allocation) # Create admin network allocations for admin_network_allocation in self.admin_network_allocations: db_api.network_allocation_create( self.ctxt, admin_network_allocation) def test_get_only_user_network_allocations(self): self._setup_network_allocations_get_for_share_server() result = db_api.network_allocations_get_for_share_server( self.ctxt, self.share_server_id, label='user') self.assertEqual( len(self.user_network_allocations), len(result)) for na in result: self.assertIn(na.label, (None, 'user')) def test_get_only_admin_network_allocations(self): self._setup_network_allocations_get_for_share_server() result = db_api.network_allocations_get_for_share_server( self.ctxt, self.share_server_id, label='admin') self.assertEqual( len(self.admin_network_allocations), len(result)) for na in result: self.assertEqual(na.label, 'admin') def test_get_all_network_allocations(self): self._setup_network_allocations_get_for_share_server() result = db_api.network_allocations_get_for_share_server( self.ctxt, self.share_server_id, label=None) self.assertEqual( len(self.user_network_allocations 
+ self.admin_network_allocations), len(result) ) for na in result: self.assertIn(na.label, ('admin', 'user', None)) manila-2.0.0/manila/tests/db/sqlalchemy/test_models.py0000664000567000056710000002161512701407107024145 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Hitachi Data Systems. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Testing of SQLAlchemy model classes.""" import ddt from manila.common import constants from manila import test from manila.tests import db_utils @ddt.ddt class ShareTestCase(test.TestCase): """Testing of SQLAlchemy Share model class.""" def setUp(self): super(ShareTestCase, self).setUp() @ddt.data(constants.STATUS_MANAGE_ERROR, constants.STATUS_CREATING, constants.STATUS_EXTENDING, constants.STATUS_DELETING, constants.STATUS_EXTENDING_ERROR, constants.STATUS_ERROR_DELETING, constants.STATUS_MANAGING, constants.STATUS_MANAGE_ERROR) def test_share_instance_available(self, status): instance_list = [ db_utils.create_share_instance(status=constants.STATUS_AVAILABLE, share_id='fake_id'), db_utils.create_share_instance(status=status, share_id='fake_id') ] share1 = db_utils.create_share(instances=instance_list) share2 = db_utils.create_share(instances=list(reversed(instance_list))) self.assertEqual(constants.STATUS_AVAILABLE, share1.instance['status']) self.assertEqual(constants.STATUS_AVAILABLE, share2.instance['status']) @ddt.data([constants.STATUS_MANAGE_ERROR, constants.STATUS_CREATING], [constants.STATUS_ERROR_DELETING, constants.STATUS_DELETING], [constants.STATUS_ERROR, constants.STATUS_MANAGING], [constants.STATUS_UNMANAGE_ERROR, constants.STATUS_UNMANAGING], [constants.STATUS_INACTIVE, constants.STATUS_EXTENDING], [constants.STATUS_SHRINKING_ERROR, constants.STATUS_SHRINKING]) @ddt.unpack def test_share_instance_not_transitional(self, status, trans_status): instance_list = [ db_utils.create_share_instance(status=status, share_id='fake_id'), db_utils.create_share_instance(status=trans_status, share_id='fake_id') ] share1 = db_utils.create_share(instances=instance_list) share2 = db_utils.create_share(instances=list(reversed(instance_list))) self.assertEqual(status, share1.instance['status']) self.assertEqual(status, share2.instance['status']) def test_share_instance_creating(self): share = db_utils.create_share(status=constants.STATUS_CREATING) self.assertEqual(constants.STATUS_CREATING, share.instance['status']) @ddt.data(constants.STATUS_AVAILABLE, constants.STATUS_ERROR, constants.STATUS_CREATING) def test_share_instance_replication_change(self, status): instance_list = [ db_utils.create_share_instance( status=constants.STATUS_REPLICATION_CHANGE, share_id='fake_id'), db_utils.create_share_instance( status=status, share_id='fake_id'), db_utils.create_share_instance( status=constants.STATUS_ERROR_DELETING, share_id='fake_id') ] share1 = db_utils.create_share(instances=instance_list) share2 = db_utils.create_share(instances=list(reversed(instance_list))) self.assertEqual( constants.STATUS_REPLICATION_CHANGE, share1.instance['status']) 
self.assertEqual( constants.STATUS_REPLICATION_CHANGE, share2.instance['status']) def test_share_instance_prefer_active_instance(self): instance_list = [ db_utils.create_share_instance( status=constants.STATUS_AVAILABLE, share_id='fake_id', replica_state=constants.REPLICA_STATE_IN_SYNC), db_utils.create_share_instance( status=constants.STATUS_CREATING, share_id='fake_id', replica_state=constants.REPLICA_STATE_OUT_OF_SYNC), db_utils.create_share_instance( status=constants.STATUS_ERROR, share_id='fake_id', replica_state=constants.REPLICA_STATE_ACTIVE), db_utils.create_share_instance( status=constants.STATUS_MANAGING, share_id='fake_id', replica_state=constants.REPLICA_STATE_ACTIVE), ] share1 = db_utils.create_share(instances=instance_list) share2 = db_utils.create_share(instances=list(reversed(instance_list))) self.assertEqual( constants.STATUS_ERROR, share1.instance['status']) self.assertEqual( constants.STATUS_ERROR, share2.instance['status']) def test_access_rules_status_no_instances(self): share = db_utils.create_share(instances=[]) self.assertEqual(constants.STATUS_ACTIVE, share.access_rules_status) @ddt.data(constants.STATUS_ACTIVE, constants.STATUS_OUT_OF_SYNC, constants.STATUS_ERROR) def test_access_rules_status(self, access_status): instances = [ db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_ERROR, access_rules_status=constants.STATUS_ACTIVE), db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_AVAILABLE, access_rules_status=constants.STATUS_ACTIVE), db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_AVAILABLE, access_rules_status=access_status), ] share = db_utils.create_share(instances=instances) self.assertEqual(access_status, share.access_rules_status) @ddt.ddt class ShareSnapshotTestCase(test.TestCase): """Testing of SQLAlchemy ShareSnapshot model class.""" def test_instance_and_proxified_properties(self): in_sync_replica_instance = db_utils.create_share_instance( status=constants.STATUS_AVAILABLE, share_id='fake_id', replica_state=constants.REPLICA_STATE_IN_SYNC) active_replica_instance = db_utils.create_share_instance( status=constants.STATUS_AVAILABLE, share_id='fake_id', replica_state=constants.REPLICA_STATE_ACTIVE) out_of_sync_replica_instance = db_utils.create_share_instance( status=constants.STATUS_ERROR, share_id='fake_id', replica_state=constants.REPLICA_STATE_OUT_OF_SYNC) non_replica_instance = db_utils.create_share_instance( status=constants.STATUS_CREATING, share_id='fake_id') share_instances = [ in_sync_replica_instance, active_replica_instance, out_of_sync_replica_instance, non_replica_instance, ] share = db_utils.create_share(instances=share_instances) snapshot_instance_list = [ db_utils.create_snapshot_instance( 'fake_snapshot_id', status=constants.STATUS_CREATING, share_instance_id=out_of_sync_replica_instance['id']), db_utils.create_snapshot_instance( 'fake_snapshot_id', status=constants.STATUS_ERROR, share_instance_id=in_sync_replica_instance['id']), db_utils.create_snapshot_instance( 'fake_snapshot_id', status=constants.STATUS_AVAILABLE, provider_location='hogsmeade:snapshot1', progress='87%', share_instance_id=active_replica_instance['id']), db_utils.create_snapshot_instance( 'fake_snapshot_id', status=constants.STATUS_MANAGING, share_instance_id=non_replica_instance['id']), ] snapshot = db_utils.create_snapshot( id='fake_snapshot_id', share_id=share['id'], instances=snapshot_instance_list) # Proxified properties self.assertEqual(constants.STATUS_AVAILABLE, snapshot['status']) 
self.assertEqual(constants.STATUS_ERROR, snapshot['aggregate_status']) self.assertEqual('hogsmeade:snapshot1', snapshot['provider_location']) self.assertEqual('87%', snapshot['progress']) # Snapshot properties expected_share_name = '-'.join(['share', share['id']]) self.assertEqual(expected_share_name, snapshot['share_name']) self.assertEqual(active_replica_instance['id'], snapshot['instance']['share_instance_id']) manila-2.0.0/manila/tests/db/migrations/0000775000567000056710000000000012701407265021263 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/db/migrations/__init__.py0000664000567000056710000000000012701407107023355 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/db/migrations/alembic/0000775000567000056710000000000012701407265022657 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/db/migrations/alembic/test_migration.py0000664000567000056710000001655112701407112026260 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack, LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for database migrations. """ from alembic import script import mock from oslo_db.sqlalchemy import test_base from oslo_db.sqlalchemy import test_migrations from oslo_log import log from sqlalchemy.sql import text from manila.db.migrations.alembic import migration from manila.tests.db.migrations.alembic import migrations_data_checks from manila.tests import utils as test_utils LOG = log.getLogger('manila.tests.test_migrations') class ManilaMigrationsCheckers(test_migrations.WalkVersionsMixin, migrations_data_checks.DbMigrationsData): """Test alembic migrations.""" @property def snake_walk(self): return True @property def downgrade(self): return True @property def INIT_VERSION(self): pass @property def REPOSITORY(self): pass @property def migration_api(self): return migration @property def migrate_engine(self): return self.engine def _walk_versions(self, snake_walk=False, downgrade=True): # Determine latest version script from the repo, then # upgrade from 1 through to the latest, with no data # in the databases. This just checks that the schema itself # upgrades successfully. 
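# NOTE: illustrative summary (added commentary, not part of the original test).
# The walk performed below is, in pseudocode,
#
#     for version in oldest..newest:
#         upgrade(version)                         # with data checks
#         if snake_walk:
#             downgrade(version); upgrade(version) # exercise both directions
#     if downgrade:
#         for version in newest..oldest:
#             downgrade(version)
#             if snake_walk:
#                 upgrade(version); downgrade(version)
#
# so every revision's upgrade script, and every downgrade script that is
# implemented, runs at least once against the engine under test.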
# Place the database under version control alembic_cfg = migration._alembic_config() script_directory = script.ScriptDirectory.from_config(alembic_cfg) self.assertIsNone(self.migration_api.version()) versions = [ver for ver in script_directory.walk_revisions()] LOG.debug('latest version is %s', versions[0].revision) for version in reversed(versions): self._migrate_up(version.revision, with_data=True) if snake_walk: downgraded = self._migrate_down( version, with_data=True) if downgraded: self._migrate_up(version.revision) if downgrade: for version in versions: downgraded = self._migrate_down(version) if snake_walk and downgraded: self._migrate_up(version.revision) self._migrate_down(version) def _migrate_down(self, version, with_data=False): try: self.migration_api.downgrade(version.down_revision) except NotImplementedError: # NOTE(sirp): some migrations, namely release-level # migrations, don't support a downgrade. return False self.assertEqual(version.down_revision, self.migration_api.version()) if with_data: post_downgrade = getattr( self, "_post_downgrade_%s" % version.revision, None) if post_downgrade: post_downgrade(self.engine) return True def _migrate_up(self, version, with_data=False): """migrate up to a new version of the db. We allow for data insertion and post checks at every migration version with special _pre_upgrade_### and _check_### functions in the main test. """ # NOTE(sdague): try block is here because it's impossible to debug # where a failed data migration happens otherwise try: if with_data: data = None pre_upgrade = getattr( self, "_pre_upgrade_%s" % version, None) if pre_upgrade: data = pre_upgrade(self.engine) self.migration_api.upgrade(version) self.assertEqual(version, self.migration_api.version()) if with_data: check = getattr(self, "_check_%s" % version, None) if check: check(self.engine, data) except Exception as e: LOG.error("Failed to migrate to version %(version)s on engine " "%(engine)s. Exception while running the migration: " "%(exception)s", {'version': version, 'engine': self.engine, 'exception': e}) raise # NOTE(vponomaryov): set 5 minutes timeout for case of running it on # very slow nodes/VMs. Note, that this test becomes slower with each # addition of new DB migration. On fast nodes it can take about 5-10 secs # having Mitaka set of migrations. # 'pymysql' works much slower on slow nodes than 'psycopg2'. And such # timeout mostly required for testing of 'mysql' backend. @test_utils.set_timeout(300) def test_walk_versions(self): """Walks all version scripts for each tested database. While walking, ensur that there are no errors in the version scripts for each engine. 
""" with mock.patch('manila.db.sqlalchemy.api.get_engine', return_value=self.engine): self._walk_versions(snake_walk=self.snake_walk, downgrade=self.downgrade) def test_single_branch(self): alembic_cfg = migration._alembic_config() script_directory = script.ScriptDirectory.from_config(alembic_cfg) actual_result = script_directory.get_heads() self.assertEqual(1, len(actual_result), "Db migrations should have only one branch.") class TestManilaMigrationsMySQL(ManilaMigrationsCheckers, test_base.MySQLOpportunisticTestCase): """Run migration tests on MySQL backend.""" @test_utils.set_timeout(300) def test_mysql_innodb(self): """Test that table creation on mysql only builds InnoDB tables.""" with mock.patch('manila.db.sqlalchemy.api.get_engine', return_value=self.engine): self._walk_versions(snake_walk=False, downgrade=False) # sanity check sanity_check = """SELECT count(*) FROM information_schema.tables WHERE table_schema = :database;""" total = self.engine.execute( text(sanity_check), database=self.engine.url.database) self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?") noninnodb_query = """ SELECT count(*) FROM information_schema.TABLES WHERE table_schema = :database AND engine != 'InnoDB' AND table_name != 'alembic_version';""" count = self.engine.execute( text(noninnodb_query), database=self.engine.url.database ).scalar() self.assertEqual(0, count, "%d non InnoDB tables created" % count) class TestManilaMigrationsPostgreSQL( ManilaMigrationsCheckers, test_base.PostgreSQLOpportunisticTestCase): """Run migration tests on PostgreSQL backend.""" manila-2.0.0/manila/tests/db/migrations/alembic/__init__.py0000664000567000056710000000000012701407107024751 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/db/migrations/alembic/migrations_data_checks.py0000664000567000056710000005334112701407107027717 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests data for database migrations. All database migrations with data manipulation (like moving data from column to the table) should have data check class: @map_to_migration('1f0bd302c1a6') # Revision of checked db migration class FooMigrationChecks(BaseMigrationChecks): def setup_upgrade_data(self, engine): ... def check_upgrade(self, engine, data): ... def check_downgrade(self, engine): ... See BaseMigrationChecks class for more information. 
""" import abc import datetime from oslo_utils import uuidutils import six from sqlalchemy import exc as sa_exc from manila.db.migrations import utils class DbMigrationsData(object): migration_mappings = {} methods_mapping = { 'pre': 'setup_upgrade_data', 'check': 'check_upgrade', 'post': 'check_downgrade', } def __getattr__(self, item): parts = item.split('_') is_mapping_method = ( len(parts) > 2 and parts[0] == '' and parts[1] in self.methods_mapping ) if not is_mapping_method: return super(DbMigrationsData, self).__getattribute__(item) check_obj = self.migration_mappings.get(parts[-1], None) if check_obj is None: raise AttributeError check_obj.set_test_case(self) return getattr(check_obj, self.methods_mapping.get(parts[1])) def map_to_migration(revision): def decorator(cls): DbMigrationsData.migration_mappings[revision] = cls() return cls return decorator class BaseMigrationChecks(object): six.add_metaclass(abc.ABCMeta) def __init__(self): self.test_case = None def set_test_case(self, test_case): self.test_case = test_case @abc.abstractmethod def setup_upgrade_data(self, engine): """This method should be used to insert test data for migration. :param engine: SQLAlchemy engine :return: any data which will be passed to 'check_upgrade' as 'data' arg """ @abc.abstractmethod def check_upgrade(self, engine, data): """This method should be used to do assertions after upgrade method. To perform assertions use 'self.test_case' instance property: self.test_case.assertTrue(True) :param engine: SQLAlchemy engine :param data: data returned by 'setup_upgrade_data' """ @abc.abstractmethod def check_downgrade(self, engine): """This method should be used to do assertions after downgrade method. To perform assertions use 'self.test_case' instance property: self.test_case.assertTrue(True) :param engine: SQLAlchemy engine """ @map_to_migration('1f0bd302c1a6') class AvailabilityZoneMigrationChecks(BaseMigrationChecks): valid_az_names = ('az1', 'az2') def _get_service_data(self, options): base_dict = { 'binary': 'manila-share', 'topic': 'share', 'disabled': '0', 'report_count': '100', } base_dict.update(options) return base_dict def setup_upgrade_data(self, engine): service_fixture = [ self._get_service_data( {'deleted': 0, 'host': 'fake1', 'availability_zone': 'az1'} ), self._get_service_data( {'deleted': 0, 'host': 'fake2', 'availability_zone': 'az1'} ), self._get_service_data( {'deleted': 1, 'host': 'fake3', 'availability_zone': 'az2'} ), ] services_table = utils.load_table('services', engine) for fixture in service_fixture: engine.execute(services_table.insert(fixture)) def check_upgrade(self, engine, _): az_table = utils.load_table('availability_zones', engine) for az in engine.execute(az_table.select()): self.test_case.assertTrue(uuidutils.is_uuid_like(az.id)) self.test_case.assertTrue(az.name in self.valid_az_names) self.test_case.assertEqual('False', az.deleted) services_table = utils.load_table('services', engine) for service in engine.execute(services_table.select()): self.test_case.assertTrue( uuidutils.is_uuid_like(service.availability_zone_id) ) def check_downgrade(self, engine): services_table = utils.load_table('services', engine) for service in engine.execute(services_table.select()): self.test_case.assertIn( service.availability_zone, self.valid_az_names ) @map_to_migration('dda6de06349') class ShareInstanceExportLocationMetadataChecks(BaseMigrationChecks): el_table_name = 'share_instance_export_locations' elm_table_name = 'share_instance_export_locations_metadata' def 
setup_upgrade_data(self, engine): # Setup shares share_fixture = [{'id': 'foo_share_id'}, {'id': 'bar_share_id'}] share_table = utils.load_table('shares', engine) for fixture in share_fixture: engine.execute(share_table.insert(fixture)) # Setup share instances si_fixture = [ {'id': 'foo_share_instance_id_oof', 'share_id': share_fixture[0]['id']}, {'id': 'bar_share_instance_id_rab', 'share_id': share_fixture[1]['id']}, ] si_table = utils.load_table('share_instances', engine) for fixture in si_fixture: engine.execute(si_table.insert(fixture)) # Setup export locations el_fixture = [ {'id': 1, 'path': '/1', 'share_instance_id': si_fixture[0]['id']}, {'id': 2, 'path': '/2', 'share_instance_id': si_fixture[1]['id']}, ] el_table = utils.load_table(self.el_table_name, engine) for fixture in el_fixture: engine.execute(el_table.insert(fixture)) def check_upgrade(self, engine, data): el_table = utils.load_table( 'share_instance_export_locations', engine) for el in engine.execute(el_table.select()): self.test_case.assertTrue(hasattr(el, 'is_admin_only')) self.test_case.assertTrue(hasattr(el, 'uuid')) self.test_case.assertEqual(False, el.is_admin_only) self.test_case.assertTrue(uuidutils.is_uuid_like(el.uuid)) # Write export location metadata el_metadata = [ {'key': 'foo_key', 'value': 'foo_value', 'export_location_id': 1}, {'key': 'bar_key', 'value': 'bar_value', 'export_location_id': 2}, ] elm_table = utils.load_table(self.elm_table_name, engine) engine.execute(elm_table.insert(el_metadata)) # Verify values of written metadata for el_meta_datum in el_metadata: el_id = el_meta_datum['export_location_id'] records = engine.execute(elm_table.select().where( elm_table.c.export_location_id == el_id)) self.test_case.assertEqual(1, records.rowcount) record = records.first() expected_keys = ( 'id', 'created_at', 'updated_at', 'deleted_at', 'deleted', 'export_location_id', 'key', 'value', ) self.test_case.assertEqual(len(expected_keys), len(record.keys())) for key in expected_keys: self.test_case.assertIn(key, record.keys()) for k, v in el_meta_datum.items(): self.test_case.assertTrue(hasattr(record, k)) self.test_case.assertEqual(v, getattr(record, k)) def check_downgrade(self, engine): el_table = utils.load_table( 'share_instance_export_locations', engine) for el in engine.execute(el_table.select()): self.test_case.assertFalse(hasattr(el, 'is_admin_only')) self.test_case.assertFalse(hasattr(el, 'uuid')) self.test_case.assertRaises( sa_exc.NoSuchTableError, utils.load_table, self.elm_table_name, engine) @map_to_migration('344c1ac4747f') class AccessRulesStatusMigrationChecks(BaseMigrationChecks): def _get_instance_data(self, data): base_dict = {} base_dict.update(data) return base_dict def setup_upgrade_data(self, engine): share_table = utils.load_table('shares', engine) share = { 'id': 1, 'share_proto': "NFS", 'size': 0, 'snapshot_id': None, 'user_id': 'fake', 'project_id': 'fake', } engine.execute(share_table.insert(share)) rules1 = [ {'id': 'r1', 'share_instance_id': 1, 'state': 'active', 'deleted': 'False'}, {'id': 'r2', 'share_instance_id': 1, 'state': 'active', 'deleted': 'False'}, {'id': 'r3', 'share_instance_id': 1, 'state': 'deleting', 'deleted': 'False'}, ] rules2 = [ {'id': 'r4', 'share_instance_id': 2, 'state': 'active', 'deleted': 'False'}, {'id': 'r5', 'share_instance_id': 2, 'state': 'error', 'deleted': 'False'}, ] rules3 = [ {'id': 'r6', 'share_instance_id': 3, 'state': 'new', 'deleted': 'False'}, ] instance_fixtures = [ {'id': 1, 'deleted': 'False', 'host': 'fake1', 'share_id': 1, 'status': 
'available', 'rules': rules1}, {'id': 2, 'deleted': 'False', 'host': 'fake2', 'share_id': 1, 'status': 'available', 'rules': rules2}, {'id': 3, 'deleted': 'False', 'host': 'fake3', 'share_id': 1, 'status': 'available', 'rules': rules3}, {'id': 4, 'deleted': 'False', 'host': 'fake4', 'share_id': 1, 'status': 'deleting', 'rules': []}, ] share_instances_table = utils.load_table('share_instances', engine) share_instances_rules_table = utils.load_table( 'share_instance_access_map', engine) for fixture in instance_fixtures: rules = fixture.pop('rules') engine.execute(share_instances_table.insert(fixture)) for rule in rules: engine.execute(share_instances_rules_table.insert(rule)) def check_upgrade(self, engine, _): instances_table = utils.load_table('share_instances', engine) valid_statuses = { '1': 'active', '2': 'error', '3': 'out_of_sync', '4': None, } instances = engine.execute(instances_table.select().where( instances_table.c.id in valid_statuses.keys())) for instance in instances: self.test_case.assertEqual(valid_statuses[instance['id']], instance['access_rules_status']) def check_downgrade(self, engine): share_instances_rules_table = utils.load_table( 'share_instance_access_map', engine) valid_statuses = { '1': 'active', '2': 'error', '3': 'error', '4': None, } for rule in engine.execute(share_instances_rules_table.select()): valid_state = valid_statuses[rule['share_instance_id']] self.test_case.assertEqual(valid_state, rule['state']) @map_to_migration('293fac1130ca') class ShareReplicationMigrationChecks(BaseMigrationChecks): valid_share_display_names = ('FAKE_SHARE_1', 'FAKE_SHARE_2', 'FAKE_SHARE_3') valid_share_ids = [] valid_replication_types = ('writable', 'readable', 'dr') def _load_tables_and_get_data(self, engine): share_table = utils.load_table('shares', engine) share_instances_table = utils.load_table('share_instances', engine) shares = engine.execute( share_table.select().where(share_table.c.id.in_( self.valid_share_ids)) ).fetchall() share_instances = engine.execute(share_instances_table.select().where( share_instances_table.c.share_id.in_(self.valid_share_ids)) ).fetchall() return shares, share_instances def _new_share(self, **kwargs): share = { 'id': uuidutils.generate_uuid(), 'display_name': 'fake_share', 'size': '1', 'deleted': 'False', 'share_proto': 'fake_proto', 'user_id': 'fake_user_id', 'project_id': 'fake_project_uuid', 'snapshot_support': '1', 'task_state': None, } share.update(kwargs) return share def _new_instance(self, share_id=None, **kwargs): instance = { 'id': uuidutils.generate_uuid(), 'share_id': share_id or uuidutils.generate_uuid(), 'deleted': 'False', 'host': 'openstack@BackendZ#PoolA', 'status': 'available', 'scheduled_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'launched_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'terminated_at': None, 'access_rules_status': 'active', } instance.update(kwargs) return instance def setup_upgrade_data(self, engine): shares_data = [] instances_data = [] self.valid_share_ids = [] for share_display_name in self.valid_share_display_names: share_ref = self._new_share(display_name=share_display_name) shares_data.append(share_ref) instances_data.append(self._new_instance(share_id=share_ref['id'])) shares_table = utils.load_table('shares', engine) for share in shares_data: self.valid_share_ids.append(share['id']) engine.execute(shares_table.insert(share)) shares_instances_table = utils.load_table('share_instances', engine) for share_instance in instances_data: engine.execute(shares_instances_table.insert(share_instance)) 
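# NOTE: added commentary, not part of the original checks. The fixtures seeded
# above (three shares with one instance each) are reused by both methods that
# follow: check_upgrade() verifies that migration 293fac1130ca adds the
# 'replication_type' column to 'shares' and 'replica_state' to
# 'share_instances' without losing any of the seeded rows, while
# check_downgrade() verifies that the columns are dropped again and that the
# same rows survive the downgrade.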
def check_upgrade(self, engine, _): shares, share_instances = self._load_tables_and_get_data(engine) share_ids = [share['id'] for share in shares] share_instance_share_ids = [share_instance['share_id'] for share_instance in share_instances] # Assert no data is lost for sid in self.valid_share_ids: self.test_case.assertIn(sid, share_ids) self.test_case.assertIn(sid, share_instance_share_ids) for share in shares: self.test_case.assertIn(share['display_name'], self.valid_share_display_names) self.test_case.assertEqual('False', share.deleted) self.test_case.assertTrue(hasattr(share, 'replication_type')) for share_instance in share_instances: self.test_case.assertTrue(hasattr(share_instance, 'replica_state')) def check_downgrade(self, engine): shares, share_instances = self._load_tables_and_get_data(engine) share_ids = [share['id'] for share in shares] share_instance_share_ids = [share_instance['share_id'] for share_instance in share_instances] # Assert no data is lost for sid in self.valid_share_ids: self.test_case.assertIn(sid, share_ids) self.test_case.assertIn(sid, share_instance_share_ids) for share in shares: self.test_case.assertEqual('False', share.deleted) self.test_case.assertIn(share.display_name, self.valid_share_display_names) self.test_case.assertFalse(hasattr(share, 'replication_type')) for share_instance in share_instances: self.test_case.assertEqual('False', share_instance.deleted) self.test_case.assertIn(share_instance.share_id, self.valid_share_ids) self.test_case.assertFalse( hasattr(share_instance, 'replica_state')) @map_to_migration('5155c7077f99') class NetworkAllocationsNewLabelColumnChecks(BaseMigrationChecks): table_name = 'network_allocations' ids = ['fake_network_allocation_id_%d' % i for i in (1, 2, 3)] def setup_upgrade_data(self, engine): user_id = 'user_id' project_id = 'project_id' share_server_id = 'foo_share_server_id' # Create share network share_network_data = { 'id': 'foo_share_network_id', 'user_id': user_id, 'project_id': project_id, } sn_table = utils.load_table('share_networks', engine) engine.execute(sn_table.insert(share_network_data)) # Create share server share_server_data = { 'id': share_server_id, 'share_network_id': share_network_data['id'], 'host': 'fake_host', 'status': 'active', } ss_table = utils.load_table('share_servers', engine) engine.execute(ss_table.insert(share_server_data)) # Create network allocations network_allocations = [ {'id': self.ids[0], 'share_server_id': share_server_id, 'ip_address': '1.1.1.1'}, {'id': self.ids[1], 'share_server_id': share_server_id, 'ip_address': '2.2.2.2'}, ] na_table = utils.load_table(self.table_name, engine) for network_allocation in network_allocations: engine.execute(na_table.insert(network_allocation)) def check_upgrade(self, engine, data): na_table = utils.load_table(self.table_name, engine) for na in engine.execute(na_table.select()): self.test_case.assertTrue(hasattr(na, 'label')) self.test_case.assertEqual(na.label, 'user') # Create admin network allocation network_allocations = [ {'id': self.ids[2], 'share_server_id': na.share_server_id, 'ip_address': '3.3.3.3', 'label': 'admin', 'network_type': 'vlan', 'segmentation_id': 1005, 'ip_version': 4, 'cidr': '240.0.0.0/16'}, ] engine.execute(na_table.insert(network_allocations)) # Select admin network allocations for na in engine.execute( na_table.select().where(na_table.c.label == 'admin')): self.test_case.assertTrue(hasattr(na, 'label')) self.test_case.assertEqual('admin', na.label) for col_name in ('network_type', 'segmentation_id', 'ip_version', 
'cidr'): self.test_case.assertTrue(hasattr(na, col_name)) self.test_case.assertEqual( network_allocations[0][col_name], getattr(na, col_name)) def check_downgrade(self, engine): na_table = utils.load_table(self.table_name, engine) db_result = engine.execute(na_table.select()) self.test_case.assertTrue(db_result.rowcount >= len(self.ids)) for na in db_result: for col_name in ('label', 'network_type', 'segmentation_id', 'ip_version', 'cidr'): self.test_case.assertFalse(hasattr(na, col_name)) @map_to_migration('eb6d5544cbbd') class ShareSnapshotInstanceNewProviderLocationColumnChecks( BaseMigrationChecks): table_name = 'share_snapshot_instances' def setup_upgrade_data(self, engine): # Setup shares share_data = {'id': 'new_share_id'} s_table = utils.load_table('shares', engine) engine.execute(s_table.insert(share_data)) # Setup share instances share_instance_data = { 'id': 'new_share_instance_id', 'share_id': share_data['id'] } si_table = utils.load_table('share_instances', engine) engine.execute(si_table.insert(share_instance_data)) # Setup share snapshots share_snapshot_data = { 'id': 'new_snapshot_id', 'share_id': share_data['id']} snap_table = utils.load_table('share_snapshots', engine) engine.execute(snap_table.insert(share_snapshot_data)) # Setup snapshot instances snapshot_instance_data = { 'id': 'new_snapshot_instance_id', 'snapshot_id': share_snapshot_data['id'], 'share_instance_id': share_instance_data['id'] } snap_i_table = utils.load_table('share_snapshot_instances', engine) engine.execute(snap_i_table.insert(snapshot_instance_data)) def check_upgrade(self, engine, data): ss_table = utils.load_table(self.table_name, engine) db_result = engine.execute(ss_table.select()) self.test_case.assertTrue(db_result.rowcount > 0) for ss in db_result: self.test_case.assertTrue(hasattr(ss, 'provider_location')) self.test_case.assertEqual('new_snapshot_instance_id', ss.id) self.test_case.assertEqual('new_snapshot_id', ss.snapshot_id) def check_downgrade(self, engine): ss_table = utils.load_table(self.table_name, engine) db_result = engine.execute(ss_table.select()) self.test_case.assertTrue(db_result.rowcount > 0) for ss in db_result: self.test_case.assertFalse(hasattr(ss, 'provider_location')) self.test_case.assertEqual('new_snapshot_instance_id', ss.id) self.test_case.assertEqual('new_snapshot_id', ss.snapshot_id) manila-2.0.0/manila/tests/db/migrations/test_utils.py0000664000567000056710000000176412701407107024037 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
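# NOTE: illustrative usage sketch, not part of the original manila module.
# MigrationUtilsTestCase below exercises manila.db.migrations.utils.load_table(),
# which reflects an already-existing table from a live engine; the migration
# data checks above rely on the same helper to fetch tables by name. The
# function name _example_reflect_columns is hypothetical.
def _example_reflect_columns(table_name='shares'):
    from manila.db.migrations import utils
    from manila.db.sqlalchemy import api

    engine = api.get_engine()
    # load_table() returns a SQLAlchemy Table bound to the given connection.
    table = utils.load_table(table_name, engine)
    return [column.name for column in table.columns]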
from manila.db.migrations import utils from manila.db.sqlalchemy import api from manila import test class MigrationUtilsTestCase(test.TestCase): def test_load_table(self): connection = api.get_engine() table_name = 'shares' actual_result = utils.load_table(table_name, connection) self.assertIsNotNone(actual_result) self.assertEqual(table_name, actual_result.name) manila-2.0.0/manila/tests/db/fakes.py0000664000567000056710000000301012701407107020537 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 OpenStack, LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Stubouts, mocks and fixtures for the test suite.""" from manila import db class FakeModel(object): """Stubs out for model.""" def __init__(self, values): self.values = values def __getattr__(self, name): return self.values.get(name) def __getitem__(self, key): if key in self.values: return self.values[key] else: raise NotImplementedError() def __repr__(self): return '' % self.values def get(self, key, default=None): return self.__getattr__(key) or default def __contains__(self, key): return self._getattr__(key) def stub_out(stubs, funcs): """Set the stubs in mapping in the db api.""" for func in funcs: func_name = '_'.join(func.__name__.split('_')[1:]) stubs.Set(db, func_name, func) manila-2.0.0/manila/tests/api/0000775000567000056710000000000012701407265017273 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/api/test_common.py0000664000567000056710000002362412701407107022176 0ustar jenkinsjenkins00000000000000# Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test suites for 'common' code used throughout the OpenStack HTTP API. """ import webob import webob.exc from manila.api import common from manila import test NS = "{http://docs.openstack.org/compute/api/v1.1}" ATOMNS = "{http://www.w3.org/2005/Atom}" class LimiterTest(test.TestCase): """Unit tests for the `manila.api.common.limited` method. Takes in a list of items and, depending on the 'offset' and 'limit' GET params, returns a subset or complete set of the given items. 
""" def setUp(self): """Run before each test.""" super(LimiterTest, self).setUp() self.tiny = list(range(1)) self.small = list(range(10)) self.medium = list(range(1000)) self.large = list(range(10000)) def test_limiter_offset_zero(self): """Test offset key works with 0.""" req = webob.Request.blank('/?offset=0') self.assertEqual(self.tiny, common.limited(self.tiny, req)) self.assertEqual(self.small, common.limited(self.small, req)) self.assertEqual(self.medium, common.limited(self.medium, req)) self.assertEqual(self.large[:1000], common.limited(self.large, req)) def test_limiter_offset_medium(self): """Test offset key works with a medium sized number.""" req = webob.Request.blank('/?offset=10') self.assertEqual([], common.limited(self.tiny, req)) self.assertEqual(self.small[10:], common.limited(self.small, req)) self.assertEqual(self.medium[10:], common.limited(self.medium, req)) self.assertEqual(self.large[10:1010], common.limited(self.large, req)) def test_limiter_offset_over_max(self): """Test offset key works with a number over 1000 (max_limit).""" req = webob.Request.blank('/?offset=1001') self.assertEqual([], common.limited(self.tiny, req)) self.assertEqual([], common.limited(self.small, req)) self.assertEqual([], common.limited(self.medium, req)) self.assertEqual( self.large[1001:2001], common.limited(self.large, req)) def test_limiter_offset_blank(self): """Test offset key works with a blank offset.""" req = webob.Request.blank('/?offset=') self.assertRaises( webob.exc.HTTPBadRequest, common.limited, self.tiny, req) def test_limiter_offset_bad(self): """Test offset key works with a BAD offset.""" req = webob.Request.blank(u'/?offset=\u0020aa') self.assertRaises( webob.exc.HTTPBadRequest, common.limited, self.tiny, req) def test_limiter_nothing(self): """Test request with no offset or limit.""" req = webob.Request.blank('/') self.assertEqual(self.tiny, common.limited(self.tiny, req)) self.assertEqual(self.small, common.limited(self.small, req)) self.assertEqual(self.medium, common.limited(self.medium, req)) self.assertEqual(self.large[:1000], common.limited(self.large, req)) def test_limiter_limit_zero(self): """Test limit of zero.""" req = webob.Request.blank('/?limit=0') self.assertEqual(self.tiny, common.limited(self.tiny, req)) self.assertEqual(self.small, common.limited(self.small, req)) self.assertEqual(self.medium, common.limited(self.medium, req)) self.assertEqual(self.large[:1000], common.limited(self.large, req)) def test_limiter_limit_medium(self): """Test limit of 10.""" req = webob.Request.blank('/?limit=10') self.assertEqual(self.tiny, common.limited(self.tiny, req)) self.assertEqual(self.small, common.limited(self.small, req)) self.assertEqual(self.medium[:10], common.limited(self.medium, req)) self.assertEqual(self.large[:10], common.limited(self.large, req)) def test_limiter_limit_over_max(self): """Test limit of 3000.""" req = webob.Request.blank('/?limit=3000') self.assertEqual(self.tiny, common.limited(self.tiny, req)) self.assertEqual(self.small, common.limited(self.small, req)) self.assertEqual(self.medium, common.limited(self.medium, req)) self.assertEqual(self.large[:1000], common.limited(self.large, req)) def test_limiter_limit_and_offset(self): """Test request with both limit and offset.""" items = list(range(2000)) req = webob.Request.blank('/?offset=1&limit=3') self.assertEqual(items[1:4], common.limited(items, req)) req = webob.Request.blank('/?offset=3&limit=0') self.assertEqual(items[3:1003], common.limited(items, req)) req = 
webob.Request.blank('/?offset=3&limit=1500') self.assertEqual(items[3:1003], common.limited(items, req)) req = webob.Request.blank('/?offset=3000&limit=10') self.assertEqual([], common.limited(items, req)) def test_limiter_custom_max_limit(self): """Test a max_limit other than 1000.""" items = list(range(2000)) req = webob.Request.blank('/?offset=1&limit=3') self.assertEqual( items[1:4], common.limited(items, req, max_limit=2000)) req = webob.Request.blank('/?offset=3&limit=0') self.assertEqual( items[3:], common.limited(items, req, max_limit=2000)) req = webob.Request.blank('/?offset=3&limit=2500') self.assertEqual( items[3:], common.limited(items, req, max_limit=2000)) req = webob.Request.blank('/?offset=3000&limit=10') self.assertEqual([], common.limited(items, req, max_limit=2000)) def test_limiter_negative_limit(self): """Test a negative limit.""" req = webob.Request.blank('/?limit=-3000') self.assertRaises( webob.exc.HTTPBadRequest, common.limited, self.tiny, req) def test_limiter_negative_offset(self): """Test a negative offset.""" req = webob.Request.blank('/?offset=-30') self.assertRaises( webob.exc.HTTPBadRequest, common.limited, self.tiny, req) class PaginationParamsTest(test.TestCase): """Unit tests for the `manila.api.common.get_pagination_params` method. Takes in a request object and returns 'marker' and 'limit' GET params. """ def test_no_params(self): """Test no params.""" req = webob.Request.blank('/') self.assertEqual({}, common.get_pagination_params(req)) def test_valid_marker(self): """Test valid marker param.""" req = webob.Request.blank( '/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2') self.assertEqual({'marker': '263abb28-1de6-412f-b00b-f0ee0c4333c2'}, common.get_pagination_params(req)) def test_valid_limit(self): """Test valid limit param.""" req = webob.Request.blank('/?limit=10') self.assertEqual({'limit': 10}, common.get_pagination_params(req)) def test_invalid_limit(self): """Test invalid limit param.""" req = webob.Request.blank('/?limit=-2') self.assertRaises( webob.exc.HTTPBadRequest, common.get_pagination_params, req) def test_valid_limit_and_marker(self): """Test valid limit and marker parameters.""" marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2' req = webob.Request.blank('/?limit=20&marker=%s' % marker) self.assertEqual({'marker': marker, 'limit': 20}, common.get_pagination_params(req)) class MiscFunctionsTest(test.TestCase): def test_remove_major_version_from_href(self): fixture = 'http://www.testsite.com/v1/images' expected = 'http://www.testsite.com/images' actual = common.remove_version_from_href(fixture) self.assertEqual(expected, actual) def test_remove_version_from_href(self): fixture = 'http://www.testsite.com/v1.1/images' expected = 'http://www.testsite.com/images' actual = common.remove_version_from_href(fixture) self.assertEqual(expected, actual) def test_remove_version_from_href_2(self): fixture = 'http://www.testsite.com/v1.1/' expected = 'http://www.testsite.com/' actual = common.remove_version_from_href(fixture) self.assertEqual(expected, actual) def test_remove_version_from_href_3(self): fixture = 'http://www.testsite.com/v10.10' expected = 'http://www.testsite.com' actual = common.remove_version_from_href(fixture) self.assertEqual(expected, actual) def test_remove_version_from_href_4(self): fixture = 'http://www.testsite.com/v1.1/images/v10.5' expected = 'http://www.testsite.com/images/v10.5' actual = common.remove_version_from_href(fixture) self.assertEqual(expected, actual) def test_remove_version_from_href_bad_request(self): fixture = 
        'http://www.testsite.com/1.1/images'
        self.assertRaises(ValueError,
                          common.remove_version_from_href,
                          fixture)

    def test_remove_version_from_href_bad_request_2(self):
        fixture = 'http://www.testsite.com/v/images'
        self.assertRaises(ValueError,
                          common.remove_version_from_href,
                          fixture)

    def test_remove_version_from_href_bad_request_3(self):
        fixture = 'http://www.testsite.com/v1.1images'
        self.assertRaises(ValueError,
                          common.remove_version_from_href,
                          fixture)
manila-2.0.0/manila/tests/api/__init__.py0000664000567000056710000000000012701407107021365 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/api/common.py0000664000567000056710000000225612701407107021135 0ustar jenkinsjenkins00000000000000
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


def compare_links(actual, expected):
    """Compare xml atom links."""
    return compare_tree_to_dict(actual, expected, ('rel', 'href', 'type'))


def compare_media_types(actual, expected):
    """Compare xml media types."""
    return compare_tree_to_dict(actual, expected, ('base', 'type'))


def compare_tree_to_dict(actual, expected, keys):
    """Compare parts of lxml.etree objects to dicts."""
    for elem, data in zip(actual, expected):
        for key in keys:
            if elem.get(key) != data.get(key):
                return False
    return True
manila-2.0.0/manila/tests/api/openstack/0000775000567000056710000000000012701407265021262 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/api/openstack/test_api_version_request.py0000664000567000056710000001565612701407107026761 0ustar jenkinsjenkins00000000000000
# Copyright 2014 IBM Corp.
# Copyright 2015 Clinton Knight
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
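# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the manila tree): the tests below exercise
# APIVersionRequest, manila's microversion object. This snippet only mirrors
# behaviour those tests assert -- parsing a 'major.minor' string and checking
# it against the range a handler supports; the variable names are invented
# for the example.
from manila.api.openstack import api_version_request

request_version = api_version_request.APIVersionRequest('2.5')
handler_min = api_version_request.APIVersionRequest('2.0')
handler_max = api_version_request.APIVersionRequest('2.45')

# True, because 2.0 <= 2.5 <= 2.45 (compare test_version_matches below).
assert request_version.matches(handler_min, handler_max)

# A request built without a version string is the "null" version; it compares
# equal only to another null version.
assert api_version_request.APIVersionRequest().is_null()
# ---------------------------------------------------------------------------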
import ddt import six from manila.api.openstack import api_version_request from manila.api.openstack import versioned_method from manila import exception from manila import test @ddt.ddt class APIVersionRequestTests(test.TestCase): def test_init(self): result = api_version_request.APIVersionRequest() self.assertIsNone(result._ver_major) self.assertIsNone(result._ver_minor) self.assertFalse(result._experimental) def test_min_version(self): self.assertEqual( api_version_request.APIVersionRequest( api_version_request._MIN_API_VERSION), api_version_request.min_api_version()) def test_max_api_version(self): self.assertEqual( api_version_request.APIVersionRequest( api_version_request._MAX_API_VERSION), api_version_request.max_api_version()) @ddt.data( ('1.1', 1, 1), ('2.10', 2, 10), ('5.234', 5, 234), ('12.5', 12, 5), ('2.0', 2, 0), ('2.200', 2, 200) ) @ddt.unpack def test_valid_version_strings(self, version_string, major, minor): request = api_version_request.APIVersionRequest(version_string) self.assertEqual(major, request._ver_major) self.assertEqual(minor, request._ver_minor) def test_null_version(self): v = api_version_request.APIVersionRequest() self.assertTrue(v.is_null()) @ddt.data('2', '200', '2.1.4', '200.23.66.3', '5 .3', '5. 3', '5.03', '02.1', '2.001', '', ' 2.1', '2.1 ') def test_invalid_version_strings(self, version_string): self.assertRaises(exception.InvalidAPIVersionString, api_version_request.APIVersionRequest, version_string) def test_cmpkey(self): request = api_version_request.APIVersionRequest('1.2') self.assertEqual((1, 2), request._cmpkey()) @ddt.data(True, False) def test_experimental_property(self, experimental): request = api_version_request.APIVersionRequest() request.experimental = experimental self.assertEqual(experimental, request.experimental) def test_experimental_property_value_error(self): request = api_version_request.APIVersionRequest() def set_non_boolean(): request.experimental = 'non_bool_value' self.assertRaises(exception.InvalidParameterValue, set_non_boolean) def test_version_comparisons(self): v1 = api_version_request.APIVersionRequest('2.0') v2 = api_version_request.APIVersionRequest('2.5') v3 = api_version_request.APIVersionRequest('5.23') v4 = api_version_request.APIVersionRequest('2.0') v_null = api_version_request.APIVersionRequest() self.assertTrue(v1 < v2) self.assertTrue(v1 <= v2) self.assertTrue(v3 > v2) self.assertTrue(v3 >= v2) self.assertTrue(v1 != v2) self.assertTrue(v1 == v4) self.assertTrue(v1 != v_null) self.assertTrue(v_null == v_null) self.assertFalse(v1 == '2.0') def test_version_matches(self): v1 = api_version_request.APIVersionRequest('2.0') v2 = api_version_request.APIVersionRequest('2.5') v3 = api_version_request.APIVersionRequest('2.45') v4 = api_version_request.APIVersionRequest('3.3') v5 = api_version_request.APIVersionRequest('3.23') v6 = api_version_request.APIVersionRequest('2.0') v7 = api_version_request.APIVersionRequest('3.3') v8 = api_version_request.APIVersionRequest('4.0') v_null = api_version_request.APIVersionRequest() self.assertTrue(v2.matches(v1, v3)) self.assertTrue(v2.matches(v1, v_null)) self.assertTrue(v1.matches(v6, v2)) self.assertTrue(v4.matches(v2, v7)) self.assertTrue(v4.matches(v_null, v7)) self.assertTrue(v4.matches(v_null, v8)) self.assertFalse(v1.matches(v2, v3)) self.assertFalse(v5.matches(v2, v4)) self.assertFalse(v2.matches(v3, v1)) self.assertTrue(v1.matches(v_null, v_null)) self.assertRaises(ValueError, v_null.matches, v1, v3) def test_version_matches_experimental_request(self): 
experimental_request = api_version_request.APIVersionRequest('2.0') experimental_request.experimental = True non_experimental_request = api_version_request.APIVersionRequest('2.0') experimental_function = versioned_method.VersionedMethod( 'experimental_function', api_version_request.APIVersionRequest('2.0'), api_version_request.APIVersionRequest('2.1'), True, None) non_experimental_function = versioned_method.VersionedMethod( 'non_experimental_function', api_version_request.APIVersionRequest('2.0'), api_version_request.APIVersionRequest('2.1'), False, None) self.assertTrue(experimental_request.matches_versioned_method( experimental_function)) self.assertTrue(experimental_request.matches_versioned_method( non_experimental_function)) self.assertTrue(non_experimental_request.matches_versioned_method( non_experimental_function)) self.assertFalse(non_experimental_request.matches_versioned_method( experimental_function)) def test_matches_versioned_method(self): request = api_version_request.APIVersionRequest('2.0') self.assertRaises(exception.InvalidParameterValue, request.matches_versioned_method, 'fake_method') def test_get_string(self): v1_string = '3.23' v1 = api_version_request.APIVersionRequest(v1_string) self.assertEqual(v1_string, v1.get_string()) self.assertRaises(ValueError, api_version_request.APIVersionRequest().get_string) @ddt.data(('1', '0'), ('1', '1')) @ddt.unpack def test_str(self, major, minor): request_input = '%s.%s' % (major, minor) request = api_version_request.APIVersionRequest(request_input) request_string = six.text_type(request) self.assertEqual('API Version Request ' 'Major: %s, Minor: %s' % (major, minor), request_string) manila-2.0.0/manila/tests/api/openstack/test_versioned_method.py0000664000567000056710000000244612701407107026232 0ustar jenkinsjenkins00000000000000# Copyright 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import six from manila.api.openstack import versioned_method from manila import test class VersionedMethodTestCase(test.TestCase): def test_str(self): args = ('fake_name', 'fake_min', 'fake_max') method = versioned_method.VersionedMethod(*(args + (False, None))) method_string = six.text_type(method) self.assertEqual('Version Method %s: min: %s, max: %s' % args, method_string) def test_cmpkey(self): method = versioned_method.VersionedMethod( 'fake_name', 'fake_start_version', 'fake_end_version', False, 'fake_func') self.assertEqual('fake_start_version', method._cmpkey()) manila-2.0.0/manila/tests/api/openstack/__init__.py0000664000567000056710000000000012701407107023354 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/api/openstack/test_wsgi.py0000664000567000056710000007743512701407107023657 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt import mock import six import webob import inspect from manila.api.openstack import wsgi from manila import context from manila import exception from manila import policy from manila import test from manila.tests.api import fakes @ddt.ddt class RequestTest(test.TestCase): def test_content_type_missing(self): request = wsgi.Request.blank('/tests/123', method='POST') request.body = six.b("") self.assertIsNone(request.get_content_type()) def test_content_type_unsupported(self): request = wsgi.Request.blank('/tests/123', method='POST') request.headers["Content-Type"] = "text/html" request.body = six.b("asdf
    ") self.assertRaises(exception.InvalidContentType, request.get_content_type) def test_content_type_with_charset(self): request = wsgi.Request.blank('/tests/123') request.headers["Content-Type"] = "application/json; charset=UTF-8" result = request.get_content_type() self.assertEqual("application/json", result) def test_content_type_from_accept(self): content_type = 'application/json' request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = content_type result = request.best_match_content_type() self.assertEqual(content_type, result) def test_content_type_from_accept_best(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/xml, application/json" result = request.best_match_content_type() self.assertEqual("application/json", result) request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = ("application/json; q=0.3, " "application/xml; q=0.9") result = request.best_match_content_type() self.assertEqual("application/json", result) def test_content_type_from_query_extension(self): request = wsgi.Request.blank('/tests/123.json') result = request.best_match_content_type() self.assertEqual("application/json", result) request = wsgi.Request.blank('/tests/123.invalid') result = request.best_match_content_type() self.assertEqual("application/json", result) def test_content_type_accept_default(self): request = wsgi.Request.blank('/tests/123.unsupported') request.headers["Accept"] = "application/unsupported1" result = request.best_match_content_type() self.assertEqual("application/json", result) def test_cache_and_retrieve_resources(self): request = wsgi.Request.blank('/foo') # Test that trying to retrieve a cached object on # an empty cache fails gracefully self.assertIsNone(request.cached_resource()) self.assertIsNone(request.cached_resource_by_id('r-0')) resources = [{'id': 'r-%s' % x} for x in range(3)] # Cache an empty list of resources using the default name request.cache_resource([]) self.assertEqual({}, request.cached_resource()) self.assertIsNone(request.cached_resource('r-0')) # Cache some resources request.cache_resource(resources[:2]) # Cache one resource request.cache_resource(resources[2]) # Cache a different resource name other_resource = {'id': 'o-0'} request.cache_resource(other_resource, name='other-resource') self.assertEqual(resources[0], request.cached_resource_by_id('r-0')) self.assertEqual(resources[1], request.cached_resource_by_id('r-1')) self.assertEqual(resources[2], request.cached_resource_by_id('r-2')) self.assertIsNone(request.cached_resource_by_id('r-3')) self.assertEqual( {'r-0': resources[0], 'r-1': resources[1], 'r-2': resources[2]}, request.cached_resource()) self.assertEqual( other_resource, request.cached_resource_by_id('o-0', name='other-resource')) @ddt.data( 'share_type', ) def test_cache_and_retrieve_resources_by_resource(self, resource_name): cache_all_func = 'cache_db_%ss' % resource_name cache_one_func = 'cache_db_%s' % resource_name get_db_all_func = 'get_db_%ss' % resource_name get_db_one_func = 'get_db_%s' % resource_name r = wsgi.Request.blank('/foo') amount = 5 res_range = range(amount) resources = [{'id': 'id%s' % x} for x in res_range] # Store 2 getattr(r, cache_all_func)(resources[:amount - 1]) # Store 1 getattr(r, cache_one_func)(resources[amount - 1]) for i in res_range: self.assertEqual( resources[i], getattr(r, get_db_one_func)('id%s' % i), ) self.assertIsNone(getattr(r, get_db_one_func)('id%s' % amount)) self.assertEqual( {'id%s' % i: resources[i] for i in res_range}, 
getattr(r, get_db_all_func)()) class ActionDispatcherTest(test.TestCase): def test_dispatch(self): serializer = wsgi.ActionDispatcher() serializer.create = lambda x: 'pants' self.assertEqual('pants', serializer.dispatch({}, action='create')) def test_dispatch_action_None(self): serializer = wsgi.ActionDispatcher() serializer.create = lambda x: 'pants' serializer.default = lambda x: 'trousers' self.assertEqual('trousers', serializer.dispatch({}, action=None)) def test_dispatch_default(self): serializer = wsgi.ActionDispatcher() serializer.create = lambda x: 'pants' serializer.default = lambda x: 'trousers' self.assertEqual('trousers', serializer.dispatch({}, action='update')) class DictSerializerTest(test.TestCase): def test_dispatch_default(self): serializer = wsgi.DictSerializer() self.assertEqual('', serializer.serialize({}, 'update')) class JSONDictSerializerTest(test.TestCase): def test_json(self): input_dict = dict(servers=dict(a=(2, 3))) expected_json = six.b('{"servers":{"a":[2,3]}}') serializer = wsgi.JSONDictSerializer() result = serializer.serialize(input_dict) result = result.replace(six.b('\n'), six.b('')).replace(six.b(' '), six.b('')) self.assertEqual(expected_json, result) class TextDeserializerTest(test.TestCase): def test_dispatch_default(self): deserializer = wsgi.TextDeserializer() self.assertEqual({}, deserializer.deserialize({}, 'update')) class JSONDeserializerTest(test.TestCase): def test_json(self): data = """{"a": { "a1": "1", "a2": "2", "bs": ["1", "2", "3", {"c": {"c1": "1"}}], "d": {"e": "1"}, "f": "1"}}""" as_dict = { 'body': { 'a': { 'a1': '1', 'a2': '2', 'bs': ['1', '2', '3', {'c': {'c1': '1'}}], 'd': {'e': '1'}, 'f': '1', }, }, } deserializer = wsgi.JSONDeserializer() self.assertEqual(as_dict, deserializer.deserialize(data)) class ResourceTest(test.TestCase): def test_resource_call(self): class Controller(object): def index(self, req): return 'off' req = webob.Request.blank('/tests') app = fakes.TestRouter(Controller()) response = req.get_response(app) self.assertEqual(six.b('off'), response.body) self.assertEqual(200, response.status_int) def test_resource_not_authorized(self): class Controller(object): def index(self, req): raise exception.NotAuthorized() req = webob.Request.blank('/tests') app = fakes.TestRouter(Controller()) response = req.get_response(app) self.assertEqual(403, response.status_int) def test_dispatch(self): class Controller(object): def index(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) method, extensions = resource.get_method(None, 'index', None, '') actual = resource.dispatch(method, None, {'pants': 'off'}) expected = 'off' self.assertEqual(expected, actual) def test_get_method_undefined_controller_action(self): class Controller(object): def index(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) self.assertRaises(AttributeError, resource.get_method, None, 'create', None, '') def test_get_method_action_json(self): class Controller(wsgi.Controller): @wsgi.action('fooAction') def _action_foo(self, req, id, body): return body controller = Controller() resource = wsgi.Resource(controller) method, extensions = resource.get_method(None, 'action', 'application/json', '{"fooAction": true}') self.assertEqual(controller._action_foo, method) def test_get_method_action_bad_body(self): class Controller(wsgi.Controller): @wsgi.action('fooAction') def _action_foo(self, req, id, body): return body controller = Controller() resource = 
wsgi.Resource(controller) self.assertRaises(exception.MalformedRequestBody, resource.get_method, None, 'action', 'application/json', '{}') def test_get_method_unknown_controller_action(self): class Controller(wsgi.Controller): @wsgi.action('fooAction') def _action_foo(self, req, id, body): return body controller = Controller() resource = wsgi.Resource(controller) self.assertRaises(KeyError, resource.get_method, None, 'action', 'application/json', '{"barAction": true}') def test_get_method_action_method(self): class Controller(object): def action(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) method, extensions = resource.get_method(None, 'action', 'application/xml', 'true api_version_request.APIVersionRequest("2.8")): assert_method = self.assertNotIn else: assert_method = self.assertIn assert_method("export_location", instance) assert_method("export_locations", instance) if (api_version_request.APIVersionRequest(version) > api_version_request.APIVersionRequest("2.10")): self.assertIn("replica_state", instance) self.mock_policy_check.assert_has_calls([ get_instances_policy_check_call, share_policy_check_call]) @ddt.data('show', 'get_share_instances') def test_not_found(self, target_method_name): method = getattr(self.controller, target_method_name) action = (target_method_name if target_method_name == 'show' else 'index') self.assertRaises(webob_exc.HTTPNotFound, method, self._get_request('fake'), 'fake') self.mock_policy_check.assert_called_once_with( self.admin_context, self.resource_name, action) @ddt.data(('show', 2), ('get_share_instances', 2), ('index', 1)) @ddt.unpack def test_access(self, target_method_name, args_count): user_context = context.RequestContext('fake', 'fake') req = self._get_request('fake', user_context) policy_exception = exception.PolicyNotAuthorized( action=target_method_name) target_method = getattr(self.controller, target_method_name) args = [i for i in range(1, args_count)] with mock.patch.object(policy, 'check_policy', mock.Mock( side_effect=policy_exception)): self.assertRaises( webob_exc.HTTPForbidden, target_method, req, *args) def _reset_status(self, ctxt, model, req, db_access_method, valid_code, valid_status=None, body=None, version='2.7'): if float(version) > 2.6: action_name = 'reset_status' else: action_name = 'os-reset_status' if body is None: body = {action_name: {'status': constants.STATUS_ERROR}} req.method = 'POST' req.headers['content-type'] = 'application/json' req.headers['X-Openstack-Manila-Api-Version'] = version req.body = six.b(jsonutils.dumps(body)) req.environ['manila.context'] = ctxt with mock.patch.object( policy, 'check_policy', fakes.mock_fake_admin_check): resp = req.get_response(fakes.app()) # validate response code and model status self.assertEqual(valid_code, resp.status_int) if valid_code == 404: self.assertRaises(exception.NotFound, db_access_method, ctxt, model['id']) else: actual_model = db_access_method(ctxt, model['id']) self.assertEqual(valid_status, actual_model['status']) @ddt.data(*fakes.fixture_reset_status_with_different_roles) @ddt.unpack def test_share_instances_reset_status_with_different_roles(self, role, valid_code, valid_status, version): ctxt = self._get_context(role) instance, req = self._setup_share_instance_data(version=version) self._reset_status(ctxt, instance, req, db.share_instance_get, valid_code, valid_status, version=version) @ddt.data( ({'os-reset_status': {'x-status': 'bad'}}, '2.6'), ({'os-reset_status': {'status': 'invalid'}}, '2.6'), 
({'reset_status': {'x-status': 'bad'}}, '2.7'), ({'reset_status': {'status': 'invalid'}}, '2.7'), ) @ddt.unpack def test_share_instance_invalid_reset_status_body(self, body, version): instance, req = self._setup_share_instance_data() req.headers['X-Openstack-Manila-Api-Version'] = version self._reset_status(self.admin_context, instance, req, db.share_instance_get, 400, constants.STATUS_AVAILABLE, body, version=version) def _force_delete(self, ctxt, model, req, db_access_method, valid_code, check_model_in_db=False, version='2.7'): if float(version) > 2.6: action_name = 'force_delete' else: action_name = 'os-force_delete' body = {action_name: {'status': constants.STATUS_ERROR}} req.method = 'POST' req.headers['content-type'] = 'application/json' req.headers['X-Openstack-Manila-Api-Version'] = version req.body = six.b(jsonutils.dumps(body)) req.environ['manila.context'] = ctxt with mock.patch.object( policy, 'check_policy', fakes.mock_fake_admin_check): resp = req.get_response(fakes.app()) # validate response self.assertEqual(valid_code, resp.status_int) if valid_code == 202 and check_model_in_db: self.assertRaises(exception.NotFound, db_access_method, ctxt, model['id']) @ddt.data(*fakes.fixture_force_delete_with_different_roles) @ddt.unpack def test_instance_force_delete_with_different_roles(self, role, resp_code, version): instance, req = self._setup_share_instance_data(version=version) ctxt = self._get_context(role) self._force_delete(ctxt, instance, req, db.share_instance_get, resp_code, version=version) def test_instance_force_delete_missing(self): instance, req = self._setup_share_instance_data( instance={'id': 'fake'}) ctxt = self._get_context('admin') self._force_delete(ctxt, instance, req, db.share_instance_get, 404) manila-2.0.0/manila/tests/api/v2/test_availability_zones.py0000664000567000056710000000771312701407107025126 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
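# ---------------------------------------------------------------------------
# Illustrative sketch (not manila code): the _reset_status()/_force_delete()
# helpers above hand-build admin-action requests. The key detail they encode
# is that the action key dropped its 'os-' prefix at microversion 2.7, and
# that the requested microversion travels in the
# X-Openstack-Manila-Api-Version header. The helper name below is invented
# for the example.
from oslo_serialization import jsonutils
import six
import webob


def build_reset_status_request(path, version, status):
    """Condensed version of the request set-up used in the tests above."""
    action = 'reset_status' if float(version) > 2.6 else 'os-reset_status'
    req = webob.Request.blank(path)
    req.method = 'POST'
    req.headers['content-type'] = 'application/json'
    req.headers['X-Openstack-Manila-Api-Version'] = version
    req.body = six.b(jsonutils.dumps({action: {'status': status}}))
    return req
# ---------------------------------------------------------------------------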
import ddt import mock from manila.api.v2 import availability_zones from manila import context from manila import exception from manila import policy from manila import test from manila.tests.api import fakes @ddt.ddt class AvailabilityZonesAPITest(test.TestCase): @ddt.data( availability_zones.AvailabilityZoneControllerLegacy, availability_zones.AvailabilityZoneController, ) def test_instantiate_controller(self, controller): az_controller = controller() self.assertTrue(hasattr(az_controller, "resource_name")) self.assertEqual("availability_zone", az_controller.resource_name) self.assertTrue(hasattr(az_controller, "_view_builder")) self.assertTrue(hasattr(az_controller._view_builder, "detail_list")) @ddt.data( ('1.0', availability_zones.AvailabilityZoneControllerLegacy), ('2.0', availability_zones.AvailabilityZoneControllerLegacy), ('2.6', availability_zones.AvailabilityZoneControllerLegacy), ('2.7', availability_zones.AvailabilityZoneController), ) @ddt.unpack def test_index(self, version, controller): azs = [ { "id": "fake_id1", "name": "fake_name1", "created_at": "fake_created_at", "updated_at": "fake_updated_at", }, { "id": "fake_id2", "name": "fake_name2", "created_at": "fake_created_at", "updated_at": "fake_updated_at", "deleted": "False", "redundant_key": "redundant_value", }, ] mock_policy_check = self.mock_object(policy, 'check_policy') self.mock_object(availability_zones.db, 'availability_zone_get_all', mock.Mock(return_value=azs)) az_controller = controller() ctxt = context.RequestContext("admin", "fake", True) req = fakes.HTTPRequest.blank('/shares', version=version) req.environ['manila.context'] = ctxt result = az_controller.index(req) availability_zones.db.availability_zone_get_all.\ assert_called_once_with(ctxt) mock_policy_check.assert_called_once_with( ctxt, controller.resource_name, 'index') self.assertIsInstance(result, dict) self.assertEqual(["availability_zones"], list(result.keys())) self.assertIsInstance(result["availability_zones"], list) self.assertEqual(2, len(result["availability_zones"])) self.assertTrue(azs[0] in result["availability_zones"]) azs[1].pop("deleted") azs[1].pop("redundant_key") self.assertTrue(azs[1] in result["availability_zones"]) @ddt.data( ('1.0', availability_zones.AvailabilityZoneController), ('2.0', availability_zones.AvailabilityZoneController), ('2.6', availability_zones.AvailabilityZoneController), ('2.7', availability_zones.AvailabilityZoneControllerLegacy), ) @ddt.unpack def test_index_with_unsupported_versions(self, version, controller): ctxt = context.RequestContext("admin", "fake", True) req = fakes.HTTPRequest.blank('/shares', version=version) req.environ['manila.context'] = ctxt az_controller = controller() self.assertRaises( exception.VersionNotFoundForAPIMethod, az_controller.index, req) manila-2.0.0/manila/tests/api/v2/test_consistency_groups.py0000664000567000056710000007314212701407107025175 0ustar jenkinsjenkins00000000000000# Copyright 2015 Alex Meade # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
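# ---------------------------------------------------------------------------
# Illustrative sketch (not manila code): test_index above asserts that the
# availability-zone view strips extra DB fields such as 'deleted' and
# 'redundant_key' and keeps only the documented ones. The field list here is
# inferred from that test; the real view builder lives under manila.api.views.
_AZ_FIELDS = ('id', 'name', 'created_at', 'updated_at')


def detail_list_view(availability_zones):
    """Keep only the exposed keys for each availability zone."""
    return {
        'availability_zones': [
            {key: az[key] for key in _AZ_FIELDS if key in az}
            for az in availability_zones
        ]
    }
# ---------------------------------------------------------------------------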
import copy import datetime import uuid import ddt import mock from oslo_config import cfg from oslo_serialization import jsonutils import six import webob from manila.api.openstack import wsgi import manila.api.v2.consistency_groups as cgs from manila.common import constants import manila.consistency_group.api as cg_api from manila import context from manila import db from manila import exception from manila import policy from manila.share import share_types from manila import test from manila.tests.api import fakes from manila.tests import db_utils CONF = cfg.CONF @ddt.ddt class CGApiTest(test.TestCase): """Consistency Groups API Test suite.""" def setUp(self): super(self.__class__, self).setUp() self.controller = cgs.CGController() self.resource_name = self.controller.resource_name self.fake_share_type = {'id': six.text_type(uuid.uuid4())} self.api_version = '2.4' self.request = fakes.HTTPRequest.blank('/consistency-groups', version=self.api_version, experimental=True) self.flags(rpc_backend='manila.openstack.common.rpc.impl_fake') self.admin_context = context.RequestContext('admin', 'fake', True) self.member_context = context.RequestContext('fake', 'fake') self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) self.context = self.request.environ['manila.context'] def _get_context(self, role): return getattr(self, '%s_context' % role) def _setup_cg_data(self, cg=None, version='2.7'): if cg is None: cg = db_utils.create_consistency_group( status=constants.STATUS_AVAILABLE) req = fakes.HTTPRequest.blank('/v2/fake/consistency-groups/%s/action' % cg['id'], version=version) req.headers[wsgi.API_VERSION_REQUEST_HEADER] = version req.headers[wsgi.EXPERIMENTAL_API_REQUEST_HEADER] = 'True' return cg, req def _get_fake_cg(self, **values): cg = { 'id': 'fake_id', 'user_id': 'fakeuser', 'project_id': 'fakeproject', 'status': constants.STATUS_CREATING, 'name': None, 'description': None, 'host': None, 'source_cgsnapshot_id': None, 'share_network_id': None, 'share_server_id': None, 'share_types': [], 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), } cg.update(**values) expected_cg = copy.deepcopy(cg) del expected_cg['user_id'] del expected_cg['share_server_id'] expected_cg['links'] = mock.ANY expected_cg['share_types'] = [st['share_type_id'] for st in cg.get('share_types')] return cg, expected_cg def _get_fake_simple_cg(self, **values): cg = { 'id': 'fake_id', 'name': None, } cg.update(**values) expected_cg = copy.deepcopy(cg) expected_cg['links'] = mock.ANY return cg, expected_cg def test_cg_create(self): fake_cg, expected_cg = self._get_fake_cg() self.mock_object(share_types, 'get_default_share_type', mock.Mock(return_value=self.fake_share_type)) self.mock_object(self.controller.cg_api, 'create', mock.Mock(return_value=fake_cg)) body = {"consistency_group": {}} context = self.request.environ['manila.context'] res_dict = self.controller.create(self.request, body) self.controller.cg_api.create.assert_called_once_with( context, share_type_ids=[self.fake_share_type['id']]) self.assertEqual(expected_cg, res_dict['consistency_group']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_cg_create_invalid_cgsnapshot_state(self): fake_snap_id = six.text_type(uuid.uuid4()) self.mock_object(self.controller.cg_api, 'create', mock.Mock(side_effect=exception.InvalidCGSnapshot( reason='bad status' ))) body = {"consistency_group": {"source_cgsnapshot_id": fake_snap_id}} self.assertRaises(webob.exc.HTTPConflict, 
self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_cg_create_no_default_share_type(self): fake_cg, expected_cg = self._get_fake_cg() self.mock_object(share_types, 'get_default_share_type', mock.Mock(return_value=None)) self.mock_object(self.controller.cg_api, 'create', mock.Mock(return_value=fake_cg)) body = {"consistency_group": {}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_cg_create_with_name(self): fake_name = 'fake_name' fake_cg, expected_cg = self._get_fake_cg(name=fake_name) self.mock_object(share_types, 'get_default_share_type', mock.Mock(return_value=self.fake_share_type)) self.mock_object(self.controller.cg_api, 'create', mock.Mock(return_value=fake_cg)) body = {"consistency_group": {"name": fake_name}} res_dict = self.controller.create(self.request, body) self.controller.cg_api.create.assert_called_once_with( self.context, name=fake_name, share_type_ids=[self.fake_share_type['id']]) self.assertEqual(expected_cg, res_dict['consistency_group']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_cg_create_with_description(self): fake_description = 'fake_description' fake_cg, expected_cg = self._get_fake_cg(description=fake_description) self.mock_object(share_types, 'get_default_share_type', mock.Mock(return_value=self.fake_share_type)) self.mock_object(self.controller.cg_api, 'create', mock.Mock(return_value=fake_cg)) body = {"consistency_group": {"description": fake_description}} res_dict = self.controller.create(self.request, body) self.controller.cg_api.create.assert_called_once_with( self.context, description=fake_description, share_type_ids=[self.fake_share_type['id']]) self.assertEqual(expected_cg, res_dict['consistency_group']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_cg_create_with_share_types(self): fake_share_types = [{"share_type_id": self.fake_share_type['id']}] fake_cg, expected_cg = self._get_fake_cg(share_types=fake_share_types) self.mock_object(self.controller.cg_api, 'create', mock.Mock(return_value=fake_cg)) body = {"consistency_group": { "share_types": [self.fake_share_type['id']]}} res_dict = self.controller.create(self.request, body) self.controller.cg_api.create.assert_called_once_with( self.context, share_type_ids=[self.fake_share_type['id']]) self.assertEqual(expected_cg, res_dict['consistency_group']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_cg_create_with_source_cgsnapshot_id(self): fake_snap_id = six.text_type(uuid.uuid4()) fake_cg, expected_cg = self._get_fake_cg( source_cgsnapshot_id=fake_snap_id) self.mock_object(share_types, 'get_default_share_type', mock.Mock(return_value=self.fake_share_type)) self.mock_object(self.controller.cg_api, 'create', mock.Mock(return_value=fake_cg)) body = {"consistency_group": { "source_cgsnapshot_id": fake_snap_id}} res_dict = self.controller.create(self.request, body) self.controller.cg_api.create.assert_called_once_with( self.context, source_cgsnapshot_id=fake_snap_id) self.assertEqual(expected_cg, res_dict['consistency_group']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_cg_create_with_share_network_id(self): fake_net_id = six.text_type(uuid.uuid4()) fake_cg, 
expected_cg = self._get_fake_cg( share_network_id=fake_net_id) self.mock_object(share_types, 'get_default_share_type', mock.Mock(return_value=self.fake_share_type)) self.mock_object(self.controller.cg_api, 'create', mock.Mock(return_value=fake_cg)) body = {"consistency_group": { "share_network_id": fake_net_id}} res_dict = self.controller.create(self.request, body) self.controller.cg_api.create.assert_called_once_with( self.context, share_network_id=fake_net_id, share_type_ids=mock.ANY) self.assertEqual(expected_cg, res_dict['consistency_group']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_cg_create_no_default_share_type_with_cgsnapshot(self): fake_snap_id = six.text_type(uuid.uuid4()) fake_cg, expected_cg = self._get_fake_cg() self.mock_object(share_types, 'get_default_share_type', mock.Mock(return_value=None)) self.mock_object(self.controller.cg_api, 'create', mock.Mock(return_value=fake_cg)) body = {"consistency_group": { "source_cgsnapshot_id": fake_snap_id}} res_dict = self.controller.create(self.request, body) self.controller.cg_api.create.assert_called_once_with( self.context, source_cgsnapshot_id=fake_snap_id) self.assertEqual(expected_cg, res_dict['consistency_group']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_cg_create_with_name_and_description(self): fake_name = 'fake_name' fake_description = 'fake_description' fake_cg, expected_cg = self._get_fake_cg(name=fake_name, description=fake_description) self.mock_object(share_types, 'get_default_share_type', mock.Mock(return_value=self.fake_share_type)) self.mock_object(self.controller.cg_api, 'create', mock.Mock(return_value=fake_cg)) body = {"consistency_group": {"name": fake_name, "description": fake_description}} res_dict = self.controller.create(self.request, body) self.controller.cg_api.create.assert_called_once_with( self.context, name=fake_name, description=fake_description, share_type_ids=[self.fake_share_type['id']]) self.assertEqual(expected_cg, res_dict['consistency_group']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_cg_create_invalid_body(self): body = {"not_consistency_group": {}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_cg_create_invalid_body_share_types_and_source_cgsnapshot(self): body = {"consistency_group": {"share_types": [], "source_cgsnapshot_id": ""}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_cg_create_source_cgsnapshot_not_in_available(self): fake_snap_id = six.text_type(uuid.uuid4()) body = {"consistency_group": {"source_cgsnapshot_id": fake_snap_id}} self.mock_object(self.controller.cg_api, 'create', mock.Mock( side_effect=exception.InvalidCGSnapshot(reason='blah'))) self.assertRaises(webob.exc.HTTPConflict, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_cg_create_source_cgsnapshot_does_not_exist(self): fake_snap_id = six.text_type(uuid.uuid4()) body = {"consistency_group": {"source_cgsnapshot_id": fake_snap_id}} self.mock_object(self.controller.cg_api, 'create', mock.Mock( side_effect=exception.CGSnapshotNotFound( cgsnapshot_id=fake_snap_id))) 
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_cg_create_source_cgsnapshot_not_a_uuid(self): fake_snap_id = "Not a uuid" body = {"consistency_group": {"source_cgsnapshot_id": fake_snap_id}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_cg_create_share_network_id_not_a_uuid(self): fake_net_id = "Not a uuid" body = {"consistency_group": {"share_network_id": fake_net_id}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_cg_create_invalid_body_share_types_not_a_list(self): body = {"consistency_group": {"share_types": ""}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_cg_create_invalid_body_invalid_field(self): body = {"consistency_group": {"unknown_field": ""}} exc = self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.assertTrue('unknown_field' in six.text_type(exc)) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_cg_create_with_invalid_share_types_field(self): body = {"consistency_group": {"share_types": 'iamastring'}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_cg_create_with_invalid_share_types_field_not_uuids(self): body = {"consistency_group": {"share_types": ['iamastring']}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_cg_update_with_name_and_description(self): fake_name = 'fake_name' fake_description = 'fake_description' fake_cg, expected_cg = self._get_fake_cg(name=fake_name, description=fake_description) self.mock_object(self.controller.cg_api, 'get', mock.Mock(return_value=fake_cg)) self.mock_object(self.controller.cg_api, 'update', mock.Mock(return_value=fake_cg)) body = {"consistency_group": {"name": fake_name, "description": fake_description}} context = self.request.environ['manila.context'] res_dict = self.controller.update(self.request, fake_cg['id'], body) self.controller.cg_api.update.assert_called_once_with( context, fake_cg, {"name": fake_name, "description": fake_description}) self.assertEqual(expected_cg, res_dict['consistency_group']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'update') def test_cg_update_cg_not_found(self): body = {"consistency_group": {}} self.mock_object(self.controller.cg_api, 'get', mock.Mock(side_effect=exception.NotFound)) self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, self.request, 'fake_id', body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'update') def test_cg_update_invalid_body(self): body = {"not_consistency_group": {}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.request, 'fake_id', body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'update') def 
test_cg_update_invalid_body_invalid_field(self): body = {"consistency_group": {"unknown_field": ""}} exc = self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.request, 'fake_id', body) self.assertTrue('unknown_field' in six.text_type(exc)) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'update') def test_cg_update_invalid_body_readonly_field(self): body = {"consistency_group": {"share_types": []}} exc = self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.request, 'fake_id', body) self.assertTrue('share_types' in six.text_type(exc)) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'update') def test_cg_list_index(self): fake_cg, expected_cg = self._get_fake_simple_cg() self.mock_object(cg_api.API, 'get_all', mock.Mock(return_value=[fake_cg])) res_dict = self.controller.index(self.request) self.assertEqual([expected_cg], res_dict['consistency_groups']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'get_all') def test_cg_list_index_no_cgs(self): self.mock_object(cg_api.API, 'get_all', mock.Mock(return_value=[])) res_dict = self.controller.index(self.request) self.assertEqual([], res_dict['consistency_groups']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'get_all') def test_cg_list_index_with_limit(self): fake_cg, expected_cg = self._get_fake_simple_cg() fake_cg2, expected_cg2 = self._get_fake_simple_cg(id="fake_id2") self.mock_object(cg_api.API, 'get_all', mock.Mock(return_value=[fake_cg, fake_cg2])) req = fakes.HTTPRequest.blank('/consistency_groups?limit=1', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.index(req) self.assertEqual(1, len(res_dict['consistency_groups'])) self.assertEqual([expected_cg], res_dict['consistency_groups']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def test_cg_list_index_with_limit_and_offset(self): fake_cg, expected_cg = self._get_fake_simple_cg() fake_cg2, expected_cg2 = self._get_fake_simple_cg(id="fake_id2") self.mock_object(cg_api.API, 'get_all', mock.Mock(return_value=[fake_cg, fake_cg2])) req = fakes.HTTPRequest.blank('/consistency_groups?limit=1&offset=1', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.index(req) self.assertEqual(1, len(res_dict['consistency_groups'])) self.assertEqual([expected_cg2], res_dict['consistency_groups']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def test_cg_list_detail(self): fake_cg, expected_cg = self._get_fake_cg() self.mock_object(cg_api.API, 'get_all', mock.Mock(return_value=[fake_cg])) res_dict = self.controller.detail(self.request) self.assertEqual([expected_cg], res_dict['consistency_groups']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'get_all') def test_cg_list_detail_no_cgs(self): self.mock_object(cg_api.API, 'get_all', mock.Mock(return_value=[])) res_dict = self.controller.detail(self.request) self.assertEqual([], res_dict['consistency_groups']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'get_all') def test_cg_list_detail_with_limit(self): fake_cg, expected_cg = self._get_fake_cg() fake_cg2, expected_cg2 = self._get_fake_cg(id="fake_id2") self.mock_object(cg_api.API, 'get_all', mock.Mock(return_value=[fake_cg, fake_cg2])) req 
= fakes.HTTPRequest.blank('/consistency_groups?limit=1', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.detail(req) self.assertEqual(1, len(res_dict['consistency_groups'])) self.assertEqual([expected_cg], res_dict['consistency_groups']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def test_cg_list_detail_with_limit_and_offset(self): fake_cg, expected_cg = self._get_fake_cg() fake_cg2, expected_cg2 = self._get_fake_cg(id="fake_id2") self.mock_object(cg_api.API, 'get_all', mock.Mock(return_value=[fake_cg, fake_cg2])) req = fakes.HTTPRequest.blank('/consistency_groups?limit=1&offset=1', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.detail(req) self.assertEqual(1, len(res_dict['consistency_groups'])) self.assertEqual([expected_cg2], res_dict['consistency_groups']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def test_cg_delete(self): fake_cg, expected_cg = self._get_fake_cg() self.mock_object(cg_api.API, 'get', mock.Mock(return_value=fake_cg)) self.mock_object(cg_api.API, 'delete') res = self.controller.delete(self.request, fake_cg['id']) self.assertEqual(202, res.status_code) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'delete') def test_cg_delete_cg_not_found(self): fake_cg, expected_cg = self._get_fake_cg() self.mock_object(cg_api.API, 'get', mock.Mock(side_effect=exception.NotFound)) self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, self.request, fake_cg['id']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'delete') def test_cg_delete_in_conflicting_status(self): fake_cg, expected_cg = self._get_fake_cg() self.mock_object(cg_api.API, 'get', mock.Mock(return_value=fake_cg)) self.mock_object(cg_api.API, 'delete', mock.Mock( side_effect=exception.InvalidConsistencyGroup(reason='blah'))) self.assertRaises(webob.exc.HTTPConflict, self.controller.delete, self.request, fake_cg['id']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'delete') def test_cg_show(self): fake_cg, expected_cg = self._get_fake_cg() self.mock_object(cg_api.API, 'get', mock.Mock(return_value=fake_cg)) req = fakes.HTTPRequest.blank( '/consistency_groups/%s' % fake_cg['id'], version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.show(req, fake_cg['id']) self.assertEqual(expected_cg, res_dict['consistency_group']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get') def test_cg_show_as_admin(self): fake_cg, expected_cg = self._get_fake_cg() expected_cg['share_server_id'] = None self.mock_object(cg_api.API, 'get', mock.Mock(return_value=fake_cg)) req = fakes.HTTPRequest.blank( '/consistency_groups/%s' % fake_cg['id'], version=self.api_version, experimental=True) admin_context = req.environ['manila.context'].elevated() req.environ['manila.context'] = admin_context res_dict = self.controller.show(req, fake_cg['id']) self.assertEqual(expected_cg, res_dict['consistency_group']) self.mock_policy_check.assert_called_once_with( admin_context, self.resource_name, 'get') def test_cg_show_cg_not_found(self): fake_cg, expected_cg = self._get_fake_cg() self.mock_object(cg_api.API, 'get', mock.Mock(side_effect=exception.NotFound)) req = fakes.HTTPRequest.blank( '/consistency_groups/%s' % 
fake_cg['id'], version=self.api_version, experimental=True) req_context = req.environ['manila.context'] self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, fake_cg['id']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get') @ddt.data(*fakes.fixture_reset_status_with_different_roles) @ddt.unpack def test_consistency_groups_reset_status_with_different_roles( self, role, valid_code, valid_status, version): ctxt = self._get_context(role) cg, req = self._setup_cg_data(version=version) if float(version) > 2.6: action_name = 'reset_status' else: action_name = 'os-reset_status' body = {action_name: {'status': constants.STATUS_ERROR}} req.method = 'POST' req.headers['content-type'] = 'application/json' req.body = six.b(jsonutils.dumps(body)) req.headers['X-Openstack-Manila-Api-Version'] = version req.environ['manila.context'] = ctxt with mock.patch.object( policy, 'check_policy', fakes.mock_fake_admin_check): resp = req.get_response(fakes.app()) # validate response code and model status self.assertEqual(valid_code, resp.status_int) if valid_code == 404: self.assertRaises(exception.NotFound, db.consistency_group_get, ctxt, cg['id']) else: actual_model = db.consistency_group_get(ctxt, cg['id']) self.assertEqual(valid_status, actual_model['status']) @ddt.data(*fakes.fixture_force_delete_with_different_roles) @ddt.unpack def test_consistency_group_force_delete_with_different_roles(self, role, resp_code, version): ctxt = self._get_context(role) cg, req = self._setup_cg_data(version=version) req.method = 'POST' req.headers['content-type'] = 'application/json' if float(version) > 2.6: action_name = 'force_delete' else: action_name = 'os-force_delete' body = {action_name: {}} req.body = six.b(jsonutils.dumps(body)) req.environ['manila.context'] = ctxt with mock.patch.object( policy, 'check_policy', fakes.mock_fake_admin_check): resp = req.get_response(fakes.app()) # validate response self.assertEqual(resp_code, resp.status_int) manila-2.0.0/manila/tests/api/v2/test_services.py0000664000567000056710000002564712701407107023067 0ustar jenkinsjenkins00000000000000# Copyright 2012 IBM Corp. # Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
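# ---------------------------------------------------------------------------
# Illustrative sketch (not manila code): the fixtures below pair each raw
# service row with the payload the services API is expected to return. Two
# derived fields matter: 'status' follows the boolean 'disabled' flag, and
# 'state' is 'up' only when the service heartbeat ('updated_at') is recent
# relative to "now" (fake_utcnow below). The 60-second window here is an
# assumption for the example; the real threshold comes from configuration
# (service_down_time in this family of projects).
import datetime


def service_view(service, now, down_window=datetime.timedelta(seconds=60)):
    """Map a DB service row to the API shape used in the fixtures below."""
    alive = (now - service['updated_at']) < down_window
    return {
        'id': service['id'],
        'binary': service['binary'],
        'host': service['host'],
        'zone': service['availability_zone']['name'],
        'status': 'disabled' if service['disabled'] else 'enabled',
        'state': 'up' if alive else 'down',
        'updated_at': service['updated_at'],
    }
# ---------------------------------------------------------------------------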
import datetime import ddt import mock from oslo_utils import timeutils from manila.api.v2 import services from manila import context from manila import db from manila import exception from manila import policy from manila import test from manila.tests.api import fakes fake_services_list = [ { 'binary': 'manila-scheduler', 'host': 'host1', 'availability_zone': {'name': 'manila1'}, 'id': 1, 'disabled': True, 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2), 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 27), }, { 'binary': 'manila-share', 'host': 'host1', 'availability_zone': {'name': 'manila1'}, 'id': 2, 'disabled': True, 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5), 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 27)}, { 'binary': 'manila-scheduler', 'host': 'host2', 'availability_zone': {'name': 'manila2'}, 'id': 3, 'disabled': False, 'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34), 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28)}, { 'binary': 'manila-share', 'host': 'host2', 'availability_zone': {'name': 'manila2'}, 'id': 4, 'disabled': True, 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38), 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28), }, ] fake_response_service_list = {'services': [ { 'id': 1, 'binary': 'manila-scheduler', 'host': 'host1', 'zone': 'manila1', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2), }, { 'id': 2, 'binary': 'manila-share', 'host': 'host1', 'zone': 'manila1', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5), }, { 'id': 3, 'binary': 'manila-scheduler', 'host': 'host2', 'zone': 'manila2', 'status': 'enabled', 'state': 'down', 'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34), }, { 'id': 4, 'binary': 'manila-share', 'host': 'host2', 'zone': 'manila2', 'status': 'disabled', 'state': 'down', 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38), }, ]} def fake_service_get_all(context): return fake_services_list def fake_service_get_by_host_binary(context, host, binary): for service in fake_services_list: if service['host'] == host and service['binary'] == binary: return service return None def fake_service_get_by_id(value): for service in fake_services_list: if service['id'] == value: return service return None def fake_service_update(context, service_id, values): service = fake_service_get_by_id(service_id) if service is None: raise exception.ServiceNotFound(service_id=service_id) else: {'host': 'host1', 'binary': 'manila-share', 'disabled': values['disabled']} def fake_utcnow(): return datetime.datetime(2012, 10, 29, 13, 42, 11) @ddt.ddt class ServicesTest(test.TestCase): def setUp(self): super(self.__class__, self).setUp() self.mock_object(db, "service_get_all", fake_service_get_all) self.mock_object(timeutils, "utcnow", fake_utcnow) self.mock_object(db, "service_get_by_args", fake_service_get_by_host_binary) self.mock_object(db, "service_update", fake_service_update) self.context = context.get_admin_context() self.controller = services.ServiceController() self.controller_legacy = services.ServiceControllerLegacy() self.resource_name = self.controller.resource_name self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) @ddt.data( ('os-services', '1.0', services.ServiceControllerLegacy), ('os-services', '2.6', services.ServiceControllerLegacy), ('services', '2.7', services.ServiceController), ) @ddt.unpack def test_services_list(self, url, version, controller): req = 
fakes.HTTPRequest.blank('/%s' % url, version=version) req.environ['manila.context'] = self.context res_dict = controller().index(req) self.assertEqual(fake_response_service_list, res_dict) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'index') def test_services_list_with_host(self): req = fakes.HTTPRequest.blank('/services?host=host1', version='2.7') req.environ['manila.context'] = self.context res_dict = self.controller.index(req) response = {'services': [ fake_response_service_list['services'][0], fake_response_service_list['services'][1], ]} self.assertEqual(response, res_dict) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'index') def test_services_list_with_binary(self): req = fakes.HTTPRequest.blank( '/services?binary=manila-share', version='2.7') req.environ['manila.context'] = self.context res_dict = self.controller.index(req) response = {'services': [ fake_response_service_list['services'][1], fake_response_service_list['services'][3], ]} self.assertEqual(response, res_dict) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'index') def test_services_list_with_zone(self): req = fakes.HTTPRequest.blank('/services?zone=manila1', version='2.7') req.environ['manila.context'] = self.context res_dict = self.controller.index(req) response = {'services': [ fake_response_service_list['services'][0], fake_response_service_list['services'][1], ]} self.assertEqual(response, res_dict) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'index') def test_services_list_with_status(self): req = fakes.HTTPRequest.blank( '/services?status=enabled', version='2.7') req.environ['manila.context'] = self.context res_dict = self.controller.index(req) response = {'services': [ fake_response_service_list['services'][2], ]} self.assertEqual(response, res_dict) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'index') def test_services_list_with_state(self): req = fakes.HTTPRequest.blank('/services?state=up', version='2.7') req.environ['manila.context'] = self.context res_dict = self.controller.index(req) response = {'services': [ fake_response_service_list['services'][0], fake_response_service_list['services'][1], ]} self.assertEqual(response, res_dict) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'index') def test_services_list_with_host_binary(self): req = fakes.HTTPRequest.blank( "/services?binary=manila-share&state=up", version='2.7') req.environ['manila.context'] = self.context res_dict = self.controller.index(req) response = {'services': [fake_response_service_list['services'][1], ]} self.assertEqual(response, res_dict) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'index') @ddt.data( ('os-services', '1.0', services.ServiceControllerLegacy), ('os-services', '2.6', services.ServiceControllerLegacy), ('services', '2.7', services.ServiceController), ) @ddt.unpack def test_services_enable(self, url, version, controller): body = {'host': 'host1', 'binary': 'manila-share'} req = fakes.HTTPRequest.blank('/fooproject/%s' % url, version=version) res_dict = controller().update(req, "enable", body) self.assertFalse(res_dict['disabled']) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'update') @ddt.data( 
('os-services', '1.0', services.ServiceControllerLegacy), ('os-services', '2.6', services.ServiceControllerLegacy), ('services', '2.7', services.ServiceController), ) @ddt.unpack def test_services_disable(self, url, version, controller): req = fakes.HTTPRequest.blank( '/fooproject/%s/disable' % url, version=version) body = {'host': 'host1', 'binary': 'manila-share'} res_dict = controller().update(req, "disable", body) self.assertTrue(res_dict['disabled']) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'update') @ddt.data( ('os-services', '2.7', services.ServiceControllerLegacy), ('services', '2.6', services.ServiceController), ('services', '1.0', services.ServiceController), ) @ddt.unpack def test_services_update_legacy_url_2_dot_7_api_not_found(self, url, version, controller): req = fakes.HTTPRequest.blank( '/fooproject/%s/fake' % url, version=version) body = {'host': 'host1', 'binary': 'manila-share'} self.assertRaises( exception.VersionNotFoundForAPIMethod, controller().update, req, "disable", body, ) @ddt.data( ('os-services', '2.7', services.ServiceControllerLegacy), ('services', '2.6', services.ServiceController), ('services', '1.0', services.ServiceController), ) @ddt.unpack def test_services_list_api_not_found(self, url, version, controller): req = fakes.HTTPRequest.blank('/fooproject/%s' % url, version=version) self.assertRaises( exception.VersionNotFoundForAPIMethod, controller().index, req) manila-2.0.0/manila/tests/api/v2/test_quota_class_sets.py0000664000567000056710000001513312701407107024605 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright (c) 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
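# The quota-class-set test classes below reuse the parametrization
# convention from the service API tests above: each ddt tuple pairs a URL
# prefix with a microversion and the controller expected to serve it, so a
# single test body covers both the legacy ('os-' prefixed, <= 2.6) and the
# current (>= 2.7) endpoints. A condensed, illustrative sketch of the
# pattern (the resource name 'foo' is hypothetical):
#
#     @ddt.data(
#         ('os-foo', '2.6', FooControllerLegacy),
#         ('foo', '2.7', FooController),
#     )
#     @ddt.unpack
#     def test_index(self, url, version, controller):
#         req = fakes.HTTPRequest.blank('/%s' % url, version=version)
#         controller().index(req)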
""" Tests for manila.api.v1.quota_class_sets.py """ import copy import ddt import mock from oslo_config import cfg import webob.exc import webob.response from manila.api.v2 import quota_class_sets from manila import context from manila import exception from manila import policy from manila import test from manila.tests.api import fakes CONF = cfg.CONF REQ = mock.MagicMock() REQ.environ = {'manila.context': context.get_admin_context()} REQ.environ['manila.context'].is_admin = True REQ.environ['manila.context'].auth_token = 'foo_auth_token' REQ.environ['manila.context'].project_id = 'foo_project_id' REQ_MEMBER = copy.deepcopy(REQ) REQ_MEMBER.environ['manila.context'].is_admin = False @ddt.ddt class QuotaSetsControllerTest(test.TestCase): def setUp(self): super(self.__class__, self).setUp() self.controller = quota_class_sets.QuotaClassSetsController() self.resource_name = self.controller.resource_name self.class_name = 'foo_class_name' self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) @ddt.data( ('os-', '1.0', quota_class_sets.QuotaClassSetsControllerLegacy), ('os-', '2.6', quota_class_sets.QuotaClassSetsControllerLegacy), ('', '2.7', quota_class_sets.QuotaClassSetsController), ) @ddt.unpack def test_show_quota(self, url, version, controller): req = fakes.HTTPRequest.blank( '/fooproject/%squota-class-sets' % url, version=version, use_admin_context=True) quotas = { "shares": 23, "snapshots": 34, "gigabytes": 45, "snapshot_gigabytes": 56, "share_networks": 67, } expected = { 'quota_class_set': { 'id': self.class_name, 'shares': quotas.get('shares', 50), 'gigabytes': quotas.get('gigabytes', 1000), 'snapshots': quotas.get('snapshots', 50), 'snapshot_gigabytes': quotas.get('snapshot_gigabytes', 1000), 'share_networks': quotas.get('share_networks', 10), } } for k, v in quotas.items(): CONF.set_default('quota_' + k, v) result = controller().show(req, self.class_name) self.assertEqual(expected, result) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'show') def test_show_quota_not_authorized(self): self.mock_object( quota_class_sets.db, 'authorize_quota_class_context', mock.Mock(side_effect=exception.NotAuthorized)) self.assertRaises( webob.exc.HTTPForbidden, self.controller.show, REQ, self.class_name) self.mock_policy_check.assert_called_once_with( REQ.environ['manila.context'], self.resource_name, 'show') @ddt.data( ('os-', '1.0', quota_class_sets.QuotaClassSetsControllerLegacy), ('os-', '2.6', quota_class_sets.QuotaClassSetsControllerLegacy), ('', '2.7', quota_class_sets.QuotaClassSetsController), ) @ddt.unpack def test_update_quota(self, url, version, controller): req = fakes.HTTPRequest.blank( '/fooproject/%squota-class-sets' % url, version=version, use_admin_context=True) CONF.set_default('quota_shares', 789) body = { 'quota_class_set': { 'class_name': self.class_name, 'shares': 788, } } expected = { 'quota_class_set': { 'shares': body['quota_class_set']['shares'], 'gigabytes': 1000, 'snapshots': 50, 'snapshot_gigabytes': 1000, 'share_networks': 10, } } update_result = controller().update( req, self.class_name, body=body) self.assertEqual(expected, update_result) show_result = controller().show(req, self.class_name) expected['quota_class_set']['id'] = self.class_name self.assertEqual(expected, show_result) self.mock_policy_check.assert_has_calls([mock.call( req.environ['manila.context'], self.resource_name, action_name) for action_name in ('update', 'show')]) def 
test_update_quota_not_authorized(self): body = { 'quota_class_set': { 'class_name': self.class_name, 'shares': 13, } } self.assertRaises( webob.exc.HTTPForbidden, self.controller.update, REQ_MEMBER, self.class_name, body=body) self.mock_policy_check.assert_called_once_with( REQ_MEMBER.environ['manila.context'], self.resource_name, 'update') @ddt.data( ('os-', '2.7', quota_class_sets.QuotaClassSetsControllerLegacy), ('', '2.6', quota_class_sets.QuotaClassSetsController), ('', '2.0', quota_class_sets.QuotaClassSetsController), ) @ddt.unpack def test_api_not_found(self, url, version, controller): req = fakes.HTTPRequest.blank( '/fooproject/%squota-class-sets' % url, version=version) for method_name in ('show', 'update'): self.assertRaises( exception.VersionNotFoundForAPIMethod, getattr(controller(), method_name), req, self.class_name) @ddt.data( ('os-', '2.7', quota_class_sets.QuotaClassSetsControllerLegacy), ('', '2.6', quota_class_sets.QuotaClassSetsController), ('', '2.0', quota_class_sets.QuotaClassSetsController), ) @ddt.unpack def test_update_api_not_found(self, url, version, controller): req = fakes.HTTPRequest.blank( '/fooproject/%squota-class-sets' % url, version=version) self.assertRaises( exception.VersionNotFoundForAPIMethod, controller().update, req, self.class_name) manila-2.0.0/manila/tests/api/v2/test_shares.py0000664000567000056710000023541312701407112022517 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
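# The migration tests in ShareAPITest below choose the action body and the
# controller method from the request microversion; the sketch that follows
# is only a condensed restatement of the branching those tests perform
# (thresholds 2.7 and 2.15; 'fake_host' is the tests' own placeholder):
#
#     if (api_version.APIVersionRequest(version) <
#             api_version.APIVersionRequest('2.7')):
#         body = {'os-migrate_share': {'host': 'fake_host'}}
#         method = 'migrate_share_legacy'
#     elif (api_version.APIVersionRequest(version) <
#             api_version.APIVersionRequest('2.15')):
#         body = {'migrate_share': {'host': 'fake_host'}}
#         method = 'migrate_share'
#     else:
#         body = {'migration_start': {'host': 'fake_host'}}
#         method = 'migration_start'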
import copy import datetime import ddt import mock from oslo_config import cfg from oslo_serialization import jsonutils import six import webob from manila.api import common from manila.api.openstack import api_version_request as api_version from manila.api.v2 import share_replicas from manila.api.v2 import shares from manila.common import constants from manila import context from manila import db from manila import exception from manila import policy from manila.share import api as share_api from manila.share import share_types from manila import test from manila.tests.api.contrib import stubs from manila.tests.api import fakes from manila.tests import db_utils from manila import utils CONF = cfg.CONF @ddt.ddt class ShareAPITest(test.TestCase): """Share API Test.""" def setUp(self): super(self.__class__, self).setUp() self.controller = shares.ShareController() self.mock_object(db, 'availability_zone_get') self.mock_object(share_api.API, 'get_all', stubs.stub_get_all_shares) self.mock_object(share_api.API, 'get', stubs.stub_share_get) self.mock_object(share_api.API, 'update', stubs.stub_share_update) self.mock_object(share_api.API, 'delete', stubs.stub_share_delete) self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get) self.maxDiff = None self.share = { "size": 100, "display_name": "Share Test Name", "display_description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "is_public": False, } self.create_mock = mock.Mock( return_value=stubs.stub_share( '1', display_name=self.share['display_name'], display_description=self.share['display_description'], size=100, share_proto=self.share['share_proto'].upper(), instance={ 'availability_zone': self.share['availability_zone'], }) ) self.vt = { 'id': 'fake_volume_type_id', 'name': 'fake_volume_type_name', } CONF.set_default("default_share_type", None) def _get_expected_share_detailed_response(self, values=None, admin=False): share = { 'id': '1', 'name': 'displayname', 'availability_zone': 'fakeaz', 'description': 'displaydesc', 'export_location': 'fake_location', 'export_locations': ['fake_location', 'fake_location2'], 'project_id': 'fakeproject', 'host': 'fakehost', 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'share_proto': 'FAKEPROTO', 'metadata': {}, 'size': 1, 'snapshot_id': '2', 'share_network_id': None, 'status': 'fakestatus', 'share_type': '1', 'volume_type': '1', 'snapshot_support': True, 'is_public': False, 'consistency_group_id': None, 'source_cgsnapshot_member_id': None, 'task_state': None, 'share_type_name': None, 'links': [ { 'href': 'http://localhost/v1/fake/shares/1', 'rel': 'self' }, { 'href': 'http://localhost/fake/shares/1', 'rel': 'bookmark' } ], } if values: if 'display_name' in values: values['name'] = values.pop('display_name') if 'display_description' in values: values['description'] = values.pop('display_description') share.update(values) if share.get('share_proto'): share['share_proto'] = share['share_proto'].upper() if admin: share['share_server_id'] = 'fake_share_server_id' return {'share': share} @ddt.data("2.0", "2.1") def test_share_create_original(self, microversion): self.mock_object(share_api.API, 'create', self.create_mock) body = {"share": copy.deepcopy(self.share)} req = fakes.HTTPRequest.blank('/shares', version=microversion) res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response(self.share) expected['share'].pop('snapshot_support') expected['share'].pop('share_type_name') expected['share'].pop('task_state') 
expected['share'].pop('consistency_group_id') expected['share'].pop('source_cgsnapshot_member_id') self.assertEqual(expected, res_dict) @ddt.data("2.2", "2.3") def test_share_create_with_snapshot_support_without_cg(self, microversion): self.mock_object(share_api.API, 'create', self.create_mock) body = {"share": copy.deepcopy(self.share)} req = fakes.HTTPRequest.blank('/shares', version=microversion) res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response(self.share) expected['share'].pop('share_type_name') expected['share'].pop('task_state') expected['share'].pop('consistency_group_id') expected['share'].pop('source_cgsnapshot_member_id') self.assertEqual(expected, res_dict) @ddt.data("2.4", "2.5") def test_share_create_with_consistency_group(self, microversion): self.mock_object(share_api.API, 'create', self.create_mock) body = {"share": copy.deepcopy(self.share)} req = fakes.HTTPRequest.blank('/shares', version=microversion) res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response(self.share) expected['share'].pop('share_type_name') if (api_version.APIVersionRequest(microversion) == api_version.APIVersionRequest('2.4')): expected['share'].pop('task_state') self.assertEqual(expected, res_dict) def test_share_create_with_valid_default_share_type(self): self.mock_object(share_types, 'get_share_type_by_name', mock.Mock(return_value=self.vt)) CONF.set_default("default_share_type", self.vt['name']) self.mock_object(share_api.API, 'create', self.create_mock) body = {"share": copy.deepcopy(self.share)} req = fakes.HTTPRequest.blank('/shares', version='2.7') res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response(self.share) share_types.get_share_type_by_name.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.vt['name']) self.assertEqual(expected, res_dict) def test_share_create_with_invalid_default_share_type(self): self.mock_object( share_types, 'get_default_share_type', mock.Mock(side_effect=exception.ShareTypeNotFoundByName( self.vt['name'])), ) CONF.set_default("default_share_type", self.vt['name']) req = fakes.HTTPRequest.blank('/shares', version='2.7') self.assertRaises(exception.ShareTypeNotFoundByName, self.controller.create, req, {'share': self.share}) share_types.get_default_share_type.assert_called_once_with() def test_share_create_with_replication(self): self.mock_object(share_api.API, 'create', self.create_mock) body = {"share": copy.deepcopy(self.share)} req = fakes.HTTPRequest.blank( '/shares', version=share_replicas.MIN_SUPPORTED_API_VERSION) res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response(self.share) expected['share']['task_state'] = None expected['share']['consistency_group_id'] = None expected['share']['source_cgsnapshot_member_id'] = None expected['share']['replication_type'] = None expected['share']['share_type_name'] = None expected['share']['has_replicas'] = False expected['share']['access_rules_status'] = 'active' expected['share'].pop('export_location') expected['share'].pop('export_locations') self.assertEqual(expected, res_dict) def test_share_create_with_share_net(self): shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "share_network_id": "fakenetid" } create_mock = mock.Mock(return_value=stubs.stub_share('1', display_name=shr['name'], display_description=shr['description'], 
size=shr['size'], share_proto=shr['share_proto'].upper(), availability_zone=shr['availability_zone'], share_network_id=shr['share_network_id'])) self.mock_object(share_api.API, 'create', create_mock) self.mock_object(share_api.API, 'get_share_network', mock.Mock( return_value={'id': 'fakenetid'})) body = {"share": copy.deepcopy(shr)} req = fakes.HTTPRequest.blank('/shares', version='2.7') res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response(shr) self.assertEqual(expected, res_dict) self.assertEqual("fakenetid", create_mock.call_args[1]['share_network_id']) @ddt.data('2.6', '2.7', '2.14', '2.15') def test_migration_start(self, version): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], use_admin_context=True, version=version) req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True if api_version.APIVersionRequest(version) < ( api_version.APIVersionRequest("2.7")): body = {'os-migrate_share': {'host': 'fake_host'}} method = 'migrate_share_legacy' elif api_version.APIVersionRequest(version) < ( api_version.APIVersionRequest("2.15")): body = {'migrate_share': {'host': 'fake_host'}} method = 'migrate_share' else: body = {'migration_start': {'host': 'fake_host'}} method = 'migration_start' self.mock_object(share_api.API, 'migration_start') response = getattr(self.controller, method)(req, share['id'], body) self.assertEqual(202, response.status_int) def test_migration_start_has_replicas(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], use_admin_context=True) req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request = api_version.APIVersionRequest('2.11') req.api_version_request.experimental = True body = {'migrate_share': {'host': 'fake_host'}} self.mock_object(share_api.API, 'migration_start', mock.Mock(side_effect=exception.Conflict(err='err'))) self.assertRaises(webob.exc.HTTPConflict, self.controller.migrate_share, req, share['id'], body) @ddt.data('2.6', '2.7', '2.14', '2.15') def test_migration_start_no_share_id(self, version): req = fakes.HTTPRequest.blank('/shares/%s/action' % 'fake_id', use_admin_context=True, version=version) req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True if api_version.APIVersionRequest(version) < ( api_version.APIVersionRequest("2.7")): body = {'os-migrate_share': {'host': 'fake_host'}} method = 'migrate_share_legacy' elif api_version.APIVersionRequest(version) < ( api_version.APIVersionRequest("2.15")): body = {'migrate_share': {'host': 'fake_host'}} method = 'migrate_share' else: body = {'migration_start': {'host': 'fake_host'}} method = 'migration_start' self.mock_object(share_api.API, 'migration_start') self.mock_object(share_api.API, 'get', mock.Mock(side_effect=[exception.NotFound])) self.assertRaises(webob.exc.HTTPNotFound, getattr(self.controller, method), req, 'fake_id', body) @ddt.data('2.6', '2.7', '2.14', '2.15') def test_migration_start_no_host(self, version): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], use_admin_context=True, version=version) req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True if api_version.APIVersionRequest(version) < ( api_version.APIVersionRequest("2.7")): body = {'os-migrate_share': {}} method = 'migrate_share_legacy' 
elif api_version.APIVersionRequest(version) < ( api_version.APIVersionRequest("2.15")): body = {'migrate_share': {}} method = 'migrate_share' else: body = {'migration_start': {}} method = 'migration_start' self.mock_object(share_api.API, 'migration_start') self.assertRaises(webob.exc.HTTPBadRequest, getattr(self.controller, method), req, share['id'], body) @ddt.data('2.6', '2.7', '2.14', '2.15') def test_migration_start_invalid_force_host_copy(self, version): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], use_admin_context=True, version=version) req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True if api_version.APIVersionRequest(version) < ( api_version.APIVersionRequest("2.7")): body = {'os-migrate_share': {'host': 'fake_host', 'force_host_copy': 'fake'}} method = 'migrate_share_legacy' elif api_version.APIVersionRequest(version) < ( api_version.APIVersionRequest("2.15")): body = {'migrate_share': {'host': 'fake_host', 'force_host_copy': 'fake'}} method = 'migrate_share' else: body = {'migration_start': {'host': 'fake_host', 'force_host_copy': 'fake'}} method = 'migration_start' self.mock_object(share_api.API, 'migration_start') self.assertRaises(webob.exc.HTTPBadRequest, getattr(self.controller, method), req, share['id'], body) def test_migration_start_invalid_notify(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], use_admin_context=True, version='2.15') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True body = {'migration_start': {'host': 'fake_host', 'notify': 'error'}} self.mock_object(share_api.API, 'migration_start') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.migration_start, req, share['id'], body) def test_reset_task_state(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], use_admin_context=True, version='2.15') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True update = {'task_state': constants.TASK_STATE_MIGRATION_ERROR} body = {'reset_task_state': update} self.mock_object(db, 'share_update') response = self.controller.reset_task_state(req, share['id'], body) self.assertEqual(202, response.status_int) db.share_update.assert_called_once_with(utils.IsAMatcher( context.RequestContext), share['id'], update) def test_reset_task_state_error_body(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], use_admin_context=True, version='2.15') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True update = {'error': 'error'} body = {'reset_task_state': update} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.reset_task_state, req, share['id'], body) def test_reset_task_state_error_empty(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], use_admin_context=True, version='2.15') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True update = {'task_state': None} body = {'reset_task_state': update} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.reset_task_state, req, share['id'], body) def test_reset_task_state_error_invalid(self): share = db_utils.create_share() req = 
fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], use_admin_context=True, version='2.15') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True update = {'task_state': 'error'} body = {'reset_task_state': update} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.reset_task_state, req, share['id'], body) def test_reset_task_state_not_found(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], use_admin_context=True, version='2.15') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True update = {'task_state': constants.TASK_STATE_MIGRATION_ERROR} body = {'reset_task_state': update} self.mock_object(db, 'share_update', mock.Mock(side_effect=exception.NotFound())) self.assertRaises(webob.exc.HTTPNotFound, self.controller.reset_task_state, req, share['id'], body) db.share_update.assert_called_once_with(utils.IsAMatcher( context.RequestContext), share['id'], update) def test_migration_complete(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], use_admin_context=True, version='2.15') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True body = {'migration_complete': None} self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, 'migration_complete') response = self.controller.migration_complete(req, share['id'], body) self.assertEqual(202, response.status_int) share_api.API.migration_complete.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share) def test_migration_complete_not_found(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], use_admin_context=True, version='2.15') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True body = {'migration_complete': None} self.mock_object(share_api.API, 'get', mock.Mock(side_effect=exception.NotFound())) self.mock_object(share_api.API, 'migration_complete') self.assertRaises(webob.exc.HTTPNotFound, self.controller.migration_complete, req, share['id'], body) def test_migration_cancel(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], use_admin_context=True, version='2.15') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True body = {'migration_cancel': None} self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, 'migration_cancel') response = self.controller.migration_cancel(req, share['id'], body) self.assertEqual(202, response.status_int) share_api.API.migration_cancel.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share) def test_migration_cancel_not_found(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], use_admin_context=True, version='2.15') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True body = {'migration_cancel': None} self.mock_object(share_api.API, 'get', mock.Mock(side_effect=exception.NotFound())) self.mock_object(share_api.API, 'migration_cancel') self.assertRaises(webob.exc.HTTPNotFound, self.controller.migration_cancel, req, share['id'], body) def test_migration_get_progress(self): 
share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], use_admin_context=True, version='2.15') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True body = {'migration_get_progress': None} expected = {'total_progress': 'fake', 'current_file_progress': 'fake', 'current_file_path': 'fake', } self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, 'migration_get_progress', mock.Mock(return_value=expected)) response = self.controller.migration_get_progress(req, share['id'], body) self.assertEqual(expected, response) share_api.API.migration_get_progress.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share) def test_migration_get_progress_not_found(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], use_admin_context=True, version='2.15') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True body = {'migration_get_progress': None} self.mock_object(share_api.API, 'get', mock.Mock(side_effect=exception.NotFound())) self.mock_object(share_api.API, 'migration_get_progress') self.assertRaises(webob.exc.HTTPNotFound, self.controller.migration_get_progress, req, share['id'], body) def test_share_create_from_snapshot_without_share_net_no_parent(self): shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "snapshot_id": 333, "share_network_id": None, } create_mock = mock.Mock(return_value=stubs.stub_share('1', display_name=shr['name'], display_description=shr['description'], size=shr['size'], share_proto=shr['share_proto'].upper(), snapshot_id=shr['snapshot_id'], instance=dict( availability_zone=shr['availability_zone'], share_network_id=shr['share_network_id']))) self.mock_object(share_api.API, 'create', create_mock) body = {"share": copy.deepcopy(shr)} req = fakes.HTTPRequest.blank('/shares', version='2.7') res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response(shr) self.assertEqual(expected, res_dict) def test_share_create_from_snapshot_without_share_net_parent_exists(self): shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "snapshot_id": 333, "share_network_id": None, } parent_share_net = 444 create_mock = mock.Mock(return_value=stubs.stub_share('1', display_name=shr['name'], display_description=shr['description'], size=shr['size'], share_proto=shr['share_proto'].upper(), snapshot_id=shr['snapshot_id'], instance=dict( availability_zone=shr['availability_zone'], share_network_id=shr['share_network_id']))) self.mock_object(share_api.API, 'create', create_mock) self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get) self.mock_object(share_api.API, 'get', mock.Mock( return_value=mock.Mock( instance={'share_network_id': parent_share_net}))) self.mock_object(share_api.API, 'get_share_network', mock.Mock( return_value={'id': parent_share_net})) body = {"share": copy.deepcopy(shr)} req = fakes.HTTPRequest.blank('/shares', version='2.7') res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response(shr) self.assertEqual(expected, res_dict) self.assertEqual(parent_share_net, create_mock.call_args[1]['share_network_id']) def 
test_share_create_from_snapshot_with_share_net_equals_parent(self): parent_share_net = 444 shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "snapshot_id": 333, "share_network_id": parent_share_net } create_mock = mock.Mock(return_value=stubs.stub_share('1', display_name=shr['name'], display_description=shr['description'], size=shr['size'], share_proto=shr['share_proto'].upper(), snapshot_id=shr['snapshot_id'], instance=dict( availability_zone=shr['availability_zone'], share_network_id=shr['share_network_id']))) self.mock_object(share_api.API, 'create', create_mock) self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get) self.mock_object(share_api.API, 'get', mock.Mock( return_value=mock.Mock( instance={'share_network_id': parent_share_net}))) self.mock_object(share_api.API, 'get_share_network', mock.Mock( return_value={'id': parent_share_net})) body = {"share": copy.deepcopy(shr)} req = fakes.HTTPRequest.blank('/shares', version='2.7') res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response(shr) self.assertEqual(expected, res_dict) self.assertEqual(parent_share_net, create_mock.call_args[1]['share_network_id']) def test_share_create_from_snapshot_invalid_share_net(self): self.mock_object(share_api.API, 'create') shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "snapshot_id": 333, "share_network_id": 1234 } body = {"share": shr} req = fakes.HTTPRequest.blank('/shares', version='2.7') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) def test_share_creation_fails_with_bad_size(self): shr = {"size": '', "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1"} body = {"share": shr} req = fakes.HTTPRequest.blank('/shares', version='2.7') self.assertRaises(exception.InvalidInput, self.controller.create, req, body) def test_share_create_no_body(self): req = fakes.HTTPRequest.blank('/shares', version='2.7') self.assertRaises(webob.exc.HTTPUnprocessableEntity, self.controller.create, req, {}) def test_share_create_invalid_availability_zone(self): self.mock_object( db, 'availability_zone_get', mock.Mock(side_effect=exception.AvailabilityZoneNotFound(id='id')) ) body = {"share": copy.deepcopy(self.share)} req = fakes.HTTPRequest.blank('/shares', version='2.7') self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, req, body) def test_share_show(self): req = fakes.HTTPRequest.blank('/shares/1') expected = self._get_expected_share_detailed_response() expected['share'].pop('snapshot_support') expected['share'].pop('share_type_name') expected['share'].pop('task_state') expected['share'].pop('consistency_group_id') expected['share'].pop('source_cgsnapshot_member_id') res_dict = self.controller.show(req, '1') self.assertEqual(expected, res_dict) def test_share_show_with_consistency_group(self): req = fakes.HTTPRequest.blank('/shares/1', version='2.4') expected = self._get_expected_share_detailed_response() expected['share'].pop('share_type_name') expected['share'].pop('task_state') res_dict = self.controller.show(req, '1') self.assertEqual(expected, res_dict) def test_share_show_with_share_type_name(self): req = fakes.HTTPRequest.blank('/shares/1', version='2.6') res_dict = self.controller.show(req, '1') expected = 
self._get_expected_share_detailed_response() expected['share']['consistency_group_id'] = None expected['share']['source_cgsnapshot_member_id'] = None expected['share']['share_type_name'] = None expected['share']['task_state'] = None self.assertEqual(expected, res_dict) def test_share_show_admin(self): req = fakes.HTTPRequest.blank('/shares/1', use_admin_context=True) expected = self._get_expected_share_detailed_response(admin=True) expected['share'].pop('snapshot_support') expected['share'].pop('share_type_name') expected['share'].pop('task_state') expected['share'].pop('consistency_group_id') expected['share'].pop('source_cgsnapshot_member_id') res_dict = self.controller.show(req, '1') self.assertEqual(expected, res_dict) def test_share_show_no_share(self): self.mock_object(share_api.API, 'get', stubs.stub_share_get_notfound) req = fakes.HTTPRequest.blank('/shares/1') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, '1') def test_share_show_with_replication_type(self): req = fakes.HTTPRequest.blank( '/shares/1', version=share_replicas.MIN_SUPPORTED_API_VERSION) res_dict = self.controller.show(req, '1') expected = self._get_expected_share_detailed_response() expected['share']['task_state'] = None expected['share']['consistency_group_id'] = None expected['share']['source_cgsnapshot_member_id'] = None expected['share']['access_rules_status'] = 'active' expected['share']['share_type_name'] = None expected['share']['replication_type'] = None expected['share']['has_replicas'] = False expected['share'].pop('export_location') expected['share'].pop('export_locations') self.assertEqual(expected, res_dict) def test_share_delete(self): req = fakes.HTTPRequest.blank('/shares/1') resp = self.controller.delete(req, 1) self.assertEqual(202, resp.status_int) def test_share_delete_has_replicas(self): req = fakes.HTTPRequest.blank('/shares/1') self.mock_object(share_api.API, 'get', mock.Mock(return_value=self.share)) self.mock_object(share_api.API, 'delete', mock.Mock(side_effect=exception.Conflict(err='err'))) self.assertRaises( webob.exc.HTTPConflict, self.controller.delete, req, 1) def test_share_delete_in_consistency_group_param_not_provided(self): fake_share = stubs.stub_share('fake_share', consistency_group_id='fake_cg_id') self.mock_object(share_api.API, 'get', mock.Mock(return_value=fake_share)) req = fakes.HTTPRequest.blank('/shares/1') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete, req, 1) def test_share_delete_in_consistency_group(self): fake_share = stubs.stub_share('fake_share', consistency_group_id='fake_cg_id') self.mock_object(share_api.API, 'get', mock.Mock(return_value=fake_share)) req = fakes.HTTPRequest.blank( '/shares/1?consistency_group_id=fake_cg_id') resp = self.controller.delete(req, 1) self.assertEqual(202, resp.status_int) def test_share_delete_in_consistency_group_wrong_id(self): fake_share = stubs.stub_share('fake_share', consistency_group_id='fake_cg_id') self.mock_object(share_api.API, 'get', mock.Mock(return_value=fake_share)) req = fakes.HTTPRequest.blank( '/shares/1?consistency_group_id=not_fake_cg_id') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete, req, 1) def test_share_update(self): shr = self.share body = {"share": shr} req = fakes.HTTPRequest.blank('/share/1') res_dict = self.controller.update(req, 1, body) self.assertEqual(shr["display_name"], res_dict['share']["name"]) self.assertEqual(shr["display_description"], res_dict['share']["description"]) self.assertEqual(shr['is_public'], 
res_dict['share']['is_public']) def test_share_update_with_consistency_group(self): shr = self.share body = {"share": shr} req = fakes.HTTPRequest.blank('/share/1', version="2.4") res_dict = self.controller.update(req, 1, body) self.assertIsNone(res_dict['share']["consistency_group_id"]) self.assertIsNone(res_dict['share']["source_cgsnapshot_member_id"]) def test_share_not_updates_size(self): req = fakes.HTTPRequest.blank('/share/1') res_dict = self.controller.update(req, 1, {"share": self.share}) self.assertNotEqual(res_dict['share']["size"], self.share["size"]) def test_share_delete_no_share(self): self.mock_object(share_api.API, 'get', stubs.stub_share_get_notfound) req = fakes.HTTPRequest.blank('/shares/1') self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, 1) def _share_list_summary_with_search_opts(self, use_admin_context): search_opts = { 'name': 'fake_name', 'status': constants.STATUS_AVAILABLE, 'share_server_id': 'fake_share_server_id', 'share_type_id': 'fake_share_type_id', 'snapshot_id': 'fake_snapshot_id', 'host': 'fake_host', 'share_network_id': 'fake_share_network_id', 'metadata': '%7B%27k1%27%3A+%27v1%27%7D', # serialized k1=v1 'extra_specs': '%7B%27k2%27%3A+%27v2%27%7D', # serialized k2=v2 'sort_key': 'fake_sort_key', 'sort_dir': 'fake_sort_dir', 'limit': '1', 'offset': '1', 'is_public': 'False', } # fake_key should be filtered for non-admin url = '/shares?fake_key=fake_value' for k, v in search_opts.items(): url = url + '&' + k + '=' + v req = fakes.HTTPRequest.blank(url, use_admin_context=use_admin_context) shares = [ {'id': 'id1', 'display_name': 'n1'}, {'id': 'id2', 'display_name': 'n2'}, {'id': 'id3', 'display_name': 'n3'}, ] self.mock_object(share_api.API, 'get_all', mock.Mock(return_value=shares)) result = self.controller.index(req) search_opts_expected = { 'display_name': search_opts['name'], 'status': search_opts['status'], 'share_server_id': search_opts['share_server_id'], 'share_type_id': search_opts['share_type_id'], 'snapshot_id': search_opts['snapshot_id'], 'host': search_opts['host'], 'share_network_id': search_opts['share_network_id'], 'metadata': {'k1': 'v1'}, 'extra_specs': {'k2': 'v2'}, 'is_public': 'False', } if use_admin_context: search_opts_expected.update({'fake_key': 'fake_value'}) share_api.API.get_all.assert_called_once_with( req.environ['manila.context'], sort_key=search_opts['sort_key'], sort_dir=search_opts['sort_dir'], search_opts=search_opts_expected, ) self.assertEqual(1, len(result['shares'])) self.assertEqual(shares[1]['id'], result['shares'][0]['id']) self.assertEqual( shares[1]['display_name'], result['shares'][0]['name']) def test_share_list_summary_with_search_opts_by_non_admin(self): self._share_list_summary_with_search_opts(use_admin_context=False) def test_share_list_summary_with_search_opts_by_admin(self): self._share_list_summary_with_search_opts(use_admin_context=True) def test_share_list_summary(self): self.mock_object(share_api.API, 'get_all', stubs.stub_share_get_all_by_project) req = fakes.HTTPRequest.blank('/shares') res_dict = self.controller.index(req) expected = { 'shares': [ { 'name': 'displayname', 'id': '1', 'links': [ { 'href': 'http://localhost/v1/fake/shares/1', 'rel': 'self' }, { 'href': 'http://localhost/fake/shares/1', 'rel': 'bookmark' } ], } ] } self.assertEqual(expected, res_dict) def _share_list_detail_with_search_opts(self, use_admin_context): search_opts = { 'name': 'fake_name', 'status': constants.STATUS_AVAILABLE, 'share_server_id': 'fake_share_server_id', 'share_type_id': 
'fake_share_type_id', 'snapshot_id': 'fake_snapshot_id', 'host': 'fake_host', 'share_network_id': 'fake_share_network_id', 'metadata': '%7B%27k1%27%3A+%27v1%27%7D', # serialized k1=v1 'extra_specs': '%7B%27k2%27%3A+%27v2%27%7D', # serialized k2=v2 'sort_key': 'fake_sort_key', 'sort_dir': 'fake_sort_dir', 'limit': '1', 'offset': '1', 'is_public': 'False', } # fake_key should be filtered for non-admin url = '/shares/detail?fake_key=fake_value' for k, v in search_opts.items(): url = url + '&' + k + '=' + v req = fakes.HTTPRequest.blank(url, use_admin_context=use_admin_context) shares = [ {'id': 'id1', 'display_name': 'n1'}, { 'id': 'id2', 'display_name': 'n2', 'status': constants.STATUS_AVAILABLE, 'snapshot_id': 'fake_snapshot_id', 'share_type_id': 'fake_share_type_id', 'instance': { 'host': 'fake_host', 'share_network_id': 'fake_share_network_id', }, }, {'id': 'id3', 'display_name': 'n3'}, ] self.mock_object(share_api.API, 'get_all', mock.Mock(return_value=shares)) result = self.controller.detail(req) search_opts_expected = { 'display_name': search_opts['name'], 'status': search_opts['status'], 'share_server_id': search_opts['share_server_id'], 'share_type_id': search_opts['share_type_id'], 'snapshot_id': search_opts['snapshot_id'], 'host': search_opts['host'], 'share_network_id': search_opts['share_network_id'], 'metadata': {'k1': 'v1'}, 'extra_specs': {'k2': 'v2'}, 'is_public': 'False', } if use_admin_context: search_opts_expected.update({'fake_key': 'fake_value'}) share_api.API.get_all.assert_called_once_with( req.environ['manila.context'], sort_key=search_opts['sort_key'], sort_dir=search_opts['sort_dir'], search_opts=search_opts_expected, ) self.assertEqual(1, len(result['shares'])) self.assertEqual(shares[1]['id'], result['shares'][0]['id']) self.assertEqual( shares[1]['display_name'], result['shares'][0]['name']) self.assertEqual( shares[1]['snapshot_id'], result['shares'][0]['snapshot_id']) self.assertEqual( shares[1]['status'], result['shares'][0]['status']) self.assertEqual( shares[1]['share_type_id'], result['shares'][0]['share_type']) self.assertEqual( shares[1]['snapshot_id'], result['shares'][0]['snapshot_id']) self.assertEqual( shares[1]['instance']['host'], result['shares'][0]['host']) self.assertEqual( shares[1]['instance']['share_network_id'], result['shares'][0]['share_network_id']) def test_share_list_detail_with_search_opts_by_non_admin(self): self._share_list_detail_with_search_opts(use_admin_context=False) def test_share_list_detail_with_search_opts_by_admin(self): self._share_list_detail_with_search_opts(use_admin_context=True) def _list_detail_common_expected(self): return { 'shares': [ { 'status': 'fakestatus', 'description': 'displaydesc', 'export_location': 'fake_location', 'export_locations': ['fake_location', 'fake_location2'], 'availability_zone': 'fakeaz', 'name': 'displayname', 'share_proto': 'FAKEPROTO', 'metadata': {}, 'project_id': 'fakeproject', 'host': 'fakehost', 'id': '1', 'snapshot_id': '2', 'snapshot_support': True, 'share_network_id': None, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'size': 1, 'share_type': '1', 'volume_type': '1', 'is_public': False, 'links': [ { 'href': 'http://localhost/v1/fake/shares/1', 'rel': 'self' }, { 'href': 'http://localhost/fake/shares/1', 'rel': 'bookmark' } ], } ] } def _list_detail_test_common(self, req, expected): self.mock_object(share_api.API, 'get_all', stubs.stub_share_get_all_by_project) res_dict = self.controller.detail(req) self.assertEqual(expected, res_dict) 
self.assertEqual(res_dict['shares'][0]['volume_type'], res_dict['shares'][0]['share_type']) def test_share_list_detail(self): env = {'QUERY_STRING': 'name=Share+Test+Name'} req = fakes.HTTPRequest.blank('/shares/detail', environ=env) expected = self._list_detail_common_expected() expected['shares'][0].pop('snapshot_support') self._list_detail_test_common(req, expected) def test_share_list_detail_with_consistency_group(self): env = {'QUERY_STRING': 'name=Share+Test+Name'} req = fakes.HTTPRequest.blank('/shares/detail', environ=env, version="2.4") expected = self._list_detail_common_expected() expected['shares'][0]['consistency_group_id'] = None expected['shares'][0]['source_cgsnapshot_member_id'] = None self._list_detail_test_common(req, expected) def test_share_list_detail_with_task_state(self): env = {'QUERY_STRING': 'name=Share+Test+Name'} req = fakes.HTTPRequest.blank('/shares/detail', environ=env, version="2.5") expected = self._list_detail_common_expected() expected['shares'][0]['consistency_group_id'] = None expected['shares'][0]['source_cgsnapshot_member_id'] = None expected['shares'][0]['task_state'] = None self._list_detail_test_common(req, expected) def test_share_list_detail_without_export_locations(self): env = {'QUERY_STRING': 'name=Share+Test+Name'} req = fakes.HTTPRequest.blank('/shares/detail', environ=env, version="2.9") expected = self._list_detail_common_expected() expected['shares'][0]['consistency_group_id'] = None expected['shares'][0]['source_cgsnapshot_member_id'] = None expected['shares'][0]['task_state'] = None expected['shares'][0]['share_type_name'] = None expected['shares'][0].pop('export_location') expected['shares'][0].pop('export_locations') self._list_detail_test_common(req, expected) def test_share_list_detail_with_replication_type(self): self.mock_object(share_api.API, 'get_all', stubs.stub_share_get_all_by_project) env = {'QUERY_STRING': 'name=Share+Test+Name'} req = fakes.HTTPRequest.blank( '/shares/detail', environ=env, version=share_replicas.MIN_SUPPORTED_API_VERSION) res_dict = self.controller.detail(req) expected = { 'shares': [ { 'status': 'fakestatus', 'description': 'displaydesc', 'availability_zone': 'fakeaz', 'name': 'displayname', 'share_proto': 'FAKEPROTO', 'metadata': {}, 'project_id': 'fakeproject', 'access_rules_status': 'active', 'host': 'fakehost', 'id': '1', 'snapshot_id': '2', 'share_network_id': None, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'size': 1, 'share_type_name': None, 'share_type': '1', 'volume_type': '1', 'is_public': False, 'consistency_group_id': None, 'source_cgsnapshot_member_id': None, 'snapshot_support': True, 'has_replicas': False, 'replication_type': None, 'task_state': None, 'links': [ { 'href': 'http://localhost/v1/fake/shares/1', 'rel': 'self' }, { 'href': 'http://localhost/fake/shares/1', 'rel': 'bookmark' } ], } ] } self.assertEqual(expected, res_dict) self.assertEqual(res_dict['shares'][0]['volume_type'], res_dict['shares'][0]['share_type']) def test_remove_invalid_options(self): ctx = context.RequestContext('fakeuser', 'fakeproject', is_admin=False) search_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'} expected_opts = {'a': 'a', 'c': 'c'} allowed_opts = ['a', 'c'] common.remove_invalid_options(ctx, search_opts, allowed_opts) self.assertEqual(expected_opts, search_opts) def test_remove_invalid_options_admin(self): ctx = context.RequestContext('fakeuser', 'fakeproject', is_admin=True) search_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'} expected_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'} 
allowed_opts = ['a', 'c'] common.remove_invalid_options(ctx, search_opts, allowed_opts) self.assertEqual(expected_opts, search_opts) def _fake_access_get(self, ctxt, access_id): class Access(object): def __init__(self, **kwargs): self.STATE_NEW = 'fake_new' self.STATE_ACTIVE = 'fake_active' self.STATE_ERROR = 'fake_error' self.params = kwargs self.params['state'] = self.STATE_NEW self.share_id = kwargs.get('share_id') self.id = access_id def __getitem__(self, item): return self.params[item] access = Access(access_id=access_id, share_id='fake_share_id') return access @ddt.ddt class ShareActionsTest(test.TestCase): def setUp(self): super(self.__class__, self).setUp() self.controller = shares.ShareController() self.mock_object(share_api.API, 'get', stubs.stub_share_get) @ddt.data( {'access_type': 'ip', 'access_to': '127.0.0.1'}, {'access_type': 'user', 'access_to': '1' * 4}, {'access_type': 'user', 'access_to': '1' * 32}, {'access_type': 'user', 'access_to': 'fake\\]{.-_\'`;}['}, {'access_type': 'user', 'access_to': 'MYDOMAIN\\Administrator'}, {'access_type': 'cert', 'access_to': 'x'}, {'access_type': 'cert', 'access_to': 'tenant.example.com'}, {'access_type': 'cert', 'access_to': 'x' * 64}, ) def test_allow_access(self, access): self.mock_object(share_api.API, 'allow_access', mock.Mock(return_value={'fake': 'fake'})) id = 'fake_share_id' body = {'allow_access': access} expected = {'access': {'fake': 'fake'}} req = fakes.HTTPRequest.blank( '/v2/tenant1/shares/%s/action' % id, version="2.7") res = self.controller.allow_access(req, id, body) self.assertEqual(expected, res) @ddt.data( {'access_type': 'error_type', 'access_to': '127.0.0.1'}, {'access_type': 'ip', 'access_to': 'localhost'}, {'access_type': 'ip', 'access_to': '127.0.0.*'}, {'access_type': 'ip', 'access_to': '127.0.0.0/33'}, {'access_type': 'ip', 'access_to': '127.0.0.256'}, {'access_type': 'user', 'access_to': '1'}, {'access_type': 'user', 'access_to': '1' * 3}, {'access_type': 'user', 'access_to': '1' * 33}, {'access_type': 'user', 'access_to': 'root^'}, {'access_type': 'cert', 'access_to': ''}, {'access_type': 'cert', 'access_to': ' '}, {'access_type': 'cert', 'access_to': 'x' * 65}, ) def test_allow_access_error(self, access): id = 'fake_share_id' body = {'allow_access': access} req = fakes.HTTPRequest.blank('/v2/tenant1/shares/%s/action' % id, version="2.7") self.assertRaises(webob.exc.HTTPBadRequest, self.controller.allow_access, req, id, body) @ddt.unpack @ddt.data( {'exc': None, 'access_to': 'alice', 'version': '2.13'}, {'exc': webob.exc.HTTPBadRequest, 'access_to': 'alice', 'version': '2.11'} ) def test_allow_access_ceph(self, exc, access_to, version): share_id = "fake_id" self.mock_object(share_api.API, 'allow_access', mock.Mock(return_value={'fake': 'fake'})) req = fakes.HTTPRequest.blank( '/v2/shares/%s/action' % share_id, version=version) body = {'allow_access': { 'access_type': 'cephx', 'access_to': access_to, 'access_level': 'rw' }} if exc: self.assertRaises(exc, self.controller.allow_access, req, share_id, body) else: expected = {'access': {'fake': 'fake'}} res = self.controller.allow_access(req, id, body) self.assertEqual(expected, res) def test_deny_access(self): def _stub_deny_access(*args, **kwargs): pass self.mock_object(share_api.API, "deny_access", _stub_deny_access) self.mock_object(share_api.API, "access_get", _fake_access_get) id = 'fake_share_id' body = {"os-deny_access": {"access_id": 'fake_acces_id'}} req = fakes.HTTPRequest.blank('/v1/tenant1/shares/%s/action' % id) res = 
self.controller._deny_access(req, id, body) self.assertEqual(202, res.status_int) def test_deny_access_not_found(self): def _stub_deny_access(*args, **kwargs): pass self.mock_object(share_api.API, "deny_access", _stub_deny_access) self.mock_object(share_api.API, "access_get", _fake_access_get) id = 'super_fake_share_id' body = {"os-deny_access": {"access_id": 'fake_acces_id'}} req = fakes.HTTPRequest.blank('/v1/tenant1/shares/%s/action' % id) self.assertRaises(webob.exc.HTTPNotFound, self.controller._deny_access, req, id, body) def test_access_list(self): def _fake_access_get_all(*args, **kwargs): return [{"state": "fakestatus", "id": "fake_share_id", "access_type": "fakeip", "access_to": "127.0.0.1"}] self.mock_object(share_api.API, "access_get_all", _fake_access_get_all) id = 'fake_share_id' body = {"os-access_list": None} req = fakes.HTTPRequest.blank('/v1/tenant1/shares/%s/action' % id) res_dict = self.controller._access_list(req, id, body) expected = _fake_access_get_all() self.assertEqual(expected, res_dict['access_list']) @ddt.unpack @ddt.data( {'body': {'os-extend': {'new_size': 2}}, 'version': '2.6'}, {'body': {'extend': {'new_size': 2}}, 'version': '2.7'}, ) def test_extend(self, body, version): id = 'fake_share_id' share = stubs.stub_share_get(None, None, id) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, "extend") size = '2' req = fakes.HTTPRequest.blank( '/v2/shares/%s/action' % id, version=version) actual_response = self.controller._extend(req, id, body) share_api.API.get.assert_called_once_with(mock.ANY, id) share_api.API.extend.assert_called_once_with( mock.ANY, share, int(size)) self.assertEqual(202, actual_response.status_int) @ddt.data({"os-extend": ""}, {"os-extend": {"new_size": "foo"}}, {"os-extend": {"new_size": {'foo': 'bar'}}}) def test_extend_invalid_body(self, body): id = 'fake_share_id' req = fakes.HTTPRequest.blank('/v1/shares/%s/action' % id) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._extend, req, id, body) @ddt.data({'source': exception.InvalidInput, 'target': webob.exc.HTTPBadRequest}, {'source': exception.InvalidShare, 'target': webob.exc.HTTPBadRequest}, {'source': exception.ShareSizeExceedsAvailableQuota, 'target': webob.exc.HTTPForbidden}) @ddt.unpack def test_extend_exception(self, source, target): id = 'fake_share_id' req = fakes.HTTPRequest.blank('/v1/shares/%s/action' % id) body = {"os-extend": {'new_size': '123'}} self.mock_object(share_api.API, "extend", mock.Mock(side_effect=source('fake'))) self.assertRaises(target, self.controller._extend, req, id, body) @ddt.unpack @ddt.data( {'body': {'os-shrink': {'new_size': 1}}, 'version': '2.6'}, {'body': {'shrink': {'new_size': 1}}, 'version': '2.7'}, ) def test_shrink(self, body, version): id = 'fake_share_id' share = stubs.stub_share_get(None, None, id) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, "shrink") size = '1' req = fakes.HTTPRequest.blank( '/v2/shares/%s/action' % id, version=version) actual_response = self.controller._shrink(req, id, body) share_api.API.get.assert_called_once_with(mock.ANY, id) share_api.API.shrink.assert_called_once_with( mock.ANY, share, int(size)) self.assertEqual(202, actual_response.status_int) @ddt.data({"os-shrink": ""}, {"os-shrink": {"new_size": "foo"}}, {"os-shrink": {"new_size": {'foo': 'bar'}}}) def test_shrink_invalid_body(self, body): id = 'fake_share_id' req = fakes.HTTPRequest.blank('/v1/shares/%s/action' % id) 
self.assertRaises(webob.exc.HTTPBadRequest, self.controller._shrink, req, id, body) @ddt.data({'source': exception.InvalidInput, 'target': webob.exc.HTTPBadRequest}, {'source': exception.InvalidShare, 'target': webob.exc.HTTPBadRequest}) @ddt.unpack def test_shrink_exception(self, source, target): id = 'fake_share_id' req = fakes.HTTPRequest.blank('/v1/shares/%s/action' % id) body = {"os-shrink": {'new_size': '123'}} self.mock_object(share_api.API, "shrink", mock.Mock(side_effect=source('fake'))) self.assertRaises(target, self.controller._shrink, req, id, body) @ddt.ddt class ShareAdminActionsAPITest(test.TestCase): def setUp(self): super(self.__class__, self).setUp() CONF.set_default("default_share_type", None) self.flags(rpc_backend='manila.openstack.common.rpc.impl_fake') self.share_api = share_api.API() self.admin_context = context.RequestContext('admin', 'fake', True) self.member_context = context.RequestContext('fake', 'fake') def _get_context(self, role): return getattr(self, '%s_context' % role) def _setup_share_data(self, share=None, version='2.7'): if share is None: share = db_utils.create_share(status=constants.STATUS_AVAILABLE, size='1', override_defaults=True) req = fakes.HTTPRequest.blank( '/v2/fake/shares/%s/action' % share['id'], version=version) return share, req def _reset_status(self, ctxt, model, req, db_access_method, valid_code, valid_status=None, body=None, version='2.7'): if float(version) > 2.6: action_name = 'reset_status' else: action_name = 'os-reset_status' if body is None: body = {action_name: {'status': constants.STATUS_ERROR}} req.method = 'POST' req.headers['content-type'] = 'application/json' req.headers['X-Openstack-Manila-Api-Version'] = version req.body = six.b(jsonutils.dumps(body)) req.environ['manila.context'] = ctxt resp = req.get_response(fakes.app()) # validate response code and model status self.assertEqual(valid_code, resp.status_int) if valid_code == 404: self.assertRaises(exception.NotFound, db_access_method, ctxt, model['id']) else: actual_model = db_access_method(ctxt, model['id']) self.assertEqual(valid_status, actual_model['status']) @ddt.data(*fakes.fixture_reset_status_with_different_roles) @ddt.unpack def test_share_reset_status_with_different_roles(self, role, valid_code, valid_status, version): share, req = self._setup_share_data(version=version) ctxt = self._get_context(role) self._reset_status(ctxt, share, req, db.share_get, valid_code, valid_status, version=version) @ddt.data(*fakes.fixture_invalid_reset_status_body) def test_share_invalid_reset_status_body(self, body): share, req = self._setup_share_data(version='2.6') ctxt = self.admin_context self._reset_status(ctxt, share, req, db.share_get, 400, constants.STATUS_AVAILABLE, body, version='2.6') @ddt.data('2.6', '2.7') def test_share_reset_status_for_missing(self, version): fake_share = {'id': 'missing-share-id'} req = fakes.HTTPRequest.blank( '/v2/fake/shares/%s/action' % fake_share['id'], version=version) self._reset_status(self.admin_context, fake_share, req, db.share_snapshot_get, 404, version=version) def _force_delete(self, ctxt, model, req, db_access_method, valid_code, check_model_in_db=False, version='2.7'): if float(version) > 2.6: action_name = 'force_delete' else: action_name = 'os-force_delete' req.method = 'POST' req.headers['content-type'] = 'application/json' req.headers['X-Openstack-Manila-Api-Version'] = version req.body = six.b(jsonutils.dumps({action_name: {}})) req.environ['manila.context'] = ctxt resp = req.get_response(fakes.app()) # validate response 
self.assertEqual(valid_code, resp.status_int) if valid_code == 202 and check_model_in_db: self.assertRaises(exception.NotFound, db_access_method, ctxt, model['id']) @ddt.data(*fakes.fixture_force_delete_with_different_roles) @ddt.unpack def test_share_force_delete_with_different_roles(self, role, resp_code, version): share, req = self._setup_share_data(version=version) ctxt = self._get_context(role) self._force_delete(ctxt, share, req, db.share_get, resp_code, check_model_in_db=True, version=version) @ddt.data('2.6', '2.7') def test_share_force_delete_missing(self, version): share, req = self._setup_share_data( share={'id': 'fake'}, version=version) ctxt = self._get_context('admin') self._force_delete( ctxt, share, req, db.share_get, 404, version=version) @ddt.ddt class ShareUnmanageTest(test.TestCase): def setUp(self): super(self.__class__, self).setUp() self.controller = shares.ShareController() self.mock_object(share_api.API, 'get_all', stubs.stub_get_all_shares) self.mock_object(share_api.API, 'get', stubs.stub_share_get) self.mock_object(share_api.API, 'update', stubs.stub_share_update) self.mock_object(share_api.API, 'delete', stubs.stub_share_delete) self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get) self.share_id = 'fake' self.request = fakes.HTTPRequest.blank( '/share/%s/unmanage' % self.share_id, use_admin_context=True, version='2.7', ) def test_unmanage_share(self): share = dict(status=constants.STATUS_AVAILABLE, id='foo_id', instance={}) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, 'unmanage', mock.Mock()) self.mock_object( self.controller.share_api.db, 'share_snapshot_get_all_for_share', mock.Mock(return_value=[])) actual_result = self.controller.unmanage(self.request, share['id']) self.assertEqual(202, actual_result.status_int) self.controller.share_api.db.share_snapshot_get_all_for_share.\ assert_called_once_with( self.request.environ['manila.context'], share['id']) self.controller.share_api.get.assert_called_once_with( self.request.environ['manila.context'], share['id']) share_api.API.unmanage.assert_called_once_with( self.request.environ['manila.context'], share) def test_unmanage_share_that_has_snapshots(self): share = dict(status=constants.STATUS_AVAILABLE, id='foo_id', instance={}) snapshots = ['foo', 'bar'] self.mock_object(self.controller.share_api, 'unmanage') self.mock_object( self.controller.share_api.db, 'share_snapshot_get_all_for_share', mock.Mock(return_value=snapshots)) self.mock_object( self.controller.share_api, 'get', mock.Mock(return_value=share)) self.assertRaises( webob.exc.HTTPForbidden, self.controller.unmanage, self.request, share['id']) self.assertFalse(self.controller.share_api.unmanage.called) self.controller.share_api.db.share_snapshot_get_all_for_share.\ assert_called_once_with( self.request.environ['manila.context'], share['id']) self.controller.share_api.get.assert_called_once_with( self.request.environ['manila.context'], share['id']) def test_unmanage_share_based_on_share_server(self): share = dict(instance=dict(share_server_id='foo_id'), id='bar_id') self.mock_object( self.controller.share_api, 'get', mock.Mock(return_value=share)) self.assertRaises( webob.exc.HTTPForbidden, self.controller.unmanage, self.request, share['id']) self.controller.share_api.get.assert_called_once_with( self.request.environ['manila.context'], share['id']) @ddt.data(*constants.TRANSITIONAL_STATUSES) def test_unmanage_share_with_transitional_state(self, share_status): share = 
dict(status=share_status, id='foo_id', instance={}) self.mock_object( self.controller.share_api, 'get', mock.Mock(return_value=share)) self.assertRaises( webob.exc.HTTPForbidden, self.controller.unmanage, self.request, share['id']) self.controller.share_api.get.assert_called_once_with( self.request.environ['manila.context'], share['id']) def test_unmanage_share_not_found(self): self.mock_object(share_api.API, 'get', mock.Mock( side_effect=exception.NotFound)) self.mock_object(share_api.API, 'unmanage', mock.Mock()) self.assertRaises(webob.exc.HTTPNotFound, self.controller.unmanage, self.request, self.share_id) @ddt.data(exception.InvalidShare(reason="fake"), exception.PolicyNotAuthorized(action="fake"),) def test_unmanage_share_invalid(self, side_effect): share = dict(status=constants.STATUS_AVAILABLE, id='foo_id', instance={}) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, 'unmanage', mock.Mock( side_effect=side_effect)) self.assertRaises(webob.exc.HTTPForbidden, self.controller.unmanage, self.request, self.share_id) def test_wrong_permissions(self): share_id = 'fake' req = fakes.HTTPRequest.blank('/share/%s/unmanage' % share_id, use_admin_context=False, version='2.7') self.assertRaises(webob.exc.HTTPForbidden, self.controller.unmanage, req, share_id) def test_unsupported_version(self): share_id = 'fake' req = fakes.HTTPRequest.blank('/share/%s/unmanage' % share_id, use_admin_context=False, version='2.6') self.assertRaises(exception.VersionNotFoundForAPIMethod, self.controller.unmanage, req, share_id) def get_fake_manage_body(export_path='/fake', service_host='fake@host#POOL', protocol='fake', share_type='fake', **kwargs): fake_share = { 'export_path': export_path, 'service_host': service_host, 'protocol': protocol, 'share_type': share_type, } fake_share.update(kwargs) return {'share': fake_share} @ddt.ddt class ShareManageTest(test.TestCase): def setUp(self): super(self.__class__, self).setUp() self.controller = shares.ShareController() self.resource_name = self.controller.resource_name self.request = fakes.HTTPRequest.blank( '/v2/shares/manage', use_admin_context=True, version='2.7') self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) def _setup_manage_mocks(self, service_is_up=True): self.mock_object(db, 'service_get_by_host_and_topic', mock.Mock( return_value={'host': 'fake'})) self.mock_object(share_types, 'get_share_type_by_name_or_id', mock.Mock(return_value={'id': 'fake'})) self.mock_object(utils, 'service_is_up', mock.Mock( return_value=service_is_up)) if service_is_up: self.mock_object(utils, 'validate_service_host') else: self.mock_object( utils, 'validate_service_host', mock.Mock(side_effect=exception.ServiceIsDown(service='fake'))) @ddt.data({}, {'shares': {}}, {'share': get_fake_manage_body('', None, None)}) def test_share_manage_invalid_body(self, body): self.assertRaises(webob.exc.HTTPUnprocessableEntity, self.controller.manage, self.request, body) def test_share_manage_service_not_found(self): body = get_fake_manage_body() self.mock_object(db, 'service_get_by_host_and_topic', mock.Mock( side_effect=exception.ServiceNotFound(service_id='fake'))) self.assertRaises(webob.exc.HTTPNotFound, self.controller.manage, self.request, body) def test_share_manage_share_type_not_found(self): body = get_fake_manage_body() self.mock_object(db, 'service_get_by_host_and_topic', mock.Mock()) self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True)) self.mock_object(db, 
'share_type_get_by_name', mock.Mock( side_effect=exception.ShareTypeNotFoundByName( share_type_name='fake'))) self.assertRaises(webob.exc.HTTPNotFound, self.controller.manage, self.request, body) @ddt.data({'service_is_up': False, 'service_host': 'fake@host#POOL'}, {'service_is_up': True, 'service_host': 'fake@host'}) def test_share_manage_bad_request(self, settings): body = get_fake_manage_body(service_host=settings.pop('service_host')) self._setup_manage_mocks(**settings) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.manage, self.request, body) def test_share_manage_duplicate_share(self): body = get_fake_manage_body() self._setup_manage_mocks() self.mock_object(share_api.API, 'manage', mock.Mock(side_effect=exception.ManilaException())) self.assertRaises(webob.exc.HTTPConflict, self.controller.manage, self.request, body) def test_share_manage_forbidden_manage(self): body = get_fake_manage_body() self._setup_manage_mocks() error = mock.Mock(side_effect=exception.PolicyNotAuthorized(action='')) self.mock_object(share_api.API, 'manage', error) self.assertRaises(webob.exc.HTTPForbidden, self.controller.manage, self.request, body) def test_share_manage_forbidden_validate_service_host(self): body = get_fake_manage_body() self._setup_manage_mocks() error = mock.Mock(side_effect=exception.PolicyNotAuthorized(action='')) self.mock_object( utils, 'validate_service_host', mock.Mock(side_effect=error)) self.assertRaises(webob.exc.HTTPForbidden, self.controller.manage, self.request, body) @ddt.data( get_fake_manage_body(name='foo', description='bar'), get_fake_manage_body(display_name='foo', description='bar'), get_fake_manage_body(name='foo', display_description='bar'), get_fake_manage_body(display_name='foo', display_description='bar'), get_fake_manage_body(display_name='foo', display_description='bar', driver_options=dict(volume_id='quuz')), ) def test_share_manage(self, data): self._test_share_manage(data, "2.7") @ddt.data( get_fake_manage_body(name='foo', description='bar', is_public=True), get_fake_manage_body(name='foo', description='bar', is_public=False) ) def test_share_manage_with_is_public(self, data): self._test_share_manage(data, "2.8") def _test_share_manage(self, data, version): self._setup_manage_mocks() return_share = { 'share_type_id': '', 'id': 'fake', 'instance': {}, } self.mock_object( share_api.API, 'manage', mock.Mock(return_value=return_share)) share = { 'host': data['share']['service_host'], 'export_location': data['share']['export_path'], 'share_proto': data['share']['protocol'].upper(), 'share_type_id': 'fake', 'display_name': 'foo', 'display_description': 'bar', } driver_options = data['share'].get('driver_options', {}) if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.8')): share['is_public'] = data['share']['is_public'] req = fakes.HTTPRequest.blank('/v2/shares/manage', version=version, use_admin_context=True) actual_result = self.controller.manage(req, data) share_api.API.manage.assert_called_once_with( mock.ANY, share, driver_options) self.assertIsNotNone(actual_result) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'manage') def test_wrong_permissions(self): body = get_fake_manage_body() self.assertRaises( webob.exc.HTTPForbidden, self.controller.manage, fakes.HTTPRequest.blank( '/share/manage', use_admin_context=False, version='2.7'), body, ) def test_unsupported_version(self): share_id = 'fake' req = fakes.HTTPRequest.blank( '/share/manage', use_admin_context=False, 
version='2.6') self.assertRaises(exception.VersionNotFoundForAPIMethod, self.controller.manage, req, share_id) manila-2.0.0/manila/tests/api/v2/test_quota_sets.py0000664000567000056710000003357312701407107023430 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright (c) 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for manila.api.v1.quota_sets.py """ import copy import ddt import mock from oslo_config import cfg import webob.exc import webob.response from manila.api.v2 import quota_sets from manila import context from manila import exception from manila import policy from manila import test from manila.tests.api import fakes from manila import utils CONF = cfg.CONF REQ = mock.MagicMock() REQ.environ = {'manila.context': context.get_admin_context()} REQ.environ['manila.context'].is_admin = True REQ.environ['manila.context'].auth_token = 'foo_auth_token' REQ.environ['manila.context'].project_id = 'foo_project_id' REQ_WITH_USER = copy.deepcopy(REQ) REQ_WITH_USER.environ['manila.context'].user_id = 'foo_user_id' REQ_WITH_USER.environ['QUERY_STRING'] = 'user_id=foo_user_id' REQ_MEMBER = copy.deepcopy(REQ) REQ_MEMBER.environ['manila.context'].is_admin = False @ddt.ddt class QuotaSetsControllerTest(test.TestCase): def setUp(self): super(self.__class__, self).setUp() self.controller = quota_sets.QuotaSetsController() self.resource_name = self.controller.resource_name self.project_id = 'foo_project_id' self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) @ddt.data( {"shares": 3, "snapshots": 4, "gigabytes": 5, "snapshot_gigabytes": 6, "share_networks": 7}, {"shares": -1, "snapshots": -1, "gigabytes": -1, "snapshot_gigabytes": -1, "share_networks": -1}, {"shares": 13}, {"snapshots": 24}, {"gigabytes": 7}, {"snapshot_gigabytes": 10001}, {"share_networks": 12345}, ) def test_defaults(self, quotas): for k, v in quotas.items(): CONF.set_default('quota_' + k, v) expected = { 'quota_set': { 'id': self.project_id, 'shares': quotas.get('shares', 50), 'gigabytes': quotas.get('gigabytes', 1000), 'snapshots': quotas.get('snapshots', 50), 'snapshot_gigabytes': quotas.get('snapshot_gigabytes', 1000), 'share_networks': quotas.get('share_networks', 10), } } result = self.controller.defaults(REQ, self.project_id) self.assertEqual(expected, result) self.mock_policy_check.assert_called_once_with( REQ.environ['manila.context'], self.resource_name, 'show') @ddt.data( ('os-', '1.0', quota_sets.QuotaSetsControllerLegacy, 'defaults'), ('os-', '2.6', quota_sets.QuotaSetsControllerLegacy, 'defaults'), ('', '2.7', quota_sets.QuotaSetsController, 'defaults'), ('os-', '1.0', quota_sets.QuotaSetsControllerLegacy, 'show'), ('os-', '2.6', quota_sets.QuotaSetsControllerLegacy, 'show'), ('', '2.7', quota_sets.QuotaSetsController, 'show'), ) @ddt.unpack def test_get_quotas_with_different_api_versions(self, url, version, controller, method_name): expected = { 'quota_set': { 'id': self.project_id, 'shares': 50, 'gigabytes': 
1000, 'snapshots': 50, 'snapshot_gigabytes': 1000, 'share_networks': 10, } } req = fakes.HTTPRequest.blank( '/fooproject/%squota-sets' % url, version=version, use_admin_context=True) result = getattr(controller(), method_name)(req, self.project_id) self.assertEqual(expected, result) @ddt.data(REQ, REQ_WITH_USER) def test_show_quota(self, request): quotas = { "shares": 23, "snapshots": 34, "gigabytes": 45, "snapshot_gigabytes": 56, "share_networks": 67, } expected = { 'quota_set': { 'id': self.project_id, 'shares': quotas.get('shares', 50), 'gigabytes': quotas.get('gigabytes', 1000), 'snapshots': quotas.get('snapshots', 50), 'snapshot_gigabytes': quotas.get('snapshot_gigabytes', 1000), 'share_networks': quotas.get('share_networks', 10), } } for k, v in quotas.items(): CONF.set_default('quota_' + k, v) result = self.controller.show(request, self.project_id) self.assertEqual(expected, result) self.mock_policy_check.assert_called_once_with( request.environ['manila.context'], self.resource_name, 'show') def test_show_quota_not_authorized(self): self.mock_object( quota_sets.db, 'authorize_project_context', mock.Mock(side_effect=exception.NotAuthorized)) self.assertRaises( webob.exc.HTTPForbidden, self.controller.show, REQ, self.project_id) self.mock_policy_check.assert_called_once_with( REQ.environ['manila.context'], self.resource_name, 'show') @ddt.data(REQ, REQ_WITH_USER) def test_update_quota(self, request): CONF.set_default('quota_shares', 789) body = {'quota_set': {'tenant_id': self.project_id, 'shares': 788}} expected = { 'quota_set': { 'shares': body['quota_set']['shares'], 'gigabytes': 1000, 'snapshots': 50, 'snapshot_gigabytes': 1000, 'share_networks': 10, } } mock_policy_update_check_call = mock.call( request.environ['manila.context'], self.resource_name, 'update') mock_policy_show_check_call = mock.call( request.environ['manila.context'], self.resource_name, 'show') update_result = self.controller.update( request, self.project_id, body=body) self.assertEqual(expected, update_result) show_result = self.controller.show(request, self.project_id) expected['quota_set']['id'] = self.project_id self.assertEqual(expected, show_result) self.mock_policy_check.assert_has_calls([ mock_policy_update_check_call, mock_policy_show_check_call]) @ddt.data(-2, 'foo', {1: 2}, [1]) def test_update_quota_with_invalid_value(self, value): body = {'quota_set': {'tenant_id': self.project_id, 'shares': value}} self.assertRaises( webob.exc.HTTPBadRequest, self.controller.update, REQ, self.project_id, body=body) self.mock_policy_check.assert_called_once_with( REQ.environ['manila.context'], self.resource_name, 'update') def test_user_quota_can_not_be_bigger_than_tenant_quota(self): value = 777 CONF.set_default('quota_shares', value) body = { 'quota_set': { 'tenant_id': self.project_id, 'shares': value + 1, } } self.assertRaises( webob.exc.HTTPBadRequest, self.controller.update, REQ_WITH_USER, self.project_id, body=body) self.mock_policy_check.assert_called_once_with( REQ_WITH_USER.environ['manila.context'], self.resource_name, 'update') def test_update_inexistent_quota(self): body = { 'quota_set': { 'tenant_id': self.project_id, 'fake_quota': 13, } } self.assertRaises( webob.exc.HTTPBadRequest, self.controller.update, REQ, self.project_id, body=body) self.mock_policy_check.assert_called_once_with( REQ.environ['manila.context'], self.resource_name, 'update') def test_update_quota_not_authorized(self): body = {'quota_set': {'tenant_id': self.project_id, 'shares': 13}} self.assertRaises( webob.exc.HTTPForbidden, 
self.controller.update, REQ_MEMBER, self.project_id, body=body) self.mock_policy_check.assert_called_once_with( REQ_MEMBER.environ['manila.context'], self.resource_name, 'update') @ddt.data( ('os-quota-sets', '1.0', quota_sets.QuotaSetsControllerLegacy), ('os-quota-sets', '2.6', quota_sets.QuotaSetsControllerLegacy), ('quota-sets', '2.7', quota_sets.QuotaSetsController), ) @ddt.unpack def test_update_all_quotas_with_force(self, url, version, controller): req = fakes.HTTPRequest.blank( '/fooproject/%s' % url, version=version, use_admin_context=True) quotas = ( ('quota_shares', 13), ('quota_gigabytes', 14), ('quota_snapshots', 15), ('quota_snapshot_gigabytes', 16), ('quota_share_networks', 17), ) for quota, value in quotas: CONF.set_default(quota, value) expected = { 'quota_set': { 'tenant_id': self.project_id, 'shares': quotas[0][1], 'gigabytes': quotas[1][1], 'snapshots': quotas[2][1], 'snapshot_gigabytes': quotas[3][1], 'share_networks': quotas[4][1], 'force': True, } } update_result = controller().update( req, self.project_id, body=expected) expected['quota_set'].pop('force') expected['quota_set'].pop('tenant_id') self.assertEqual(expected, update_result) show_result = controller().show(req, self.project_id) expected['quota_set']['id'] = self.project_id self.assertEqual(expected, show_result) self.mock_policy_check.assert_has_calls([ mock.call(req.environ['manila.context'], self.resource_name, action) for action in ('update', 'show') ]) @ddt.data( ('os-quota-sets', '1.0', quota_sets.QuotaSetsControllerLegacy), ('os-quota-sets', '2.6', quota_sets.QuotaSetsControllerLegacy), ('quota-sets', '2.7', quota_sets.QuotaSetsController), ) @ddt.unpack def test_delete_tenant_quota(self, url, version, controller): self.mock_object(quota_sets.QUOTAS, 'destroy_all_by_project_and_user') self.mock_object(quota_sets.QUOTAS, 'destroy_all_by_project') req = fakes.HTTPRequest.blank( '/fooproject/%s' % url, version=version, use_admin_context=True) result = controller().delete(req, self.project_id) self.assertTrue( utils.IsAMatcher(webob.response.Response) == result ) self.assertTrue(hasattr(result, 'status_code')) self.assertEqual(202, result.status_code) self.assertFalse( quota_sets.QUOTAS.destroy_all_by_project_and_user.called) quota_sets.QUOTAS.destroy_all_by_project.assert_called_once_with( req.environ['manila.context'], self.project_id) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'delete') def test_delete_user_quota(self): project_id = 'foo_project_id' self.mock_object(quota_sets.QUOTAS, 'destroy_all_by_project_and_user') self.mock_object(quota_sets.QUOTAS, 'destroy_all_by_project') result = self.controller.delete(REQ_WITH_USER, project_id) self.assertTrue( utils.IsAMatcher(webob.response.Response) == result ) self.assertTrue(hasattr(result, 'status_code')) self.assertEqual(202, result.status_code) quota_sets.QUOTAS.destroy_all_by_project_and_user. 
\ assert_called_once_with( REQ_WITH_USER.environ['manila.context'], project_id, REQ_WITH_USER.environ['manila.context'].user_id) self.assertFalse(quota_sets.QUOTAS.destroy_all_by_project.called) self.mock_policy_check.assert_called_once_with( REQ_WITH_USER.environ['manila.context'], self.resource_name, 'delete') def test_delete_not_authorized(self): self.assertRaises( webob.exc.HTTPForbidden, self.controller.delete, REQ_MEMBER, self.project_id) self.mock_policy_check.assert_called_once_with( REQ_MEMBER.environ['manila.context'], self.resource_name, 'delete') @ddt.data( ('os-quota-sets', '2.7', quota_sets.QuotaSetsControllerLegacy), ('quota-sets', '2.6', quota_sets.QuotaSetsController), ('quota-sets', '2.0', quota_sets.QuotaSetsController), ) @ddt.unpack def test_api_not_found(self, url, version, controller): req = fakes.HTTPRequest.blank('/fooproject/%s' % url, version=version) for method_name in ('show', 'defaults', 'delete'): self.assertRaises( exception.VersionNotFoundForAPIMethod, getattr(controller(), method_name), req, self.project_id) @ddt.data( ('os-quota-sets', '2.7', quota_sets.QuotaSetsControllerLegacy), ('quota-sets', '2.6', quota_sets.QuotaSetsController), ('quota-sets', '2.0', quota_sets.QuotaSetsController), ) @ddt.unpack def test_update_api_not_found(self, url, version, controller): req = fakes.HTTPRequest.blank('/fooproject/%s' % url, version=version) self.assertRaises( exception.VersionNotFoundForAPIMethod, controller().update, req, self.project_id) manila-2.0.0/manila/tests/api/v2/test_share_snapshots.py0000664000567000056710000006421412701407107024441 0ustar jenkinsjenkins00000000000000# Copyright 2015 EMC Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
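# ---------------------------------------------------------------------------
# Editor's illustrative sketch -- not part of the original Manila sources.
# The API tests in this archive rely on two recurring patterns:
#   * ddt: @ddt.ddt marks the class, @ddt.data supplies sample inputs and
#     @ddt.unpack spreads dict samples into keyword arguments;
#   * mock.Mock(side_effect=...): stubs a share-API method so that calling it
#     raises a chosen exception, which the controller under test must map to
#     an HTTP error.
# The helper below is a hypothetical, self-contained demonstration of the
# side_effect half of that pattern; it is never invoked by the test suite.
def _side_effect_sketch():
    import mock
    import webob.exc

    # A stand-in for e.g. share_api.API.shrink patched via self.mock_object().
    fake_api_call = mock.Mock(side_effect=ValueError('fake'))
    try:
        fake_api_call('ctx', 'share', 1)
    except ValueError:
        # A controller would translate this into an HTTP error response.
        return webob.exc.HTTPBadRequest
# ---------------------------------------------------------------------------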
import ddt import mock from oslo_serialization import jsonutils import six import webob from manila.api.v2 import share_snapshots from manila.common import constants from manila import context from manila import db from manila import exception from manila import policy from manila.share import api as share_api from manila import test from manila.tests.api.contrib import stubs from manila.tests.api import fakes from manila.tests import db_utils from manila.tests import fake_share MIN_MANAGE_SNAPSHOT_API_VERSION = '2.12' def get_fake_manage_body(share_id=None, provider_location=None, driver_options=None, **kwargs): fake_snapshot = { 'share_id': share_id, 'provider_location': provider_location, 'driver_options': driver_options, } fake_snapshot.update(kwargs) return {'snapshot': fake_snapshot} @ddt.ddt class ShareSnapshotAPITest(test.TestCase): """Share Snapshot API Test.""" def setUp(self): super(self.__class__, self).setUp() self.controller = share_snapshots.ShareSnapshotsController() self.mock_object(share_api.API, 'get', stubs.stub_share_get) self.mock_object(share_api.API, 'get_all_snapshots', stubs.stub_snapshot_get_all_by_project) self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get) self.mock_object(share_api.API, 'snapshot_update', stubs.stub_snapshot_update) self.snp_example = { 'share_id': 100, 'size': 12, 'force': False, 'display_name': 'updated_snapshot_name', 'display_description': 'updated_snapshot_description', } def test_snapshot_create(self): self.mock_object(share_api.API, 'create_snapshot', stubs.stub_snapshot_create) body = { 'snapshot': { 'share_id': 'fakeshareid', 'force': False, 'name': 'displaysnapname', 'description': 'displaysnapdesc', } } req = fakes.HTTPRequest.blank('/snapshots') res_dict = self.controller.create(req, body) expected = fake_share.expected_snapshot(id=200) self.assertEqual(expected, res_dict) @ddt.data(0, False) def test_snapshot_create_no_support(self, snapshot_support): self.mock_object(share_api.API, 'create_snapshot') self.mock_object( share_api.API, 'get', mock.Mock(return_value={'snapshot_support': snapshot_support})) body = { 'snapshot': { 'share_id': 100, 'force': False, 'name': 'fake_share_name', 'description': 'fake_share_description', } } req = fakes.HTTPRequest.blank('/snapshots') self.assertRaises( webob.exc.HTTPUnprocessableEntity, self.controller.create, req, body) self.assertFalse(share_api.API.create_snapshot.called) def test_snapshot_create_no_body(self): body = {} req = fakes.HTTPRequest.blank('/snapshots') self.assertRaises(webob.exc.HTTPUnprocessableEntity, self.controller.create, req, body) def test_snapshot_delete(self): self.mock_object(share_api.API, 'delete_snapshot', stubs.stub_snapshot_delete) req = fakes.HTTPRequest.blank('/snapshots/200') resp = self.controller.delete(req, 200) self.assertEqual(202, resp.status_int) def test_snapshot_delete_nofound(self): self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get_notfound) req = fakes.HTTPRequest.blank('/snapshots/200') self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, 200) def test_snapshot_show(self): req = fakes.HTTPRequest.blank('/snapshots/200') res_dict = self.controller.show(req, 200) expected = fake_share.expected_snapshot(id=200) self.assertEqual(expected, res_dict) def test_snapshot_show_nofound(self): self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get_notfound) req = fakes.HTTPRequest.blank('/snapshots/200') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, '200') def 
test_snapshot_list_summary(self): self.mock_object(share_api.API, 'get_all_snapshots', stubs.stub_snapshot_get_all_by_project) req = fakes.HTTPRequest.blank('/snapshots') res_dict = self.controller.index(req) expected = { 'snapshots': [ { 'name': 'displaysnapname', 'id': 2, 'links': [ { 'href': 'http://localhost/v1/fake/' 'snapshots/2', 'rel': 'self' }, { 'href': 'http://localhost/fake/snapshots/2', 'rel': 'bookmark' } ], } ] } self.assertEqual(expected, res_dict) def _snapshot_list_summary_with_search_opts(self, use_admin_context): search_opts = fake_share.search_opts() # fake_key should be filtered for non-admin url = '/snapshots?fake_key=fake_value' for k, v in search_opts.items(): url = url + '&' + k + '=' + v req = fakes.HTTPRequest.blank(url, use_admin_context=use_admin_context) snapshots = [ {'id': 'id1', 'display_name': 'n1', 'status': 'fake_status', }, {'id': 'id2', 'display_name': 'n2', 'status': 'fake_status', }, {'id': 'id3', 'display_name': 'n3', 'status': 'fake_status', }, ] self.mock_object(share_api.API, 'get_all_snapshots', mock.Mock(return_value=snapshots)) result = self.controller.index(req) search_opts_expected = { 'display_name': search_opts['name'], 'status': search_opts['status'], 'share_id': search_opts['share_id'], } if use_admin_context: search_opts_expected.update({'fake_key': 'fake_value'}) share_api.API.get_all_snapshots.assert_called_once_with( req.environ['manila.context'], sort_key=search_opts['sort_key'], sort_dir=search_opts['sort_dir'], search_opts=search_opts_expected, ) self.assertEqual(1, len(result['snapshots'])) self.assertEqual(snapshots[1]['id'], result['snapshots'][0]['id']) self.assertEqual( snapshots[1]['display_name'], result['snapshots'][0]['name']) def test_snapshot_list_summary_with_search_opts_by_non_admin(self): self._snapshot_list_summary_with_search_opts(use_admin_context=False) def test_snapshot_list_summary_with_search_opts_by_admin(self): self._snapshot_list_summary_with_search_opts(use_admin_context=True) def _snapshot_list_detail_with_search_opts(self, use_admin_context): search_opts = fake_share.search_opts() # fake_key should be filtered for non-admin url = '/shares/detail?fake_key=fake_value' for k, v in search_opts.items(): url = url + '&' + k + '=' + v req = fakes.HTTPRequest.blank(url, use_admin_context=use_admin_context) snapshots = [ { 'id': 'id1', 'display_name': 'n1', 'status': 'fake_status', 'aggregate_status': 'fake_status', }, { 'id': 'id2', 'display_name': 'n2', 'status': 'someotherstatus', 'aggregate_status': 'fake_status', 'share_id': 'fake_share_id', }, { 'id': 'id3', 'display_name': 'n3', 'status': 'fake_status', 'aggregate_status': 'fake_status', }, ] self.mock_object(share_api.API, 'get_all_snapshots', mock.Mock(return_value=snapshots)) result = self.controller.detail(req) search_opts_expected = { 'display_name': search_opts['name'], 'status': search_opts['status'], 'share_id': search_opts['share_id'], } if use_admin_context: search_opts_expected.update({'fake_key': 'fake_value'}) share_api.API.get_all_snapshots.assert_called_once_with( req.environ['manila.context'], sort_key=search_opts['sort_key'], sort_dir=search_opts['sort_dir'], search_opts=search_opts_expected, ) self.assertEqual(1, len(result['snapshots'])) self.assertEqual(snapshots[1]['id'], result['snapshots'][0]['id']) self.assertEqual( snapshots[1]['display_name'], result['snapshots'][0]['name']) self.assertEqual( snapshots[1]['aggregate_status'], result['snapshots'][0]['status']) self.assertEqual( snapshots[1]['share_id'], 
result['snapshots'][0]['share_id']) def test_snapshot_list_detail_with_search_opts_by_non_admin(self): self._snapshot_list_detail_with_search_opts(use_admin_context=False) def test_snapshot_list_detail_with_search_opts_by_admin(self): self._snapshot_list_detail_with_search_opts(use_admin_context=True) def test_snapshot_list_detail(self): env = {'QUERY_STRING': 'name=Share+Test+Name'} req = fakes.HTTPRequest.blank('/shares/detail', environ=env) res_dict = self.controller.detail(req) expected_s = fake_share.expected_snapshot(id=2) expected = {'snapshots': [expected_s['snapshot']]} self.assertEqual(expected, res_dict) def test_snapshot_updates_description(self): snp = self.snp_example body = {"snapshot": snp} req = fakes.HTTPRequest.blank('/snapshot/1') res_dict = self.controller.update(req, 1, body) self.assertEqual(snp["display_name"], res_dict['snapshot']["name"]) def test_snapshot_updates_display_descr(self): snp = self.snp_example body = {"snapshot": snp} req = fakes.HTTPRequest.blank('/snapshot/1') res_dict = self.controller.update(req, 1, body) self.assertEqual(snp["display_description"], res_dict['snapshot']["description"]) def test_share_not_updates_size(self): snp = self.snp_example body = {"snapshot": snp} req = fakes.HTTPRequest.blank('/snapshot/1') res_dict = self.controller.update(req, 1, body) self.assertNotEqual(snp["size"], res_dict['snapshot']["size"]) @ddt.ddt class ShareSnapshotAdminActionsAPITest(test.TestCase): def setUp(self): super(self.__class__, self).setUp() self.controller = share_snapshots.ShareSnapshotsController() self.flags(rpc_backend='manila.openstack.common.rpc.impl_fake') self.admin_context = context.RequestContext('admin', 'fake', True) self.member_context = context.RequestContext('fake', 'fake') self.resource_name = self.controller.resource_name self.manage_request = fakes.HTTPRequest.blank( '/snapshots/manage', use_admin_context=True, version=MIN_MANAGE_SNAPSHOT_API_VERSION) self.snapshot_id = 'fake' self.unmanage_request = fakes.HTTPRequest.blank( '/snapshots/%s/unmanage' % self.snapshot_id, use_admin_context=True, version=MIN_MANAGE_SNAPSHOT_API_VERSION) def _get_context(self, role): return getattr(self, '%s_context' % role) def _setup_snapshot_data(self, snapshot=None, version='2.7'): if snapshot is None: share = db_utils.create_share() snapshot = db_utils.create_snapshot( status=constants.STATUS_AVAILABLE, share_id=share['id']) req = fakes.HTTPRequest.blank('/v2/fake/snapshots/%s/action' % snapshot['id'], version=version) return snapshot, req def _reset_status(self, ctxt, model, req, db_access_method, valid_code, valid_status=None, body=None, version='2.7'): if float(version) > 2.6: action_name = 'reset_status' else: action_name = 'os-reset_status' if body is None: body = {action_name: {'status': constants.STATUS_ERROR}} req.method = 'POST' req.headers['content-type'] = 'application/json' req.headers['X-Openstack-Manila-Api-Version'] = version req.body = six.b(jsonutils.dumps(body)) req.environ['manila.context'] = ctxt resp = req.get_response(fakes.app()) # validate response code and model status self.assertEqual(valid_code, resp.status_int) if valid_code == 404: self.assertRaises(exception.NotFound, db_access_method, ctxt, model['id']) else: actual_model = db_access_method(ctxt, model['id']) self.assertEqual(valid_status, actual_model['status']) @ddt.data(*fakes.fixture_reset_status_with_different_roles) @ddt.unpack def test_snapshot_reset_status_with_different_roles(self, role, valid_code, valid_status, version): ctxt = self._get_context(role) 
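        # Editor's note, not in the original source: _reset_status() below
        # posts the version-appropriate action ('os-reset_status' up to API
        # 2.6, 'reset_status' from 2.7) with this role's context, then checks
        # both the HTTP response code and the status persisted in the DB.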
snapshot, req = self._setup_snapshot_data(version=version) self._reset_status(ctxt, snapshot, req, db.share_snapshot_get, valid_code, valid_status, version=version) @ddt.data( ({'os-reset_status': {'x-status': 'bad'}}, '2.6'), ({'reset_status': {'x-status': 'bad'}}, '2.7'), ({'os-reset_status': {'status': 'invalid'}}, '2.6'), ({'reset_status': {'status': 'invalid'}}, '2.7'), ) @ddt.unpack def test_snapshot_invalid_reset_status_body(self, body, version): snapshot, req = self._setup_snapshot_data(version=version) self._reset_status(self.admin_context, snapshot, req, db.share_snapshot_get, 400, constants.STATUS_AVAILABLE, body, version=version) def _force_delete(self, ctxt, model, req, db_access_method, valid_code, version='2.7'): if float(version) > 2.6: action_name = 'force_delete' else: action_name = 'os-force_delete' req.method = 'POST' req.headers['content-type'] = 'application/json' req.headers['X-Openstack-Manila-Api-Version'] = version req.body = six.b(jsonutils.dumps({action_name: {}})) req.environ['manila.context'] = ctxt resp = req.get_response(fakes.app()) # Validate response self.assertEqual(valid_code, resp.status_int) @ddt.data(*fakes.fixture_force_delete_with_different_roles) @ddt.unpack def test_snapshot_force_delete_with_different_roles(self, role, resp_code, version): ctxt = self._get_context(role) snapshot, req = self._setup_snapshot_data(version=version) self._force_delete(ctxt, snapshot, req, db.share_snapshot_get, resp_code, version=version) def test_snapshot_force_delete_missing(self): ctxt = self._get_context('admin') snapshot, req = self._setup_snapshot_data(snapshot={'id': 'fake'}) self._force_delete(ctxt, snapshot, req, db.share_snapshot_get, 404) @ddt.data( {}, {'snapshots': {}}, {'snapshot': get_fake_manage_body(share_id='xxxxxxxx')}, {'snapshot': get_fake_manage_body(provider_location='xxxxxxxx')} ) def test_snapshot_manage_invalid_body(self, body): self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) self.assertRaises(webob.exc.HTTPUnprocessableEntity, self.controller.manage, self.manage_request, body) self.mock_policy_check.assert_called_once_with( self.manage_request.environ['manila.context'], self.resource_name, 'manage_snapshot') @ddt.data( get_fake_manage_body(name='foo', description='bar'), get_fake_manage_body(display_name='foo', description='bar'), get_fake_manage_body(name='foo', display_description='bar'), get_fake_manage_body(display_name='foo', display_description='bar'), get_fake_manage_body(display_name='foo', display_description='bar'), ) def test_snapshot_manage(self, data): self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) data['snapshot']['share_id'] = 'fake' data['snapshot']['provider_location'] = 'fake_volume_snapshot_id' data['snapshot']['driver_options'] = {} return_snapshot = {'id': 'fake_snap'} self.mock_object( share_api.API, 'manage_snapshot', mock.Mock( return_value=return_snapshot)) share_snapshot = { 'share_id': 'fake', 'provider_location': 'fake_volume_snapshot_id', 'display_name': 'foo', 'display_description': 'bar', } actual_result = self.controller.manage(self.manage_request, data) share_api.API.manage_snapshot.assert_called_once_with( mock.ANY, share_snapshot, data['snapshot']['driver_options']) self.assertEqual(return_snapshot['id'], actual_result['snapshot']['id']) self.mock_policy_check.assert_called_once_with( self.manage_request.environ['manila.context'], self.resource_name, 'manage_snapshot') 
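    # Editor's note, not in the original source: the manage() tests above and
    # the exception cases below pin down how share-API errors surface over
    # HTTP: ShareNotFound and ShareSnapshotNotFound map to 404 (HTTPNotFound),
    # while ManageInvalidShareSnapshot maps to 409 (HTTPConflict), as asserted
    # in test_manage_exception.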
@ddt.data(exception.ShareNotFound(share_id='fake'), exception.ShareSnapshotNotFound(snapshot_id='fake'), exception.ManageInvalidShareSnapshot(reason='error')) def test_manage_exception(self, exception_type): self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) body = get_fake_manage_body( share_id='fake', provider_location='fake_volume_snapshot_id', driver_options={}) self.mock_object( share_api.API, 'manage_snapshot', mock.Mock( side_effect=exception_type)) if isinstance(exception_type, exception.ManageInvalidShareSnapshot): http_ex = webob.exc.HTTPConflict else: http_ex = webob.exc.HTTPNotFound self.assertRaises(http_ex, self.controller.manage, self.manage_request, body) self.mock_policy_check.assert_called_once_with( self.manage_request.environ['manila.context'], self.resource_name, 'manage_snapshot') @ddt.data('1.0', '2.6', '2.11') def test_manage_version_not_found(self, version): body = get_fake_manage_body( share_id='fake', provider_location='fake_volume_snapshot_id', driver_options={}) fake_req = fakes.HTTPRequest.blank( '/snapshots/manage', use_admin_context=True, version=version) self.assertRaises(exception.VersionNotFoundForAPIMethod, self.controller.manage, fake_req, body) def test_snapshot_unmanage_share_server(self): self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) share = {'status': constants.STATUS_AVAILABLE, 'id': 'bar_id', 'share_server_id': 'fake_server_id'} self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) snapshot = {'status': constants.STATUS_AVAILABLE, 'id': 'foo_id', 'share_id': 'bar_id'} self.mock_object(share_api.API, 'get_snapshot', mock.Mock(return_value=snapshot)) self.assertRaises(webob.exc.HTTPForbidden, self.controller.unmanage, self.unmanage_request, snapshot['id']) self.controller.share_api.get_snapshot.assert_called_once_with( self.unmanage_request.environ['manila.context'], snapshot['id']) self.controller.share_api.get.assert_called_once_with( self.unmanage_request.environ['manila.context'], share['id']) self.mock_policy_check.assert_called_once_with( self.unmanage_request.environ['manila.context'], self.resource_name, 'unmanage_snapshot') @ddt.data(*constants.TRANSITIONAL_STATUSES) def test_snapshot_unmanage_with_transitional_state(self, status): self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) share = {'status': constants.STATUS_AVAILABLE, 'id': 'bar_id'} self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) snapshot = {'status': status, 'id': 'foo_id', 'share_id': 'bar_id'} self.mock_object( self.controller.share_api, 'get_snapshot', mock.Mock(return_value=snapshot)) self.assertRaises( webob.exc.HTTPForbidden, self.controller.unmanage, self.unmanage_request, snapshot['id']) self.controller.share_api.get_snapshot.assert_called_once_with( self.unmanage_request.environ['manila.context'], snapshot['id']) self.controller.share_api.get.assert_called_once_with( self.unmanage_request.environ['manila.context'], share['id']) self.mock_policy_check.assert_called_once_with( self.unmanage_request.environ['manila.context'], self.resource_name, 'unmanage_snapshot') def test_snapshot_unmanage(self): self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) share = {'status': constants.STATUS_AVAILABLE, 'id': 'bar_id', 'host': 'fake_host'} self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) snapshot = {'status': constants.STATUS_AVAILABLE, 
'id': 'foo_id', 'share_id': 'bar_id'} self.mock_object(share_api.API, 'get_snapshot', mock.Mock(return_value=snapshot)) self.mock_object(share_api.API, 'unmanage_snapshot', mock.Mock()) actual_result = self.controller.unmanage(self.unmanage_request, snapshot['id']) self.assertEqual(202, actual_result.status_int) self.controller.share_api.get_snapshot.assert_called_once_with( self.unmanage_request.environ['manila.context'], snapshot['id']) share_api.API.unmanage_snapshot.assert_called_once_with( mock.ANY, snapshot, 'fake_host') self.mock_policy_check.assert_called_once_with( self.unmanage_request.environ['manila.context'], self.resource_name, 'unmanage_snapshot') def test_unmanage_share_not_found(self): self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) self.mock_object( share_api.API, 'get', mock.Mock( side_effect=exception.ShareNotFound(share_id='fake'))) snapshot = {'status': constants.STATUS_AVAILABLE, 'id': 'foo_id', 'share_id': 'bar_id'} self.mock_object(share_api.API, 'get_snapshot', mock.Mock(return_value=snapshot)) self.mock_object(share_api.API, 'unmanage_snapshot', mock.Mock()) self.assertRaises(webob.exc.HTTPNotFound, self.controller.unmanage, self.unmanage_request, 'foo_id') self.mock_policy_check.assert_called_once_with( self.unmanage_request.environ['manila.context'], self.resource_name, 'unmanage_snapshot') def test_unmanage_snapshot_not_found(self): self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) share = {'status': constants.STATUS_AVAILABLE, 'id': 'bar_id'} self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object( share_api.API, 'get_snapshot', mock.Mock( side_effect=exception.ShareSnapshotNotFound( snapshot_id='foo_id'))) self.mock_object(share_api.API, 'unmanage_snapshot', mock.Mock()) self.assertRaises(webob.exc.HTTPNotFound, self.controller.unmanage, self.unmanage_request, 'foo_id') self.mock_policy_check.assert_called_once_with( self.unmanage_request.environ['manila.context'], self.resource_name, 'unmanage_snapshot') @ddt.data('1.0', '2.6', '2.11') def test_unmanage_version_not_found(self, version): snapshot_id = 'fake' fake_req = fakes.HTTPRequest.blank( '/snapshots/%s/unmanage' % snapshot_id, use_admin_context=True, version=version) self.assertRaises(exception.VersionNotFoundForAPIMethod, self.controller.unmanage, fake_req, 'fake') manila-2.0.0/manila/tests/api/v2/test_cgsnapshots.py0000664000567000056710000006310512701407107023567 0ustar jenkinsjenkins00000000000000# Copyright 2015 Alex Meade # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
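# ---------------------------------------------------------------------------
# Editor's illustrative sketch -- not part of the original Manila sources.
# The admin-action tests in this archive repeatedly build a POST body whose
# action key depends on the requested microversion: 'os-<action>' for API
# versions up to 2.6 and the bare '<action>' from 2.7 on, serialized with
# jsonutils and sent as bytes via six.b().  The hypothetical helper below
# isolates just that body-building step.
def _action_body_sketch(action, version, payload=None):
    from oslo_serialization import jsonutils
    import six

    name = action if float(version) > 2.6 else 'os-' + action
    return six.b(jsonutils.dumps({name: payload or {}}))
# Example: _action_body_sketch('reset_status', '2.6') returns
# b'{"os-reset_status": {}}'.
# ---------------------------------------------------------------------------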
import copy import datetime import uuid import ddt import mock from oslo_config import cfg from oslo_serialization import jsonutils import six import webob from manila.api.openstack import wsgi import manila.api.v2.cgsnapshots as cgs from manila.common import constants from manila import context from manila import db from manila import exception from manila import policy from manila import test from manila.tests.api import fakes from manila.tests import db_utils CONF = cfg.CONF @ddt.ddt class CGSnapshotApiTest(test.TestCase): def setUp(self): super(self.__class__, self).setUp() self.controller = cgs.CGSnapshotController() self.resource_name = self.controller.resource_name self.api_version = '2.4' self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) self.request = fakes.HTTPRequest.blank('/consistency-groups', version=self.api_version, experimental=True) self.context = self.request.environ['manila.context'] self.admin_context = context.RequestContext('admin', 'fake', True) self.member_context = context.RequestContext('fake', 'fake') self.flags(rpc_backend='manila.openstack.common.rpc.impl_fake') def _get_fake_cgsnapshot(self, **values): snap = { 'id': 'fake_id', 'user_id': 'fakeuser', 'project_id': 'fakeproject', 'status': constants.STATUS_CREATING, 'name': None, 'description': None, 'consistency_group_id': None, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), } snap.update(**values) expected_snap = copy.deepcopy(snap) del expected_snap['user_id'] expected_snap['links'] = mock.ANY return snap, expected_snap def _get_fake_simple_cgsnapshot(self, **values): snap = { 'id': 'fake_id', 'name': None, } snap.update(**values) expected_snap = copy.deepcopy(snap) expected_snap['links'] = mock.ANY return snap, expected_snap def _get_fake_cgsnapshot_member(self, **values): member = { 'id': 'fake_id', 'user_id': 'fakeuser', 'project_id': 'fakeproject', 'status': constants.STATUS_CREATING, 'cgsnapshot_id': None, 'share_proto': None, 'share_type_id': None, 'share_id': None, 'size': None, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), } member.update(**values) expected_member = copy.deepcopy(member) del expected_member['user_id'] del expected_member['status'] expected_member['share_protocol'] = member['share_proto'] del expected_member['share_proto'] return member, expected_member def test_create_invalid_body(self): body = {"not_cg_snapshot": {}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_create_no_consistency_group_id(self): body = {"cgnapshot": {}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_create(self): fake_snap, expected_snap = self._get_fake_cgsnapshot() fake_id = six.text_type(uuid.uuid4()) self.mock_object(self.controller.cg_api, 'create_cgsnapshot', mock.Mock(return_value=fake_snap)) body = {"cgsnapshot": {"consistency_group_id": fake_id}} res_dict = self.controller.create(self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') self.controller.cg_api.create_cgsnapshot.assert_called_once_with( self.context, consistency_group_id=fake_id) self.assertEqual(expected_snap, res_dict['cgsnapshot']) def test_create_cg_does_not_exist(self): fake_id = six.text_type(uuid.uuid4()) 
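        # Editor's note, not in the original source: create_cgsnapshot is
        # stubbed below to raise ConsistencyGroupNotFound, which the
        # controller is expected to translate into HTTP 400
        # (webob.exc.HTTPBadRequest).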
self.mock_object(self.controller.cg_api, 'create_cgsnapshot', mock.Mock( side_effect=exception.ConsistencyGroupNotFound( consistency_group_id=six.text_type( uuid.uuid4()) ))) body = {"cgsnapshot": {"consistency_group_id": fake_id}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_create_cg_does_not_a_uuid(self): self.mock_object(self.controller.cg_api, 'create_cgsnapshot', mock.Mock( side_effect=exception.ConsistencyGroupNotFound( consistency_group_id='not_a_uuid' ))) body = {"cgsnapshot": {"consistency_group_id": "not_a_uuid"}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_create_invalid_cg(self): fake_id = six.text_type(uuid.uuid4()) self.mock_object(self.controller.cg_api, 'create_cgsnapshot', mock.Mock( side_effect=exception.InvalidConsistencyGroup( reason='bad_status' ))) body = {"cgsnapshot": {"consistency_group_id": fake_id}} self.assertRaises(webob.exc.HTTPConflict, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_create_with_name(self): fake_name = 'fake_name' fake_snap, expected_snap = self._get_fake_cgsnapshot(name=fake_name) fake_id = six.text_type(uuid.uuid4()) self.mock_object(self.controller.cg_api, 'create_cgsnapshot', mock.Mock(return_value=fake_snap)) body = {"cgsnapshot": {"consistency_group_id": fake_id, "name": fake_name}} res_dict = self.controller.create(self.request, body) self.controller.cg_api.create_cgsnapshot.assert_called_once_with( self.context, consistency_group_id=fake_id, name=fake_name) self.assertEqual(expected_snap, res_dict['cgsnapshot']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_create_with_description(self): fake_description = 'fake_description' fake_snap, expected_snap = self._get_fake_cgsnapshot( description=fake_description) fake_id = six.text_type(uuid.uuid4()) self.mock_object(self.controller.cg_api, 'create_cgsnapshot', mock.Mock(return_value=fake_snap)) body = {"cgsnapshot": {"consistency_group_id": fake_id, "description": fake_description}} res_dict = self.controller.create(self.request, body) self.controller.cg_api.create_cgsnapshot.assert_called_once_with( self.context, consistency_group_id=fake_id, description=fake_description) self.assertEqual(expected_snap, res_dict['cgsnapshot']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_create_with_name_and_description(self): fake_name = 'fake_name' fake_description = 'fake_description' fake_id = six.text_type(uuid.uuid4()) fake_snap, expected_snap = self._get_fake_cgsnapshot( description=fake_description, name=fake_name) self.mock_object(self.controller.cg_api, 'create_cgsnapshot', mock.Mock(return_value=fake_snap)) body = {"cgsnapshot": {"consistency_group_id": fake_id, "description": fake_description, "name": fake_name}} res_dict = self.controller.create(self.request, body) self.controller.cg_api.create_cgsnapshot.assert_called_once_with( self.context, consistency_group_id=fake_id, name=fake_name, description=fake_description) self.assertEqual(expected_snap, res_dict['cgsnapshot']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def 
test_update_with_name_and_description(self): fake_name = 'fake_name' fake_description = 'fake_description' fake_id = six.text_type(uuid.uuid4()) fake_snap, expected_snap = self._get_fake_cgsnapshot( description=fake_description, name=fake_name) self.mock_object(self.controller.cg_api, 'get_cgsnapshot', mock.Mock(return_value=fake_snap)) self.mock_object(self.controller.cg_api, 'update_cgsnapshot', mock.Mock(return_value=fake_snap)) body = {"cgsnapshot": {"description": fake_description, "name": fake_name}} res_dict = self.controller.update(self.request, fake_id, body) self.controller.cg_api.update_cgsnapshot.assert_called_once_with( self.context, fake_snap, dict(name=fake_name, description=fake_description)) self.assertEqual(expected_snap, res_dict['cgsnapshot']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'update') def test_update_snapshot_not_found(self): body = {"cgsnapshot": {}} self.mock_object(self.controller.cg_api, 'get_cgsnapshot', mock.Mock(side_effect=exception.NotFound)) self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, self.request, 'fake_id', body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'update') def test_update_invalid_body(self): body = {"not_cgsnapshot": {}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.request, 'fake_id', body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'update') def test_update_invalid_body_invalid_field(self): body = {"cgsnapshot": {"unknown_field": ""}} exc = self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.request, 'fake_id', body) self.assertTrue('unknown_field' in six.text_type(exc)) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'update') def test_update_invalid_body_readonly_field(self): body = {"cgsnapshot": {"created_at": []}} exc = self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.request, 'fake_id', body) self.assertTrue('created_at' in six.text_type(exc)) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'update') def test_list_index(self): fake_snap, expected_snap = self._get_fake_simple_cgsnapshot() self.mock_object(self.controller.cg_api, 'get_all_cgsnapshots', mock.Mock(return_value=[fake_snap])) res_dict = self.controller.index(self.request) self.assertEqual([expected_snap], res_dict['cgsnapshots']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'get_all') def test_list_index_no_cgs(self): self.mock_object(self.controller.cg_api, 'get_all_cgsnapshots', mock.Mock(return_value=[])) res_dict = self.controller.index(self.request) self.assertEqual([], res_dict['cgsnapshots']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'get_all') def test_list_index_with_limit(self): fake_snap, expected_snap = self._get_fake_simple_cgsnapshot() fake_snap2, expected_snap2 = self._get_fake_simple_cgsnapshot( id="fake_id2") self.mock_object(self.controller.cg_api, 'get_all_cgsnapshots', mock.Mock(return_value=[fake_snap, fake_snap2])) req = fakes.HTTPRequest.blank('/cgsnapshots?limit=1', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.index(req) self.assertEqual(1, len(res_dict['cgsnapshots'])) self.assertEqual([expected_snap], res_dict['cgsnapshots']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def 
test_list_index_with_limit_and_offset(self): fake_snap, expected_snap = self._get_fake_simple_cgsnapshot() fake_snap2, expected_snap2 = self._get_fake_simple_cgsnapshot( id="fake_id2") self.mock_object(self.controller.cg_api, 'get_all_cgsnapshots', mock.Mock(return_value=[fake_snap, fake_snap2])) req = fakes.HTTPRequest.blank('/cgsnapshots?limit=1&offset=1', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.index(req) self.assertEqual(1, len(res_dict['cgsnapshots'])) self.assertEqual([expected_snap2], res_dict['cgsnapshots']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def test_list_detail(self): fake_snap, expected_snap = self._get_fake_cgsnapshot() self.mock_object(self.controller.cg_api, 'get_all_cgsnapshots', mock.Mock(return_value=[fake_snap])) res_dict = self.controller.detail(self.request) self.assertEqual([expected_snap], res_dict['cgsnapshots']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'get_all') def test_list_detail_no_cgs(self): self.mock_object(self.controller.cg_api, 'get_all_cgsnapshots', mock.Mock(return_value=[])) res_dict = self.controller.detail(self.request) self.assertEqual([], res_dict['cgsnapshots']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'get_all') def test_list_detail_with_limit(self): fake_snap, expected_snap = self._get_fake_cgsnapshot() fake_snap2, expected_snap2 = self._get_fake_cgsnapshot( id="fake_id2") self.mock_object(self.controller.cg_api, 'get_all_cgsnapshots', mock.Mock(return_value=[fake_snap, fake_snap2])) req = fakes.HTTPRequest.blank('/cgsnapshots?limit=1', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.detail(req) self.assertEqual(1, len(res_dict['cgsnapshots'])) self.assertEqual([expected_snap], res_dict['cgsnapshots']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def test_list_detail_with_limit_and_offset(self): fake_snap, expected_snap = self._get_fake_cgsnapshot() fake_snap2, expected_snap2 = self._get_fake_cgsnapshot( id="fake_id2") self.mock_object(self.controller.cg_api, 'get_all_cgsnapshots', mock.Mock(return_value=[fake_snap, fake_snap2])) req = fakes.HTTPRequest.blank('/cgsnapshots?limit=1&offset=1', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.detail(req) self.assertEqual(1, len(res_dict['cgsnapshots'])) self.assertEqual([expected_snap2], res_dict['cgsnapshots']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def test_delete(self): fake_snap, expected_snap = self._get_fake_cgsnapshot() self.mock_object(self.controller.cg_api, 'get_cgsnapshot', mock.Mock(return_value=fake_snap)) self.mock_object(self.controller.cg_api, 'delete_cgsnapshot') res = self.controller.delete(self.request, fake_snap['id']) self.assertEqual(202, res.status_code) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'delete') def test_delete_not_found(self): fake_snap, expected_snap = self._get_fake_cgsnapshot() self.mock_object(self.controller.cg_api, 'get_cgsnapshot', mock.Mock(side_effect=exception.NotFound)) self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, self.request, fake_snap['id']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'delete') def 
test_delete_in_conflicting_status(self): fake_snap, expected_snap = self._get_fake_cgsnapshot() self.mock_object(self.controller.cg_api, 'get_cgsnapshot', mock.Mock(return_value=fake_snap)) self.mock_object(self.controller.cg_api, 'delete_cgsnapshot', mock.Mock( side_effect=exception.InvalidCGSnapshot( reason='blah'))) self.assertRaises(webob.exc.HTTPConflict, self.controller.delete, self.request, fake_snap['id']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'delete') def test_show(self): fake_snap, expected_snap = self._get_fake_cgsnapshot() self.mock_object(self.controller.cg_api, 'get_cgsnapshot', mock.Mock(return_value=fake_snap)) res_dict = self.controller.show(self.request, fake_snap['id']) self.assertEqual(expected_snap, res_dict['cgsnapshot']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'get_cgsnapshot') def test_show_cg_not_found(self): fake_snap, expected_snap = self._get_fake_cgsnapshot() self.mock_object(self.controller.cg_api, 'get_cgsnapshot', mock.Mock(side_effect=exception.NotFound)) self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, self.request, fake_snap['id']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'get_cgsnapshot') def test_members_empty(self): self.mock_object(self.controller.cg_api, 'get_all_cgsnapshot_members', mock.Mock(return_value=[])) res_dict = self.controller.members(self.request, 'fake_cg_id') self.assertEqual([], res_dict['cgsnapshot_members']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'get_cgsnapshot') def test_members(self): fake_member, expected_member = self._get_fake_cgsnapshot_member() self.mock_object(self.controller.cg_api, 'get_all_cgsnapshot_members', mock.Mock(return_value=[fake_member])) res_dict = self.controller.members(self.request, 'fake_cg_id') self.assertEqual([expected_member], res_dict['cgsnapshot_members']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'get_cgsnapshot') def test_members_with_limit(self): fake_member, expected_member = self._get_fake_cgsnapshot_member() fake_member2, expected_member2 = self._get_fake_cgsnapshot_member( id="fake_id2") self.mock_object(self.controller.cg_api, 'get_all_cgsnapshot_members', mock.Mock(return_value=[fake_member, fake_member2])) req = fakes.HTTPRequest.blank('/members?limit=1', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.members(req, 'fake_cg_id') self.assertEqual(1, len(res_dict['cgsnapshot_members'])) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_cgsnapshot') def test_members_with_limit_and_offset(self): fake_member, expected_member = self._get_fake_cgsnapshot_member() fake_member2, expected_member2 = self._get_fake_cgsnapshot_member( id="fake_id2") self.mock_object(self.controller.cg_api, 'get_all_cgsnapshot_members', mock.Mock(return_value=[fake_member, fake_member2])) req = fakes.HTTPRequest.blank('/members?limit=1&offset=1', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.members(req, 'fake_cg_id') self.assertEqual(1, len(res_dict['cgsnapshot_members'])) self.assertEqual([expected_member2], res_dict['cgsnapshot_members']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_cgsnapshot') def _get_context(self, role): return getattr(self, '%s_context' % role) def 
_setup_cgsnapshot_data(self, cgsnapshot=None, version='2.7'): if cgsnapshot is None: cgsnapshot = db_utils.create_cgsnapshot( 'fake_id', status=constants.STATUS_AVAILABLE) req = fakes.HTTPRequest.blank('/v2/fake/cgsnapshots/%s/action' % cgsnapshot['id'], version=version) req.headers[wsgi.API_VERSION_REQUEST_HEADER] = version req.headers[wsgi.EXPERIMENTAL_API_REQUEST_HEADER] = 'True' return cgsnapshot, req @ddt.data(*fakes.fixture_force_delete_with_different_roles) @ddt.unpack def test_cgsnapshot_force_delete_with_different_roles(self, role, resp_code, version): cgsnap, req = self._setup_cgsnapshot_data() ctxt = self._get_context(role) req.method = 'POST' req.headers['content-type'] = 'application/json' if float(version) > 2.6: action_name = 'force_delete' else: action_name = 'os-force_delete' body = {action_name: {'status': constants.STATUS_ERROR}} req.body = six.b(jsonutils.dumps(body)) req.headers['X-Openstack-Manila-Api-Version'] = version req.environ['manila.context'] = ctxt with mock.patch.object( policy, 'check_policy', fakes.mock_fake_admin_check): resp = req.get_response(fakes.app()) # Validate response self.assertEqual(resp_code, resp.status_int) @ddt.data(*fakes.fixture_reset_status_with_different_roles) @ddt.unpack def test_cgsnapshot_reset_status_with_different_roles( self, role, valid_code, valid_status, version): ctxt = self._get_context(role) cgsnap, req = self._setup_cgsnapshot_data(version=version) if float(version) > 2.6: action_name = 'reset_status' else: action_name = 'os-reset_status' body = {action_name: {'status': constants.STATUS_ERROR}} req.method = 'POST' req.headers['content-type'] = 'application/json' req.body = six.b(jsonutils.dumps(body)) req.headers['X-Openstack-Manila-Api-Version'] = version req.environ['manila.context'] = ctxt with mock.patch.object( policy, 'check_policy', fakes.mock_fake_admin_check): resp = req.get_response(fakes.app()) # Validate response code and model status self.assertEqual(valid_code, resp.status_int) if valid_code == 404: self.assertRaises(exception.NotFound, db.cgsnapshot_get, ctxt, cgsnap['id']) else: actual_model = db.cgsnapshot_get(ctxt, cgsnap['id']) self.assertEqual(valid_status, actual_model['status']) manila-2.0.0/manila/tests/api/test_extensions.py0000664000567000056710000001106612701407107023102 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
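# Orientation for the extension tests that follow (descriptive only, derived
# from the code below): ExtensionControllerTest lists and fetches extensions
# through router.APIRouter() (GET /fake/extensions and
# /fake/extensions/FOXNSOX), relying on the fake Foxinsocks extension that
# setUp() appends to the osapi_share_extension option, while
# ExtensionAuthorizeTestCase verifies that extension_authorizer() enforces an
# "api_extension:<alias>" or "api_extension:<alias>:<action>" policy rule and
# falls back to a {'project_id', 'user_id'} target when no target is given.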
import ddt import iso8601 import mock from oslo_config import cfg from oslo_serialization import jsonutils import webob from manila.api import extensions from manila.api.v1 import router from manila import policy from manila import test CONF = cfg.CONF NS = "{http://docs.openstack.org/common/api/v1.0}" class ExtensionTestCase(test.TestCase): def setUp(self): super(ExtensionTestCase, self).setUp() ext_list = CONF.osapi_share_extension[:] fox = ('manila.tests.api.extensions.foxinsocks.Foxinsocks') if fox not in ext_list: ext_list.append(fox) self.flags(osapi_share_extension=ext_list) class ExtensionControllerTest(ExtensionTestCase): def setUp(self): super(ExtensionControllerTest, self).setUp() self.ext_list = [] self.ext_list.sort() def test_list_extensions_json(self): app = router.APIRouter() request = webob.Request.blank("/fake/extensions") response = request.get_response(app) self.assertEqual(200, response.status_int) # Make sure we have all the extensions, extra extensions being OK. data = jsonutils.loads(response.body) names = [str(x['name']) for x in data['extensions'] if str(x['name']) in self.ext_list] names.sort() self.assertEqual(self.ext_list, names) # Ensure all the timestamps are valid according to iso8601 for ext in data['extensions']: iso8601.parse_date(ext['updated']) # Make sure that at least Fox in Sox is correct. (fox_ext, ) = [ x for x in data['extensions'] if x['alias'] == 'FOXNSOX'] self.assertEqual( {'name': 'Fox In Socks', 'updated': '2011-01-22T13:25:27-06:00', 'description': 'The Fox In Socks Extension.', 'alias': 'FOXNSOX', 'links': []}, fox_ext) for ext in data['extensions']: url = '/fake/extensions/%s' % ext['alias'] request = webob.Request.blank(url) response = request.get_response(app) output = jsonutils.loads(response.body) self.assertEqual(ext['alias'], output['extension']['alias']) def test_get_extension_json(self): app = router.APIRouter() request = webob.Request.blank("/fake/extensions/FOXNSOX") response = request.get_response(app) self.assertEqual(200, response.status_int) data = jsonutils.loads(response.body) self.assertEqual( {"name": "Fox In Socks", "updated": "2011-01-22T13:25:27-06:00", "description": "The Fox In Socks Extension.", "alias": "FOXNSOX", "links": []}, data['extension']) def test_get_non_existing_extension_json(self): app = router.APIRouter() request = webob.Request.blank("/fake/extensions/4") response = request.get_response(app) self.assertEqual(404, response.status_int) @ddt.ddt class ExtensionAuthorizeTestCase(test.TestCase): @ddt.unpack @ddt.data({'action': 'fake', 'valid': 'api_extension:fake:fake'}, {'action': None, 'valid': 'api_extension:fake'}) def test_extension_authorizer(self, action, valid): self.mock_object(policy, 'enforce') target = 'fake' extensions.extension_authorizer('api', 'fake')( {}, target, action) policy.enforce.assert_called_once_with(mock.ANY, valid, target) def test_extension_authorizer_empty_target(self): self.mock_object(policy, 'enforce') target = None context = mock.Mock() context.project_id = 'fake' context.user_id = 'fake' extensions.extension_authorizer('api', 'fake')( context, target, 'fake') policy.enforce.assert_called_once_with( mock.ANY, mock.ANY, {'project_id': 'fake', 'user_id': 'fake'}) manila-2.0.0/manila/tests/api/v1/0000775000567000056710000000000012701407265017621 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/api/v1/test_share_unmanage.py0000664000567000056710000001621312701407107024205 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt import mock import webob from manila.api.v1 import share_unmanage from manila.common import constants from manila import exception from manila import policy from manila.share import api as share_api from manila import test from manila.tests.api.contrib import stubs from manila.tests.api import fakes @ddt.ddt class ShareUnmanageTest(test.TestCase): """Share Unmanage Test.""" def setUp(self): super(ShareUnmanageTest, self).setUp() self.controller = share_unmanage.ShareUnmanageController() self.resource_name = self.controller.resource_name self.mock_object(share_api.API, 'get_all', stubs.stub_get_all_shares) self.mock_object(share_api.API, 'get', stubs.stub_share_get) self.mock_object(share_api.API, 'update', stubs.stub_share_update) self.mock_object(share_api.API, 'delete', stubs.stub_share_delete) self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get) self.share_id = 'fake' self.request = fakes.HTTPRequest.blank( '/share/%s/unmanage' % self.share_id, use_admin_context=True ) self.context = self.request.environ['manila.context'] self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) def test_unmanage_share(self): share = dict(status=constants.STATUS_AVAILABLE, id='foo_id', instance={}) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, 'unmanage', mock.Mock()) self.mock_object( self.controller.share_api.db, 'share_snapshot_get_all_for_share', mock.Mock(return_value=[])) actual_result = self.controller.unmanage(self.request, share['id']) self.assertEqual(202, actual_result.status_int) self.controller.share_api.db.share_snapshot_get_all_for_share.\ assert_called_once_with( self.request.environ['manila.context'], share['id']) self.controller.share_api.get.assert_called_once_with( self.request.environ['manila.context'], share['id']) share_api.API.unmanage.assert_called_once_with( self.request.environ['manila.context'], share) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'unmanage') def test_unmanage_share_that_has_snapshots(self): share = dict(status=constants.STATUS_AVAILABLE, id='foo_id', instance={}) snapshots = ['foo', 'bar'] self.mock_object(self.controller.share_api, 'unmanage') self.mock_object( self.controller.share_api.db, 'share_snapshot_get_all_for_share', mock.Mock(return_value=snapshots)) self.mock_object( self.controller.share_api, 'get', mock.Mock(return_value=share)) self.assertRaises( webob.exc.HTTPForbidden, self.controller.unmanage, self.request, share['id']) self.assertFalse(self.controller.share_api.unmanage.called) self.controller.share_api.db.share_snapshot_get_all_for_share.\ assert_called_once_with( self.request.environ['manila.context'], share['id']) self.controller.share_api.get.assert_called_once_with( self.request.environ['manila.context'], share['id']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'unmanage') def 
test_unmanage_share_based_on_share_server(self): share = dict(instance=dict(share_server_id='foo_id'), id='bar_id') self.mock_object( self.controller.share_api, 'get', mock.Mock(return_value=share)) self.assertRaises( webob.exc.HTTPForbidden, self.controller.unmanage, self.request, share['id']) self.controller.share_api.get.assert_called_once_with( self.request.environ['manila.context'], share['id']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'unmanage') @ddt.data(*constants.TRANSITIONAL_STATUSES) def test_unmanage_share_with_transitional_state(self, share_status): share = dict(status=share_status, id='foo_id', instance={}) self.mock_object( self.controller.share_api, 'get', mock.Mock(return_value=share)) self.assertRaises( webob.exc.HTTPForbidden, self.controller.unmanage, self.request, share['id']) self.controller.share_api.get.assert_called_once_with( self.request.environ['manila.context'], share['id']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'unmanage') def test_unmanage_share_not_found(self): self.mock_object(share_api.API, 'get', mock.Mock( side_effect=exception.NotFound)) self.mock_object(share_api.API, 'unmanage', mock.Mock()) self.assertRaises(webob.exc.HTTPNotFound, self.controller.unmanage, self.request, self.share_id) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'unmanage') @ddt.data(exception.InvalidShare(reason="fake"), exception.PolicyNotAuthorized(action="fake"),) def test_unmanage_share_invalid(self, side_effect): share = dict(status=constants.STATUS_AVAILABLE, id='foo_id', instance={}) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, 'unmanage', mock.Mock( side_effect=side_effect)) self.assertRaises(webob.exc.HTTPForbidden, self.controller.unmanage, self.request, self.share_id) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'unmanage') def test_wrong_permissions(self): share_id = 'fake' req = fakes.HTTPRequest.blank('/share/%s/unmanage' % share_id, use_admin_context=False) req_context = req.environ['manila.context'] self.assertRaises(webob.exc.HTTPForbidden, self.controller.unmanage, req, share_id) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'unmanage') manila-2.0.0/manila/tests/api/v1/test_limits.py0000664000567000056710000007134012701407107022533 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests dealing with HTTP rate-limiting. 
""" from oslo_serialization import jsonutils import six from six import moves from six.moves import http_client import webob from manila.api.v1 import limits from manila.api import views import manila.context from manila import test TEST_LIMITS = [ limits.Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE), limits.Limit("POST", "*", ".*", 7, limits.PER_MINUTE), limits.Limit("POST", "/shares", "^/shares", 3, limits.PER_MINUTE), limits.Limit("PUT", "*", "", 10, limits.PER_MINUTE), limits.Limit("PUT", "/shares", "^/shares", 5, limits.PER_MINUTE), ] NS = { 'atom': 'http://www.w3.org/2005/Atom', 'ns': 'http://docs.openstack.org/common/api/v1.0' } class BaseLimitTestSuite(test.TestCase): """Base test suite which provides relevant stubs and time abstraction.""" def setUp(self): super(BaseLimitTestSuite, self).setUp() self.time = 0.0 self.mock_object(limits.Limit, "_get_time", self._get_time) self.absolute_limits = {} def stub_get_project_quotas(context, project_id, usages=True): quotas = {} for mapping_key in ('limit', 'in_use'): for k, v in self.absolute_limits.get(mapping_key, {}).items(): if k not in quotas: quotas[k] = {} quotas[k].update({mapping_key: v}) return quotas self.mock_object(manila.quota.QUOTAS, "get_project_quotas", stub_get_project_quotas) def _get_time(self): """Return the "time" according to this test suite.""" return self.time class LimitsControllerTest(BaseLimitTestSuite): """Tests for `limits.LimitsController` class.""" def setUp(self): """Run before each test.""" super(LimitsControllerTest, self).setUp() self.controller = limits.create_resource() def _get_index_request(self, accept_header="application/json"): """Helper to set routing arguments.""" request = webob.Request.blank("/") request.accept = accept_header request.environ["wsgiorg.routing_args"] = (None, { "action": "index", "controller": "", }) context = manila.context.RequestContext('testuser', 'testproject') request.environ["manila.context"] = context return request def _populate_limits(self, request): """Put limit info into a request.""" _limits = [ limits.Limit("GET", "*", ".*", 10, 60).display(), limits.Limit("POST", "*", ".*", 5, 60 * 60).display(), limits.Limit("GET", "changes-since*", "changes-since", 5, 60).display(), ] request.environ["manila.limits"] = _limits return request def test_empty_index_json(self): """Test getting empty limit details in JSON.""" request = self._get_index_request() response = request.get_response(self.controller) expected = { "limits": { "rate": [], "absolute": {}, }, } body = jsonutils.loads(response.body) self.assertEqual(expected, body) def test_index_json(self): """Test getting limit details in JSON.""" request = self._get_index_request() request = self._populate_limits(request) self.absolute_limits = { 'limit': { 'shares': 11, 'gigabytes': 22, 'snapshots': 33, 'snapshot_gigabytes': 44, 'share_networks': 55, }, 'in_use': { 'shares': 3, 'gigabytes': 4, 'snapshots': 5, 'snapshot_gigabytes': 6, 'share_networks': 7, }, } response = request.get_response(self.controller) expected = { "limits": { "rate": [ { "regex": ".*", "uri": "*", "limit": [ { "verb": "GET", "next-available": "1970-01-01T00:00:00Z", "unit": "MINUTE", "value": 10, "remaining": 10, }, { "verb": "POST", "next-available": "1970-01-01T00:00:00Z", "unit": "HOUR", "value": 5, "remaining": 5, }, ], }, { "regex": "changes-since", "uri": "changes-since*", "limit": [ { "verb": "GET", "next-available": "1970-01-01T00:00:00Z", "unit": "MINUTE", "value": 5, "remaining": 5, }, ], }, ], "absolute": { "totalSharesUsed": 
3, "totalShareGigabytesUsed": 4, "totalShareSnapshotsUsed": 5, "totalSnapshotGigabytesUsed": 6, "totalShareNetworksUsed": 7, "maxTotalShares": 11, "maxTotalShareGigabytes": 22, "maxTotalShareSnapshots": 33, "maxTotalSnapshotGigabytes": 44, "maxTotalShareNetworks": 55, }, }, } body = jsonutils.loads(response.body) self.assertEqual(expected, body) def _populate_limits_diff_regex(self, request): """Put limit info into a request.""" _limits = [ limits.Limit("GET", "*", ".*", 10, 60).display(), limits.Limit("GET", "*", "*.*", 10, 60).display(), ] request.environ["manila.limits"] = _limits return request def test_index_diff_regex(self): """Test getting limit details in JSON.""" request = self._get_index_request() request = self._populate_limits_diff_regex(request) response = request.get_response(self.controller) expected = { "limits": { "rate": [ { "regex": ".*", "uri": "*", "limit": [ { "verb": "GET", "next-available": "1970-01-01T00:00:00Z", "unit": "MINUTE", "value": 10, "remaining": 10, }, ], }, { "regex": "*.*", "uri": "*", "limit": [ { "verb": "GET", "next-available": "1970-01-01T00:00:00Z", "unit": "MINUTE", "value": 10, "remaining": 10, }, ], }, ], "absolute": {}, }, } body = jsonutils.loads(response.body) self.assertEqual(expected, body) def _test_index_absolute_limits_json(self, expected): request = self._get_index_request() response = request.get_response(self.controller) body = jsonutils.loads(response.body) self.assertEqual(expected, body['limits']['absolute']) def test_index_ignores_extra_absolute_limits_json(self): self.absolute_limits = { 'in_use': {'unknown_limit': 9000}, 'limit': {'unknown_limit': 9001}, } self._test_index_absolute_limits_json({}) class TestLimiter(limits.Limiter): pass class LimitMiddlewareTest(BaseLimitTestSuite): """Tests for the `limits.RateLimitingMiddleware` class.""" @webob.dec.wsgify def _empty_app(self, request): """Do-nothing WSGI app.""" pass def setUp(self): """Prepare middleware for use through fake WSGI app.""" super(LimitMiddlewareTest, self).setUp() _limits = '(GET, *, .*, 1, MINUTE)' self.app = limits.RateLimitingMiddleware(self._empty_app, _limits, "%s.TestLimiter" % self.__class__.__module__) def test_limit_class(self): """Test that middleware selected correct limiter class.""" assert isinstance(self.app._limiter, TestLimiter) def test_good_request(self): """Test successful GET request through middleware.""" request = webob.Request.blank("/") response = request.get_response(self.app) self.assertEqual(200, response.status_int) def test_limited_request_json(self): """Test a rate-limited (413) GET request through middleware.""" request = webob.Request.blank("/") response = request.get_response(self.app) self.assertEqual(200, response.status_int) request = webob.Request.blank("/") response = request.get_response(self.app) self.assertEqual(413, response.status_int) self.assertTrue('Retry-After' in response.headers) retry_after = int(response.headers['Retry-After']) self.assertAlmostEqual(retry_after, 60, 1) body = jsonutils.loads(response.body) expected = "Only 1 GET request(s) can be made to * every minute." 
value = body["overLimitFault"]["details"].strip() self.assertEqual(expected, value) class LimitTest(BaseLimitTestSuite): """Tests for the `limits.Limit` class.""" def test_GET_no_delay(self): """Test a limit handles 1 GET per second.""" limit = limits.Limit("GET", "*", ".*", 1, 1) delay = limit("GET", "/anything") self.assertIsNone(delay) self.assertEqual(0, limit.next_request) self.assertEqual(0, limit.last_request) def test_GET_delay(self): """Test two calls to 1 GET per second limit.""" limit = limits.Limit("GET", "*", ".*", 1, 1) delay = limit("GET", "/anything") self.assertIsNone(delay) delay = limit("GET", "/anything") self.assertEqual(1, delay) self.assertEqual(1, limit.next_request) self.assertEqual(0, limit.last_request) self.time += 4 delay = limit("GET", "/anything") self.assertIsNone(delay) self.assertEqual(4, limit.next_request) self.assertEqual(4, limit.last_request) class ParseLimitsTest(BaseLimitTestSuite): """Test default limits parser. Tests for the default limits parser in the in-memory `limits.Limiter` class. """ def test_invalid(self): """Test that parse_limits() handles invalid input correctly.""" self.assertRaises(ValueError, limits.Limiter.parse_limits, ';;;;;') def test_bad_rule(self): """Test that parse_limits() handles bad rules correctly.""" self.assertRaises(ValueError, limits.Limiter.parse_limits, 'GET, *, .*, 20, minute') def test_missing_arg(self): """Test that parse_limits() handles missing args correctly.""" self.assertRaises(ValueError, limits.Limiter.parse_limits, '(GET, *, .*, 20)') def test_bad_value(self): """Test that parse_limits() handles bad values correctly.""" self.assertRaises(ValueError, limits.Limiter.parse_limits, '(GET, *, .*, foo, minute)') def test_bad_unit(self): """Test that parse_limits() handles bad units correctly.""" self.assertRaises(ValueError, limits.Limiter.parse_limits, '(GET, *, .*, 20, lightyears)') def test_multiple_rules(self): """Test that parse_limits() handles multiple rules correctly.""" try: l = limits.Limiter.parse_limits('(get, *, .*, 20, minute);' '(PUT, /foo*, /foo.*, 10, hour);' '(POST, /bar*, /bar.*, 5, second);' '(Say, /derp*, /derp.*, 1, day)') except ValueError as e: assert False, six.text_types(e) # Make sure the number of returned limits are correct self.assertEqual(4, len(l)) # Check all the verbs... expected = ['GET', 'PUT', 'POST', 'SAY'] self.assertEqual(expected, [t.verb for t in l]) # ...the URIs... expected = ['*', '/foo*', '/bar*', '/derp*'] self.assertEqual(expected, [t.uri for t in l]) # ...the regexes... expected = ['.*', '/foo.*', '/bar.*', '/derp.*'] self.assertEqual(expected, [t.regex for t in l]) # ...the values... expected = [20, 10, 5, 1] self.assertEqual(expected, [t.value for t in l]) # ...and the units... 
expected = [limits.PER_MINUTE, limits.PER_HOUR, limits.PER_SECOND, limits.PER_DAY] self.assertEqual(expected, [t.unit for t in l]) class LimiterTest(BaseLimitTestSuite): """Tests for the in-memory `limits.Limiter` class.""" def setUp(self): """Run before each test.""" super(LimiterTest, self).setUp() userlimits = {'user:user3': ''} self.limiter = limits.Limiter(TEST_LIMITS, **userlimits) def _check(self, num, verb, url, username=None): """Check and yield results from checks.""" for x in moves.range(num): yield self.limiter.check_for_delay(verb, url, username)[0] def _check_sum(self, num, verb, url, username=None): """Check and sum results from checks.""" results = self._check(num, verb, url, username) return sum(item for item in results if item) def test_no_delay_GET(self): """Test no delay on GET for single call. Simple test to ensure no delay on a single call for a limit verb we didn't set. """ delay = self.limiter.check_for_delay("GET", "/anything") self.assertEqual((None, None), delay) def test_no_delay_PUT(self): """Test no delay on single call. Simple test to ensure no delay on a single call for a known limit. """ delay = self.limiter.check_for_delay("PUT", "/anything") self.assertEqual((None, None), delay) def test_delay_PUT(self): """Ensure 11th PUT will be delayed. Ensure the 11th PUT will result in a delay of 6.0 seconds until the next request will be granted. """ expected = [None] * 10 + [6.0] results = list(self._check(11, "PUT", "/anything")) self.assertEqual(expected, results) def test_delay_POST(self): """Ensure 8th POST will be delayed. Ensure the 8th POST will result in a delay of 6.0 seconds until the next request will be granted. """ expected = [None] * 7 results = list(self._check(7, "POST", "/anything")) self.assertEqual(expected, results) expected = 60.0 / 7.0 results = self._check_sum(1, "POST", "/anything") self.assertAlmostEqual(expected, results, 8) def test_delay_GET(self): """Ensure the 11th GET will result in NO delay.""" expected = [None] * 11 results = list(self._check(11, "GET", "/anything")) self.assertEqual(expected, results) def test_delay_PUT_volumes(self): """Ensure PUT limits. Ensure PUT on /shares limits at 5 requests, and PUT elsewhere is still OK after 5 requests...but then after 11 total requests, PUT limiting kicks in. """ # First 6 requests on PUT /shares expected = [None] * 5 + [12.0] results = list(self._check(6, "PUT", "/shares")) self.assertEqual(expected, results) # Next 5 requests on PUT /anything expected = [None] * 4 + [6.0] results = list(self._check(5, "PUT", "/anything")) self.assertEqual(expected, results) def test_delay_PUT_wait(self): """Test limit handling. Ensure after hitting the limit and then waiting for the correct amount of time, the limit will be lifted.
""" expected = [None] * 10 + [6.0] results = list(self._check(11, "PUT", "/anything")) self.assertEqual(expected, results) # Advance time self.time += 6.0 expected = [None, 6.0] results = list(self._check(2, "PUT", "/anything")) self.assertEqual(expected, results) def test_multiple_delays(self): """Ensure multiple requests still get a delay.""" expected = [None] * 10 + [6.0] * 10 results = list(self._check(20, "PUT", "/anything")) self.assertEqual(expected, results) self.time += 1.0 expected = [5.0] * 10 results = list(self._check(10, "PUT", "/anything")) self.assertEqual(expected, results) def test_user_limit(self): """Test user-specific limits.""" self.assertEqual([], self.limiter.levels['user3']) def test_multiple_users(self): """Tests involving multiple users.""" # User1 expected = [None] * 10 + [6.0] * 10 results = list(self._check(20, "PUT", "/anything", "user1")) self.assertEqual(expected, results) # User2 expected = [None] * 10 + [6.0] * 5 results = list(self._check(15, "PUT", "/anything", "user2")) self.assertEqual(expected, results) # User3 expected = [None] * 20 results = list(self._check(20, "PUT", "/anything", "user3")) self.assertEqual(expected, results) self.time += 1.0 # User1 again expected = [5.0] * 10 results = list(self._check(10, "PUT", "/anything", "user1")) self.assertEqual(expected, results) self.time += 1.0 # User1 again expected = [4.0] * 5 results = list(self._check(5, "PUT", "/anything", "user2")) self.assertEqual(expected, results) class WsgiLimiterTest(BaseLimitTestSuite): """Tests for `limits.WsgiLimiter` class.""" def setUp(self): """Run before each test.""" super(WsgiLimiterTest, self).setUp() self.app = limits.WsgiLimiter(TEST_LIMITS) def _request_data(self, verb, path): """Get data describing a limit request verb/path.""" return six.b(jsonutils.dumps({"verb": verb, "path": path})) def _request(self, verb, url, username=None): """Send request. Make sure that POSTing to the given url causes the given username to perform the given action. Make the internal rate limiter return delay and make sure that the WSGI app returns the correct response. 
""" if username: request = webob.Request.blank("/%s" % username) else: request = webob.Request.blank("/") request.method = "POST" request.body = self._request_data(verb, url) response = request.get_response(self.app) if "X-Wait-Seconds" in response.headers: self.assertEqual(403, response.status_int) return response.headers["X-Wait-Seconds"] self.assertEqual(204, response.status_int) def test_invalid_methods(self): """Only POSTs should work.""" for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]: request = webob.Request.blank("/", method=method) response = request.get_response(self.app) self.assertEqual(405, response.status_int) def test_good_url(self): delay = self._request("GET", "/something") self.assertIsNone(delay) def test_escaping(self): delay = self._request("GET", "/something/jump%20up") self.assertIsNone(delay) def test_response_to_delays(self): delay = self._request("GET", "/delayed") self.assertIsNone(delay) delay = self._request("GET", "/delayed") self.assertEqual('60.00', delay) def test_response_to_delays_usernames(self): delay = self._request("GET", "/delayed", "user1") self.assertIsNone(delay) delay = self._request("GET", "/delayed", "user2") self.assertIsNone(delay) delay = self._request("GET", "/delayed", "user1") self.assertEqual('60.00', delay) delay = self._request("GET", "/delayed", "user2") self.assertEqual('60.00', delay) class FakeHttplibSocket(object): """Fake `http_client.HTTPResponse` replacement.""" def __init__(self, response_string): """Initialize new `FakeHttplibSocket`.""" self._buffer = six.BytesIO(six.b(response_string)) def makefile(self, _mode, _other=None): """Returns the socket's internal buffer.""" return self._buffer class FakeHttplibConnection(object): """Fake `http_client.HTTPConnection`.""" def __init__(self, app, host): """Initialize `FakeHttplibConnection`.""" self.app = app self.host = host def request(self, method, path, body="", headers=None): """Translate request to WSGI app. Requests made via this connection actually get translated and routed into our WSGI app, we then wait for the response and turn it back into an `http_client.HTTPResponse`. """ if not headers: headers = {} req = webob.Request.blank(path) req.method = method req.headers = headers req.host = self.host req.body = six.b(body) resp = str(req.get_response(self.app)) resp = "HTTP/1.0 %s" % resp sock = FakeHttplibSocket(resp) self.http_response = http_client.HTTPResponse(sock) self.http_response.begin() def getresponse(self): """Return our generated response from the request.""" return self.http_response def wire_HTTPConnection_to_WSGI(host, app): """Wire HTTPConnection to WSGI app. Monkeypatches HTTPConnection so that if you try to connect to host, you are instead routed straight to the given WSGI app. After calling this method, when any code calls http_client.HTTPConnection(host) the connection object will be a fake. Its requests will be sent directly to the given WSGI app rather than through a socket. Code connecting to hosts other than host will not be affected. This method may be called multiple times to map different hosts to different apps. This method returns the original HTTPConnection object, so that the caller can restore the default HTTPConnection interface (for all hosts). """ class HTTPConnectionDecorator(object): """Wrapper for HTTPConnection class Wraps the real HTTPConnection class so that when you instantiate the class you might instead get a fake instance. 
""" def __init__(self, wrapped): self.wrapped = wrapped def __call__(self, connection_host, *args, **kwargs): if connection_host == host: return FakeHttplibConnection(app, host) else: return self.wrapped(connection_host, *args, **kwargs) oldHTTPConnection = http_client.HTTPConnection http_client.HTTPConnection = HTTPConnectionDecorator( http_client.HTTPConnection) return oldHTTPConnection class WsgiLimiterProxyTest(BaseLimitTestSuite): """Tests for the `limits.WsgiLimiterProxy` class.""" def setUp(self): """Set up HTTP/WSGI magic. Do some nifty HTTP/WSGI magic which allows for WSGI to be called directly by something like the `http_client` library. """ super(WsgiLimiterProxyTest, self).setUp() self.app = limits.WsgiLimiter(TEST_LIMITS) self.oldHTTPConnection = ( wire_HTTPConnection_to_WSGI("169.254.0.1:80", self.app)) self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80") def test_200(self): """Successful request test.""" delay = self.proxy.check_for_delay("GET", "/anything") self.assertEqual((None, None), delay) def test_403(self): """Forbidden request test.""" delay = self.proxy.check_for_delay("GET", "/delayed") self.assertEqual((None, None), delay) delay, error = self.proxy.check_for_delay("GET", "/delayed") error = error.strip() expected = ("60.00", six.b("403 Forbidden\n\nOnly 1 GET request(s) " "can be made to /delayed every minute.")) self.assertEqual(expected, (delay, error)) def tearDown(self): # restore original HTTPConnection object http_client.HTTPConnection = self.oldHTTPConnection super(WsgiLimiterProxyTest, self).tearDown() class LimitsViewBuilderTest(test.TestCase): def setUp(self): super(LimitsViewBuilderTest, self).setUp() self.view_builder = views.limits.ViewBuilder() self.rate_limits = [{"URI": "*", "regex": ".*", "value": 10, "verb": "POST", "remaining": 2, "unit": "MINUTE", "resetTime": 1311272226}, {"URI": "*/shares", "regex": "^/shares", "value": 50, "verb": "POST", "remaining": 10, "unit": "DAY", "resetTime": 1311272226}] self.absolute_limits = { "limit": { "shares": 111, "gigabytes": 222, "snapshots": 333, "snapshot_gigabytes": 444, "share_networks": 555, }, "in_use": { "shares": 65, "gigabytes": 76, "snapshots": 87, "snapshot_gigabytes": 98, "share_networks": 107, }, } def test_build_limits(self): tdate = "2011-07-21T18:17:06Z" expected_limits = { "limits": { "rate": [ {"uri": "*", "regex": ".*", "limit": [{"value": 10, "verb": "POST", "remaining": 2, "unit": "MINUTE", "next-available": tdate}]}, {"uri": "*/shares", "regex": "^/shares", "limit": [{"value": 50, "verb": "POST", "remaining": 10, "unit": "DAY", "next-available": tdate}]} ], "absolute": { "totalSharesUsed": 65, "totalShareGigabytesUsed": 76, "totalShareSnapshotsUsed": 87, "totalSnapshotGigabytesUsed": 98, "totalShareNetworksUsed": 107, "maxTotalShares": 111, "maxTotalShareGigabytes": 222, "maxTotalShareSnapshots": 333, "maxTotalSnapshotGigabytes": 444, "maxTotalShareNetworks": 555, } } } output = self.view_builder.build(self.rate_limits, self.absolute_limits) self.assertDictMatch(expected_limits, output) def test_build_limits_empty_limits(self): expected_limits = {"limits": {"rate": [], "absolute": {}}} abs_limits = {} rate_limits = [] output = self.view_builder.build(rate_limits, abs_limits) self.assertDictMatch(expected_limits, output) manila-2.0.0/manila/tests/api/v1/__init__.py0000664000567000056710000000000012701407107021713 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/api/v1/stubs.py0000664000567000056710000000763512701407107021341 0ustar jenkinsjenkins00000000000000# Copyright 
2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from manila.common import constants from manila import exception as exc FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' FAKE_UUIDS = {} def stub_volume(id, **kwargs): volume = { 'id': id, 'user_id': 'fakeuser', 'project_id': 'fakeproject', 'host': 'fakehost', 'size': 1, 'availability_zone': 'fakeaz', 'instance_uuid': 'fakeuuid', 'mountpoint': '/', 'status': 'fakestatus', 'attach_status': 'attached', 'bootable': 'false', 'name': 'vol name', 'display_name': 'displayname', 'display_description': 'displaydesc', 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'snapshot_id': None, 'source_volid': None, 'share_type_id': '3e196c20-3c06-11e2-81c1-0800200c9a66', 'volume_type_id': '3e196c20-3c06-11e2-81c1-0800200c9a66', 'volume_metadata': [], 'share_type': {'name': 'share_type_name'}, 'volume_type': {'name': 'share_type_name'}} volume.update(kwargs) return volume def stub_volume_create(self, context, size, name, description, snapshot, **param): vol = stub_volume('1') vol['size'] = size vol['display_name'] = name vol['display_description'] = description vol['source_volid'] = None try: vol['snapshot_id'] = snapshot['id'] except (KeyError, TypeError): vol['snapshot_id'] = None vol['availability_zone'] = param.get('availability_zone', 'fakeaz') return vol def stub_volume_create_from_image(self, context, size, name, description, snapshot, volume_type, metadata, availability_zone): vol = stub_volume('1') vol['status'] = 'creating' vol['size'] = size vol['display_name'] = name vol['display_description'] = description vol['availability_zone'] = 'manila' return vol def stub_volume_update(self, context, *args, **param): pass def stub_volume_delete(self, context, *args, **param): pass def stub_volume_get(self, context, volume_id): return stub_volume(volume_id) def stub_volume_get_notfound(self, context, volume_id): raise exc.NotFound def stub_volume_get_all(context, search_opts=None): return [stub_volume(100, project_id='fake'), stub_volume(101, project_id='superfake'), stub_volume(102, project_id='superduperfake')] def stub_volume_get_all_by_project(self, context, search_opts=None): return [stub_volume_get(self, context, '1')] def stub_snapshot(id, **kwargs): snapshot = {'id': id, 'volume_id': 12, 'status': constants.STATUS_AVAILABLE, 'volume_size': 100, 'created_at': None, 'display_name': 'Default name', 'display_description': 'Default description', 'project_id': 'fake'} snapshot.update(kwargs) return snapshot def stub_snapshot_get_all(self): return [stub_snapshot(100, project_id='fake'), stub_snapshot(101, project_id='superfake'), stub_snapshot(102, project_id='superduperfake')] def stub_snapshot_get_all_by_project(self, context): return [stub_snapshot(1)] def stub_snapshot_update(self, context, *args, **param): pass manila-2.0.0/manila/tests/api/v1/test_security_service.py0000664000567000056710000004063212701407107024621 0ustar jenkinsjenkins00000000000000# Copyright 2012 NetApp # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from six.moves.urllib import parse import webob from manila.api.v1 import security_service from manila.common import constants from manila import db from manila import exception from manila import test from manila.tests.api import fakes class ShareApiTest(test.TestCase): """Share Api Test.""" def setUp(self): super(ShareApiTest, self).setUp() self.controller = security_service.SecurityServiceController() self.maxDiff = None self.ss_active_directory = { "created_at": "fake-time", "updated_at": "fake-time-2", "id": 1, "name": "fake-name", "description": "Fake Security Service Desc", "type": constants.SECURITY_SERVICES_ALLOWED_TYPES[0], "dns_ip": "1.1.1.1", "server": "fake-server", "domain": "fake-domain", "user": "fake-user", "password": "fake-password", "status": constants.STATUS_NEW, "project_id": "fake", } self.ss_ldap = { "created_at": "fake-time", "updated_at": "fake-time-2", "id": 2, "name": "ss-ldap", "description": "Fake Security Service Desc", "type": constants.SECURITY_SERVICES_ALLOWED_TYPES[1], "dns_ip": "2.2.2.2", "server": "test-server", "domain": "test-domain", "user": "test-user", "password": "test-password", "status": "active", "project_id": "fake", } self.valid_search_opts = { 'user': 'fake-user', 'server': 'fake-server', 'dns_ip': '1.1.1.1', 'domain': 'fake-domain', 'type': constants.SECURITY_SERVICES_ALLOWED_TYPES[0], } self.check_policy_patcher = mock.patch( 'manila.api.v1.security_service.policy.check_policy') self.check_policy_patcher.start() self.addCleanup(self._stop_started_patcher, self.check_policy_patcher) self.security_service_list_expected_resp = { 'security_services': [{ 'id': self.ss_active_directory['id'], 'name': self.ss_active_directory['name'], 'type': self.ss_active_directory['type'], 'status': self.ss_active_directory['status'] }, ] } def _stop_started_patcher(self, patcher): if hasattr(patcher, 'is_local'): patcher.stop() def test_security_service_show(self): db.security_service_get = mock.Mock( return_value=self.ss_active_directory) req = fakes.HTTPRequest.blank('/security-services/1') res_dict = self.controller.show(req, '1') expected = self.ss_active_directory.copy() expected.update() self.assertEqual({'security_service': self.ss_active_directory}, res_dict) def test_security_service_show_not_found(self): db.security_service_get = mock.Mock(side_effect=exception.NotFound) req = fakes.HTTPRequest.blank('/shares/1') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, '1') def test_security_service_create(self): sec_service = self.ss_active_directory.copy() create_stub = mock.Mock( return_value=sec_service) self.mock_object(db, 'security_service_create', create_stub) req = fakes.HTTPRequest.blank('/security-services') res_dict = self.controller.create( req, {"security_service": sec_service}) expected = self.ss_active_directory.copy() self.assertEqual({'security_service': expected}, res_dict) def test_security_service_create_invalid_types(self): sec_service = self.ss_active_directory.copy() 
sec_service['type'] = 'invalid' req = fakes.HTTPRequest.blank('/security-services') self.assertRaises(exception.InvalidInput, self.controller.create, req, {"security_service": sec_service}) def test_create_security_service_no_body(self): body = {} req = fakes.HTTPRequest.blank('/security-services') self.assertRaises(webob.exc.HTTPUnprocessableEntity, self.controller.create, req, body) def test_security_service_delete(self): db.security_service_delete = mock.Mock() db.security_service_get = mock.Mock() db.share_network_get_all_by_security_service = mock.Mock( return_value=[]) req = fakes.HTTPRequest.blank('/security_services/1') resp = self.controller.delete(req, 1) db.security_service_delete.assert_called_once_with( req.environ['manila.context'], 1) self.assertEqual(202, resp.status_int) def test_security_service_delete_not_found(self): db.security_service_get = mock.Mock(side_effect=exception.NotFound) req = fakes.HTTPRequest.blank('/security_services/1') self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, 1) def test_security_service_delete_has_share_networks(self): db.security_service_get = mock.Mock() db.share_network_get_all_by_security_service = mock.Mock( return_value=[{'share_network': 'fake_share_network'}]) req = fakes.HTTPRequest.blank('/security_services/1') self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, req, 1) def test_security_service_update_name(self): new = self.ss_active_directory.copy() updated = self.ss_active_directory.copy() updated['name'] = 'new' self.mock_object(security_service.policy, 'check_policy') db.security_service_get = mock.Mock(return_value=new) db.security_service_update = mock.Mock(return_value=updated) db.share_network_get_all_by_security_service = mock.Mock( return_value=[{ 'id': 'fake_id', 'share_servers': 'fake_share_server' }]) body = {"security_service": {"name": "new"}} req = fakes.HTTPRequest.blank('/security_service/1') res_dict = self.controller.update(req, 1, body)['security_service'] self.assertEqual(updated['name'], res_dict['name']) db.share_network_get_all_by_security_service.assert_called_once_with( req.environ['manila.context'], 1) self.assertEqual(2, security_service.policy.check_policy.call_count) security_service.policy.check_policy.assert_has_calls([ mock.call(req.environ['manila.context'], security_service.RESOURCE_NAME, 'update', new) ]) def test_security_service_update_description(self): new = self.ss_active_directory.copy() updated = self.ss_active_directory.copy() updated['description'] = 'new' self.mock_object(security_service.policy, 'check_policy') db.security_service_get = mock.Mock(return_value=new) db.security_service_update = mock.Mock(return_value=updated) db.share_network_get_all_by_security_service = mock.Mock( return_value=[{ 'id': 'fake_id', 'share_servers': 'fake_share_server' }]) body = {"security_service": {"description": "new"}} req = fakes.HTTPRequest.blank('/security_service/1') res_dict = self.controller.update(req, 1, body)['security_service'] self.assertEqual(updated['description'], res_dict['description']) db.share_network_get_all_by_security_service.assert_called_once_with( req.environ['manila.context'], 1) self.assertEqual(2, security_service.policy.check_policy.call_count) security_service.policy.check_policy.assert_has_calls([ mock.call(req.environ['manila.context'], security_service.RESOURCE_NAME, 'update', new) ]) @mock.patch.object(db, 'security_service_get', mock.Mock()) @mock.patch.object(db, 'share_network_get_all_by_security_service', mock.Mock()) def 
test_security_service_update_invalid_keys_sh_server_exists(self): self.mock_object(security_service.policy, 'check_policy') db.share_network_get_all_by_security_service.return_value = [ {'id': 'fake_id', 'share_servers': 'fake_share_servers'}, ] db.security_service_get.return_value = self.ss_active_directory.copy() body = {'security_service': {'user_id': 'new_user'}} req = fakes.HTTPRequest.blank('/security_services/1') self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, req, 1, body) db.security_service_get.assert_called_once_with( req.environ['manila.context'], 1) db.share_network_get_all_by_security_service.assert_called_once_with( req.environ['manila.context'], 1) self.assertEqual(1, security_service.policy.check_policy.call_count) security_service.policy.check_policy.assert_has_calls([ mock.call(req.environ['manila.context'], security_service.RESOURCE_NAME, 'update', db.security_service_get.return_value) ]) @mock.patch.object(db, 'security_service_get', mock.Mock()) @mock.patch.object(db, 'security_service_update', mock.Mock()) @mock.patch.object(db, 'share_network_get_all_by_security_service', mock.Mock()) def test_security_service_update_valid_keys_sh_server_exists(self): self.mock_object(security_service.policy, 'check_policy') db.share_network_get_all_by_security_service.return_value = [ {'id': 'fake_id', 'share_servers': 'fake_share_servers'}, ] old = self.ss_active_directory.copy() updated = self.ss_active_directory.copy() updated['name'] = 'new name' updated['description'] = 'new description' db.security_service_get.return_value = old db.security_service_update.return_value = updated body = { 'security_service': { 'description': 'new description', 'name': 'new name', }, } req = fakes.HTTPRequest.blank('/security_services/1') res_dict = self.controller.update(req, 1, body)['security_service'] self.assertEqual(updated['description'], res_dict['description']) self.assertEqual(updated['name'], res_dict['name']) db.security_service_get.assert_called_once_with( req.environ['manila.context'], 1) db.share_network_get_all_by_security_service.assert_called_once_with( req.environ['manila.context'], 1) db.security_service_update.assert_called_once_with( req.environ['manila.context'], 1, body['security_service']) self.assertEqual(2, security_service.policy.check_policy.call_count) security_service.policy.check_policy.assert_has_calls([ mock.call(req.environ['manila.context'], security_service.RESOURCE_NAME, 'update', old) ]) def test_security_service_list(self): db.security_service_get_all_by_project = mock.Mock( return_value=[self.ss_active_directory.copy()]) req = fakes.HTTPRequest.blank('/security_services') res_dict = self.controller.index(req) self.assertEqual(self.security_service_list_expected_resp, res_dict) @mock.patch.object(db, 'share_network_get', mock.Mock()) def test_security_service_list_filter_by_sn(self): sn = { 'id': 'fake_sn_id', 'security_services': [self.ss_active_directory, ], } db.share_network_get.return_value = sn req = fakes.HTTPRequest.blank( '/security-services?share_network_id=fake_sn_id') res_dict = self.controller.index(req) self.assertEqual(self.security_service_list_expected_resp, res_dict) db.share_network_get.assert_called_once_with( req.environ['manila.context'], sn['id']) @mock.patch.object(db, 'security_service_get_all', mock.Mock()) def test_security_services_list_all_tenants_admin_context(self): self.check_policy_patcher.stop() db.security_service_get_all.return_value = [ self.ss_active_directory, self.ss_ldap, ] req = 
fakes.HTTPRequest.blank( '/security-services?all_tenants=1&name=fake-name', use_admin_context=True) res_dict = self.controller.index(req) self.assertEqual(self.security_service_list_expected_resp, res_dict) db.security_service_get_all.assert_called_once_with( req.environ['manila.context']) @mock.patch.object(db, 'security_service_get_all', mock.Mock()) def test_security_services_list_all_tenants_non_admin_context(self): self.check_policy_patcher.stop() db.security_service_get_all.return_value = [ self.ss_active_directory, self.ss_ldap, ] req = fakes.HTTPRequest.blank( '/security-services?all_tenants=1') self.assertRaises(exception.PolicyNotAuthorized, self.controller.index, req) self.assertFalse(db.security_service_get_all.called) @mock.patch.object(db, 'security_service_get_all_by_project', mock.Mock()) def test_security_services_list_admin_context_invalid_opts(self): db.security_service_get_all_by_project.return_value = [ self.ss_active_directory, self.ss_ldap, ] req = fakes.HTTPRequest.blank( '/security-services?fake_opt=fake_value', use_admin_context=True) res_dict = self.controller.index(req) self.assertEqual({'security_services': []}, res_dict) db.security_service_get_all_by_project.assert_called_once_with( req.environ['manila.context'], req.environ['manila.context'].project_id) @mock.patch.object(db, 'security_service_get_all_by_project', mock.Mock()) def test_security_service_list_all_filter_opts_separately(self): db.security_service_get_all_by_project.return_value = [ self.ss_active_directory, self.ss_ldap, ] for opt, val in self.valid_search_opts.items(): for use_admin_context in [True, False]: req = fakes.HTTPRequest.blank( '/security-services?' + opt + '=' + val, use_admin_context=use_admin_context) res_dict = self.controller.index(req) self.assertEqual(self.security_service_list_expected_resp, res_dict) db.security_service_get_all_by_project.assert_called_with( req.environ['manila.context'], req.environ['manila.context'].project_id) @mock.patch.object(db, 'security_service_get_all_by_project', mock.Mock()) def test_security_service_list_all_filter_opts(self): db.security_service_get_all_by_project.return_value = [ self.ss_active_directory, self.ss_ldap, ] query_string = '/security-services?' + parse.urlencode(sorted( [(k, v) for (k, v) in list(self.valid_search_opts.items())])) for use_admin_context in [True, False]: req = fakes.HTTPRequest.blank(query_string, use_admin_context=use_admin_context) res_dict = self.controller.index(req) self.assertEqual(self.security_service_list_expected_resp, res_dict) db.security_service_get_all_by_project.assert_called_with( req.environ['manila.context'], req.environ['manila.context'].project_id) manila-2.0.0/manila/tests/api/v1/test_share_types_extra_specs.py0000664000567000056710000004141312701407107026156 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack Foundation # Copyright 2011 University of Southern California # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
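# Orientation for the extra specs tests that follow (descriptive only, derived
# from the code below): ShareTypeExtraSpecsController is exercised against a
# stubbed manila.db layer. index/show return whatever
# share_type_extra_specs_get yields, create/delete call
# share_type_extra_specs_update_or_create / share_type_extra_specs_delete and
# are expected to emit exactly one notification (tracked via
# fake_notifier.NOTIFICATIONS), empty or 256-character keys are rejected with
# HTTPBadRequest, and the required DRIVER_HANDLES_SHARE_SERVERS spec cannot be
# deleted (HTTPForbidden).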
import ddt import mock from oslo_utils import strutils import webob from manila.api.v1 import share_types_extra_specs from manila.common import constants from manila import exception from manila import policy from manila import test from manila.tests.api import fakes from manila.tests import fake_notifier import manila.wsgi DRIVER_HANDLES_SHARE_SERVERS = ( constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS) def return_create_share_type_extra_specs(context, share_type_id, extra_specs): return stub_share_type_extra_specs() def return_share_type_extra_specs(context, share_type_id): return stub_share_type_extra_specs() def return_empty_share_type_extra_specs(context, share_type_id): return {} def delete_share_type_extra_specs(context, share_type_id, key): pass def delete_share_type_extra_specs_not_found(context, share_type_id, key): raise exception.ShareTypeExtraSpecsNotFound("Not Found") def stub_share_type_extra_specs(): specs = {"key1": "value1", "key2": "value2", "key3": "value3", "key4": "value4", "key5": "value5"} return specs def share_type_get(context, id, inactive=False, expected_fields=None): pass def get_large_string(): return "s" * 256 def get_extra_specs_dict(extra_specs, include_required=True): if not extra_specs: extra_specs = {} if include_required: extra_specs[DRIVER_HANDLES_SHARE_SERVERS] = False return {'extra_specs': extra_specs} @ddt.ddt class ShareTypesExtraSpecsTest(test.TestCase): def setUp(self): super(ShareTypesExtraSpecsTest, self).setUp() self.flags(host='fake') self.mock_object(manila.db, 'share_type_get', share_type_get) self.api_path = '/v2/fake/os-share-types/1/extra_specs' self.controller = ( share_types_extra_specs.ShareTypeExtraSpecsController()) self.resource_name = self.controller.resource_name self.mock_policy_check = self.mock_object(policy, 'check_policy') """to reset notifier drivers left over from other api/contrib tests""" self.addCleanup(fake_notifier.reset) def test_index(self): self.mock_object(manila.db, 'share_type_extra_specs_get', return_share_type_extra_specs) req = fakes.HTTPRequest.blank(self.api_path) req_context = req.environ['manila.context'] res_dict = self.controller.index(req, 1) self.assertEqual('value1', res_dict['extra_specs']['key1']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'index') def test_index_no_data(self): self.mock_object(manila.db, 'share_type_extra_specs_get', return_empty_share_type_extra_specs) req = fakes.HTTPRequest.blank(self.api_path) req_context = req.environ['manila.context'] res_dict = self.controller.index(req, 1) self.assertEqual(0, len(res_dict['extra_specs'])) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'index') def test_show(self): self.mock_object(manila.db, 'share_type_extra_specs_get', return_share_type_extra_specs) req = fakes.HTTPRequest.blank(self.api_path + '/key5') req_context = req.environ['manila.context'] res_dict = self.controller.show(req, 1, 'key5') self.assertEqual('value5', res_dict['key5']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'show') def test_show_spec_not_found(self): self.mock_object(manila.db, 'share_type_extra_specs_get', return_empty_share_type_extra_specs) req = fakes.HTTPRequest.blank(self.api_path + '/key6') req_context = req.environ['manila.context'] self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, 1, 'key6') self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'show') def test_delete(self): 
self.mock_object(manila.db, 'share_type_extra_specs_delete', delete_share_type_extra_specs) self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) req = fakes.HTTPRequest.blank(self.api_path + '/key5') req_context = req.environ['manila.context'] self.controller.delete(req, 1, 'key5') self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'delete') def test_delete_not_found(self): self.mock_object(manila.db, 'share_type_extra_specs_delete', delete_share_type_extra_specs_not_found) req = fakes.HTTPRequest.blank(self.api_path + '/key6') req_context = req.environ['manila.context'] self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, 1, 'key6') self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'delete') def test_delete_forbidden(self): req = fakes.HTTPRequest.blank( self.api_path + '/' + DRIVER_HANDLES_SHARE_SERVERS) req_context = req.environ['manila.context'] self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, req, 1, DRIVER_HANDLES_SHARE_SERVERS) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'delete') @ddt.data( get_extra_specs_dict({}), {'foo': 'bar'}, {DRIVER_HANDLES_SHARE_SERVERS + 'foo': True}, {'foo' + DRIVER_HANDLES_SHARE_SERVERS: False}, *[{DRIVER_HANDLES_SHARE_SERVERS: v} for v in strutils.TRUE_STRINGS + strutils.FALSE_STRINGS] ) def test_create(self, data): body = {'extra_specs': data} self.mock_object( manila.db, 'share_type_extra_specs_update_or_create', mock.Mock(return_value=return_create_share_type_extra_specs)) self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) req = fakes.HTTPRequest.blank(self.api_path) req_context = req.environ['manila.context'] res_dict = self.controller.create(req, 1, body) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) for k, v in data.items(): self.assertIn(k, res_dict['extra_specs']) self.assertEqual(v, res_dict['extra_specs'][k]) manila.db.share_type_extra_specs_update_or_create.\ assert_called_once_with( req.environ['manila.context'], 1, body['extra_specs']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'create') def test_create_with_too_small_key(self): self.mock_object( manila.db, 'share_type_extra_specs_update_or_create', mock.Mock(return_value=return_create_share_type_extra_specs)) too_small_key = "" body = {"extra_specs": {too_small_key: "value"}} self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) req = fakes.HTTPRequest.blank(self.api_path) req_context = req.environ['manila.context'] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, 1, body) self.assertFalse( manila.db.share_type_extra_specs_update_or_create.called) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'create') def test_create_with_too_big_key(self): self.mock_object( manila.db, 'share_type_extra_specs_update_or_create', mock.Mock(return_value=return_create_share_type_extra_specs)) too_big_key = "k" * 256 body = {"extra_specs": {too_big_key: "value"}} self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) req = fakes.HTTPRequest.blank(self.api_path) req_context = req.environ['manila.context'] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, 1, body) self.assertFalse( manila.db.share_type_extra_specs_update_or_create.called) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'create') def test_create_with_too_small_value(self): self.mock_object( manila.db, 
'share_type_extra_specs_update_or_create', mock.Mock(return_value=return_create_share_type_extra_specs)) too_small_value = "" body = {"extra_specs": {"key": too_small_value}} self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) req = fakes.HTTPRequest.blank(self.api_path) req_context = req.environ['manila.context'] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, 1, body) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'create') self.assertFalse( manila.db.share_type_extra_specs_update_or_create.called) def test_create_with_too_big_value(self): self.mock_object( manila.db, 'share_type_extra_specs_update_or_create', mock.Mock(return_value=return_create_share_type_extra_specs)) too_big_value = "v" * 256 body = {"extra_specs": {"key": too_big_value}} self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) req = fakes.HTTPRequest.blank(self.api_path) req_context = req.environ['manila.context'] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, 1, body) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'create') self.assertFalse( manila.db.share_type_extra_specs_update_or_create.called) def test_create_key_allowed_chars(self): mock_return_value = {"key1": "value1", "key2": "value2", "key3": "value3", "key4": "value4", "key5": "value5"} self.mock_object( manila.db, 'share_type_extra_specs_update_or_create', mock.Mock(return_value=mock_return_value)) body = get_extra_specs_dict({"other_alphanum.-_:": "value1"}) self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) req = fakes.HTTPRequest.blank(self.api_path) req_context = req.environ['manila.context'] res_dict = self.controller.create(req, 1, body) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) self.assertEqual(mock_return_value['key1'], res_dict['extra_specs']['other_alphanum.-_:']) manila.db.share_type_extra_specs_update_or_create.\ assert_called_once_with( req.environ['manila.context'], 1, body['extra_specs']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'create') def test_create_too_many_keys_allowed_chars(self): mock_return_value = {"key1": "value1", "key2": "value2", "key3": "value3", "key4": "value4", "key5": "value5"} self.mock_object( manila.db, 'share_type_extra_specs_update_or_create', mock.Mock(return_value=mock_return_value)) body = get_extra_specs_dict({ "other_alphanum.-_:": "value1", "other2_alphanum.-_:": "value2", "other3_alphanum.-_:": "value3" }) self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) req = fakes.HTTPRequest.blank(self.api_path) req_context = req.environ['manila.context'] res_dict = self.controller.create(req, 1, body) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) self.assertEqual(mock_return_value['key1'], res_dict['extra_specs']['other_alphanum.-_:']) self.assertEqual(mock_return_value['key2'], res_dict['extra_specs']['other2_alphanum.-_:']) self.assertEqual(mock_return_value['key3'], res_dict['extra_specs']['other3_alphanum.-_:']) manila.db.share_type_extra_specs_update_or_create.\ assert_called_once_with(req_context, 1, body['extra_specs']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'create') def test_update_item(self): self.mock_object( manila.db, 'share_type_extra_specs_update_or_create', mock.Mock(return_value=return_create_share_type_extra_specs)) body = {DRIVER_HANDLES_SHARE_SERVERS: True} self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) req = fakes.HTTPRequest.blank( self.api_path + '/' + 
                DRIVER_HANDLES_SHARE_SERVERS)
        req_context = req.environ['manila.context']

        res_dict = self.controller.update(
            req, 1, DRIVER_HANDLES_SHARE_SERVERS, body)

        self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
        self.assertTrue(res_dict[DRIVER_HANDLES_SHARE_SERVERS])
        manila.db.share_type_extra_specs_update_or_create.\
            assert_called_once_with(req_context, 1, body)
        self.mock_policy_check.assert_called_once_with(
            req_context, self.resource_name, 'update')

    def test_update_item_too_many_keys(self):
        self.mock_object(manila.db, 'share_type_extra_specs_update_or_create')
        body = {"key1": "value1", "key2": "value2"}
        req = fakes.HTTPRequest.blank(self.api_path + '/key1')
        req_context = req.environ['manila.context']

        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 1, 'key1', body)

        self.assertFalse(
            manila.db.share_type_extra_specs_update_or_create.called)
        self.mock_policy_check.assert_called_once_with(
            req_context, self.resource_name, 'update')

    def test_update_item_body_uri_mismatch(self):
        self.mock_object(manila.db, 'share_type_extra_specs_update_or_create')
        body = {"key1": "value1"}
        req = fakes.HTTPRequest.blank(self.api_path + '/bad')
        req_context = req.environ['manila.context']

        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 1, 'bad', body)

        self.assertFalse(
            manila.db.share_type_extra_specs_update_or_create.called)
        self.mock_policy_check.assert_called_once_with(
            req_context, self.resource_name, 'update')

    @ddt.data(None, {}, {"extra_specs": {DRIVER_HANDLES_SHARE_SERVERS: ""}})
    def test_update_invalid_body(self, body):
        req = fakes.HTTPRequest.blank('/v2/fake/types/1/extra_specs')
        req_context = req.environ['manila.context']
        req.method = 'POST'

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.update, req, '1', body)

        self.mock_policy_check.assert_called_once_with(
            req_context, self.resource_name, 'update')

    @ddt.data(
        None,
        {},
        {'foo': {'a': 'b'}},
        {'extra_specs': 'string'},
        {"extra_specs": {"ke/y1": "value1"}},
        {"key1": "value1", "ke/y2": "value2", "key3": "value3"},
        {"extra_specs": {DRIVER_HANDLES_SHARE_SERVERS: ""}},
        {"extra_specs": {DRIVER_HANDLES_SHARE_SERVERS: "111"}},
        {"extra_specs": {"": "value"}},
        {"extra_specs": {"t": get_large_string()}},
        {"extra_specs": {get_large_string(): get_large_string()}},
        {"extra_specs": {get_large_string(): "v"}},
        {"extra_specs": {"k": ""}})
    def test_create_invalid_body(self, body):
        req = fakes.HTTPRequest.blank('/v2/fake/types/1/extra_specs')
        req_context = req.environ['manila.context']
        req.method = 'POST'

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, req, '1', body)

        self.mock_policy_check.assert_called_once_with(
            req_context, self.resource_name, 'create')
manila-2.0.0/manila/tests/api/v1/test_share_manage.py0000664000567000056710000001775312701407112023650 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
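
# The tests below exercise the share manage API controller.  A manage
# request body has the form
#
#     {'share': {'export_path': '/fake', 'service_host': 'fake@host#POOL',
#                'protocol': 'fake', 'share_type': 'fake'}}
#
# and the controller is expected to map an unknown service or share type
# to 404, a malformed or down service host to 400, a duplicate share to
# 409, and a policy failure to 403, enforcing the 'manage' policy on
# every request.
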
import ddt import mock import webob from manila.api.v1 import share_manage from manila.db import api as db_api from manila import exception from manila import policy from manila.share import api as share_api from manila.share import share_types from manila import test from manila.tests.api import fakes from manila import utils def get_fake_manage_body(export_path='/fake', service_host='fake@host#POOL', protocol='fake', share_type='fake', **kwargs): fake_share = { 'export_path': export_path, 'service_host': service_host, 'protocol': protocol, 'share_type': share_type, } fake_share.update(kwargs) return {'share': fake_share} @ddt.ddt class ShareManageTest(test.TestCase): """Share Manage Test.""" def setUp(self): super(ShareManageTest, self).setUp() self.controller = share_manage.ShareManageController() self.resource_name = self.controller.resource_name self.request = fakes.HTTPRequest.blank('/share/manage', use_admin_context=True) self.context = self.request.environ['manila.context'] self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) @ddt.data({}, {'shares': {}}, {'share': get_fake_manage_body('', None, None)}) def test_share_manage_invalid_body(self, body): self.assertRaises(webob.exc.HTTPUnprocessableEntity, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'manage') def test_share_manage_service_not_found(self): body = get_fake_manage_body() self.mock_object(db_api, 'service_get_by_host_and_topic', mock.Mock( side_effect=exception.ServiceNotFound(service_id='fake'))) self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'manage') def test_share_manage_share_type_not_found(self): body = get_fake_manage_body() self.mock_object(db_api, 'service_get_by_host_and_topic', mock.Mock()) self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True)) self.mock_object(db_api, 'share_type_get_by_name', mock.Mock( side_effect=exception.ShareTypeNotFoundByName( share_type_name='fake'))) self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'manage') def _setup_manage_mocks(self, service_is_up=True): self.mock_object(db_api, 'service_get_by_host_and_topic', mock.Mock( return_value={'host': 'fake'})) self.mock_object(share_types, 'get_share_type_by_name_or_id', mock.Mock(return_value={'id': 'fake'})) self.mock_object(utils, 'service_is_up', mock.Mock( return_value=service_is_up)) @ddt.data({'service_is_up': False, 'service_host': 'fake@host#POOL'}, {'service_is_up': True, 'service_host': 'fake@host'}) def test_share_manage_bad_request(self, settings): body = get_fake_manage_body(service_host=settings.pop('service_host')) self._setup_manage_mocks(**settings) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'manage') def test_share_manage_duplicate_share(self): body = get_fake_manage_body() self._setup_manage_mocks() self.mock_object(share_api.API, 'manage', mock.Mock(side_effect=exception.ManilaException())) self.assertRaises(webob.exc.HTTPConflict, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'manage') def test_share_manage_forbidden_manage(self): body = 
get_fake_manage_body() self._setup_manage_mocks() error = mock.Mock(side_effect=exception.PolicyNotAuthorized(action='')) self.mock_object(share_api.API, 'manage', error) self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'manage') def test_share_manage_forbidden_validate_service_host(self): body = get_fake_manage_body() self._setup_manage_mocks() error = mock.Mock(side_effect=exception.PolicyNotAuthorized(action='')) self.mock_object(utils, 'service_is_up', mock.Mock(side_effect=error)) self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'manage') @ddt.data( get_fake_manage_body(name='foo', description='bar'), get_fake_manage_body(display_name='foo', description='bar'), get_fake_manage_body(name='foo', display_description='bar'), get_fake_manage_body(display_name='foo', display_description='bar'), get_fake_manage_body(display_name='foo', display_description='bar', driver_options=dict(volume_id='quuz')), ) def test_share_manage(self, data): self._setup_manage_mocks() return_share = {'share_type_id': '', 'id': 'fake'} self.mock_object( share_api.API, 'manage', mock.Mock(return_value=return_share)) share = { 'host': data['share']['service_host'], 'export_location': data['share']['export_path'], 'share_proto': data['share']['protocol'].upper(), 'share_type_id': 'fake', 'display_name': 'foo', 'display_description': 'bar', } data['share']['is_public'] = 'foo' driver_options = data['share'].get('driver_options', {}) actual_result = self.controller.create(self.request, data) share_api.API.manage.assert_called_once_with( mock.ANY, share, driver_options) self.assertIsNotNone(actual_result) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'manage') def test_wrong_permissions(self): body = get_fake_manage_body() fake_req = fakes.HTTPRequest.blank( '/share/manage', use_admin_context=False) self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, fake_req, body) self.mock_policy_check.assert_called_once_with( fake_req.environ['manila.context'], self.resource_name, 'manage') manila-2.0.0/manila/tests/api/v1/test_scheduler_stats.py0000664000567000056710000001554112701407107024427 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
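
# The tests below exercise the scheduler stats API controller.  Pool
# listings are built from the scheduler RPC API's get_pools() result;
# a pool entry such as
#
#     {'name': 'host1@backend1#pool1', 'host': 'host1',
#      'backend': 'backend1', 'pool': 'pool1', 'capabilities': {...}}
#
# is returned with its capabilities by pools_detail() and without them by
# pools_index(), and query parameters (host, backend, pool) are passed
# through to get_pools() as filters.
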
import mock from manila.api.v1 import scheduler_stats from manila import context from manila import policy from manila.scheduler import rpcapi from manila import test from manila.tests.api import fakes FAKE_POOLS = [ { 'name': 'host1@backend1#pool1', 'host': 'host1', 'backend': 'backend1', 'pool': 'pool1', 'capabilities': { 'updated': None, 'total_capacity': 1024, 'free_capacity': 100, 'share_backend_name': 'pool1', 'reserved_percentage': 0, 'driver_version': '1.0.0', 'storage_protocol': 'iSCSI', 'qos': 'False', }, }, { 'name': 'host1@backend1#pool2', 'host': 'host1', 'backend': 'backend1', 'pool': 'pool2', 'capabilities': { 'updated': None, 'total_capacity': 512, 'free_capacity': 200, 'share_backend_name': 'pool2', 'reserved_percentage': 0, 'driver_version': '1.0.1', 'storage_protocol': 'iSER', 'qos': 'True', }, }, ] class SchedulerStatsControllerTestCase(test.TestCase): def setUp(self): super(SchedulerStatsControllerTestCase, self).setUp() self.flags(host='fake') self.controller = scheduler_stats.SchedulerStatsController() self.resource_name = self.controller.resource_name self.ctxt = context.RequestContext('admin', 'fake', True) self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) def test_pools_index(self): mock_get_pools = self.mock_object(rpcapi.SchedulerAPI, 'get_pools', mock.Mock(return_value=FAKE_POOLS)) req = fakes.HTTPRequest.blank('/v1/fake_project/scheduler_stats/pools') req.environ['manila.context'] = self.ctxt result = self.controller.pools_index(req) expected = { 'pools': [ { 'name': 'host1@backend1#pool1', 'host': 'host1', 'backend': 'backend1', 'pool': 'pool1', }, { 'name': 'host1@backend1#pool2', 'host': 'host1', 'backend': 'backend1', 'pool': 'pool2', } ] } self.assertDictMatch(result, expected) mock_get_pools.assert_called_once_with(self.ctxt, filters={}) self.mock_policy_check.assert_called_once_with( self.ctxt, self.resource_name, 'index') def test_pools_index_with_filters(self): mock_get_pools = self.mock_object(rpcapi.SchedulerAPI, 'get_pools', mock.Mock(return_value=FAKE_POOLS)) url = '/v1/fake_project/scheduler-stats/pools/detail' url += '?backend=.%2A&host=host1&pool=pool%2A' req = fakes.HTTPRequest.blank(url) req.environ['manila.context'] = self.ctxt result = self.controller.pools_index(req) expected = { 'pools': [ { 'name': 'host1@backend1#pool1', 'host': 'host1', 'backend': 'backend1', 'pool': 'pool1', }, { 'name': 'host1@backend1#pool2', 'host': 'host1', 'backend': 'backend1', 'pool': 'pool2', } ] } expected_filters = {'host': 'host1', 'pool': 'pool*', 'backend': '.*'} self.assertDictMatch(result, expected) mock_get_pools.assert_called_once_with(self.ctxt, filters=expected_filters) self.mock_policy_check.assert_called_once_with( self.ctxt, self.resource_name, 'index') def test_get_pools_detail(self): mock_get_pools = self.mock_object(rpcapi.SchedulerAPI, 'get_pools', mock.Mock(return_value=FAKE_POOLS)) req = fakes.HTTPRequest.blank( '/v1/fake_project/scheduler_stats/pools/detail') req.environ['manila.context'] = self.ctxt result = self.controller.pools_detail(req) expected = { 'pools': [ { 'name': 'host1@backend1#pool1', 'host': 'host1', 'backend': 'backend1', 'pool': 'pool1', 'capabilities': { 'updated': None, 'total_capacity': 1024, 'free_capacity': 100, 'share_backend_name': 'pool1', 'reserved_percentage': 0, 'driver_version': '1.0.0', 'storage_protocol': 'iSCSI', 'qos': 'False', }, }, { 'name': 'host1@backend1#pool2', 'host': 'host1', 'backend': 'backend1', 'pool': 'pool2', 'capabilities': { 'updated': None, 
'total_capacity': 512, 'free_capacity': 200, 'share_backend_name': 'pool2', 'reserved_percentage': 0, 'driver_version': '1.0.1', 'storage_protocol': 'iSER', 'qos': 'True', }, }, ], } self.assertDictMatch(expected, result) mock_get_pools.assert_called_once_with(self.ctxt, filters={}) self.mock_policy_check.assert_called_once_with( self.ctxt, self.resource_name, 'detail') class SchedulerStatsTestCase(test.TestCase): def test_create_resource(self): result = scheduler_stats.create_resource() self.assertIsInstance(result.controller, scheduler_stats.SchedulerStatsController) manila-2.0.0/manila/tests/api/v1/test_share_networks.py0000664000567000056710000006716012701407107024275 0ustar jenkinsjenkins00000000000000# Copyright 2014 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt import mock from oslo_db import exception as db_exception from oslo_utils import timeutils from six.moves.urllib import parse from webob import exc as webob_exc from manila.api.v1 import share_networks from manila.db import api as db_api from manila import exception from manila import quota from manila import test from manila.tests.api import fakes fake_share_network = { 'id': 'fake network id', 'project_id': 'fake project', 'created_at': timeutils.parse_strtime('2002-02-02', fmt="%Y-%m-%d"), 'updated_at': None, 'neutron_net_id': 'fake net id', 'neutron_subnet_id': 'fake subnet id', 'network_type': 'vlan', 'segmentation_id': 1000, 'cidr': '10.0.0.0/24', 'ip_version': 4, 'name': 'fake name', 'description': 'fake description', 'share_servers': [], 'security_services': [] } fake_share_network_shortened = { 'id': 'fake network id', 'name': 'fake name', } fake_share_network_with_ss = { 'id': 'sn-id', 'project_id': 'fake', 'created_at': timeutils.parse_strtime('2001-01-01', fmt="%Y-%m-%d"), 'updated_at': None, 'neutron_net_id': '1111', 'neutron_subnet_id': '2222', 'network_type': 'local', 'segmentation_id': 2000, 'cidr': '8.0.0.0/12', 'ip_version': 6, 'name': 'test-sn', 'description': 'fake description', 'share_servers': [], 'security_services': [{'id': 'fake-ss-id'}] } fake_sn_with_ss_shortened = { 'id': 'sn-id', 'name': 'test-sn', } QUOTAS = quota.QUOTAS @ddt.ddt class ShareNetworkAPITest(test.TestCase): def setUp(self): super(ShareNetworkAPITest, self).setUp() self.controller = share_networks.ShareNetworkController() self.req = fakes.HTTPRequest.blank('/share-networks') self.body = {share_networks.RESOURCE_NAME: {'name': 'fake name'}} self.context = self.req.environ['manila.context'] def _check_share_network_view_shortened(self, view, share_nw): self.assertEqual(share_nw['id'], view['id']) self.assertEqual(share_nw['name'], view['name']) def _check_share_network_view(self, view, share_nw): self.assertEqual(share_nw['id'], view['id']) self.assertEqual(share_nw['project_id'], view['project_id']) self.assertEqual(share_nw['created_at'], view['created_at']) self.assertEqual(share_nw['updated_at'], view['updated_at']) self.assertEqual(share_nw['neutron_net_id'], view['neutron_net_id']) 
self.assertEqual(share_nw['neutron_subnet_id'], view['neutron_subnet_id']) self.assertEqual(share_nw['network_type'], view['network_type']) self.assertEqual(share_nw['segmentation_id'], view['segmentation_id']) self.assertEqual(share_nw['cidr'], view['cidr']) self.assertEqual(share_nw['ip_version'], view['ip_version']) self.assertEqual(share_nw['name'], view['name']) self.assertEqual(share_nw['description'], view['description']) self.assertEqual(share_nw['created_at'], view['created_at']) self.assertEqual(share_nw['updated_at'], view['updated_at']) self.assertFalse('shares' in view) self.assertFalse('network_allocations' in view) self.assertFalse('security_services' in view) @ddt.data( {'nova_net_id': 'fake_nova_net_id'}, {'neutron_net_id': 'fake_neutron_net_id'}, {'neutron_subnet_id': 'fake_neutron_subnet_id'}, {'neutron_net_id': 'fake', 'neutron_subnet_id': 'fake'}) def test_create_valid_cases(self, data): data.update({'user_id': 'fake_user_id'}) body = {share_networks.RESOURCE_NAME: data} result = self.controller.create(self.req, body) data.pop('user_id', None) for k, v in data.items(): self.assertIn(data[k], result['share_network'][k]) @ddt.data( {'nova_net_id': 'foo', 'neutron_net_id': 'bar'}, {'nova_net_id': 'foo', 'neutron_subnet_id': 'quuz'}, {'nova_net_id': 'foo', 'neutron_net_id': 'bar', 'neutron_subnet_id': 'quuz'}) def test_create_invalid_cases(self, data): data.update({'user_id': 'fake_user_id'}) body = {share_networks.RESOURCE_NAME: data} self.assertRaises( webob_exc.HTTPBadRequest, self.controller.create, self.req, body) @ddt.data( {'nova_net_id': 'fake_nova_net_id'}, {'neutron_net_id': 'fake_neutron_net_id'}, {'neutron_subnet_id': 'fake_neutron_subnet_id'}, {'neutron_net_id': 'fake', 'neutron_subnet_id': 'fake'}) def test_update_valid_cases(self, data): body = {share_networks.RESOURCE_NAME: {'user_id': 'fake_user'}} created = self.controller.create(self.req, body) body = {share_networks.RESOURCE_NAME: data} result = self.controller.update( self.req, created['share_network']['id'], body) for k, v in data.items(): self.assertIn(data[k], result['share_network'][k]) self._check_share_network_view( result[share_networks.RESOURCE_NAME], result['share_network']) @ddt.data( {'nova_net_id': 'foo', 'neutron_net_id': 'bar'}, {'nova_net_id': 'foo', 'neutron_subnet_id': 'quuz'}, {'nova_net_id': 'foo', 'neutron_net_id': 'bar', 'neutron_subnet_id': 'quuz'}) def test_update_invalid_cases(self, data): body = {share_networks.RESOURCE_NAME: {'user_id': 'fake_user'}} created = self.controller.create(self.req, body) body = {share_networks.RESOURCE_NAME: data} self.assertRaises( webob_exc.HTTPBadRequest, self.controller.update, self.req, created['share_network']['id'], body) def test_create_nominal(self): with mock.patch.object(db_api, 'share_network_create', mock.Mock(return_value=fake_share_network)): result = self.controller.create(self.req, self.body) db_api.share_network_create.assert_called_once_with( self.req.environ['manila.context'], self.body[share_networks.RESOURCE_NAME]) self._check_share_network_view( result[share_networks.RESOURCE_NAME], fake_share_network) def test_create_db_api_exception(self): with mock.patch.object(db_api, 'share_network_create', mock.Mock(side_effect=db_exception.DBError)): self.assertRaises(webob_exc.HTTPBadRequest, self.controller.create, self.req, self.body) def test_create_wrong_body(self): body = None self.assertRaises(webob_exc.HTTPUnprocessableEntity, self.controller.create, self.req, body) def test_delete_nominal(self): share_nw = 
fake_share_network.copy() share_nw['share_servers'] = ['foo', 'bar'] self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=share_nw)) self.mock_object(db_api, 'share_instances_get_all_by_share_network', mock.Mock(return_value=[])) self.mock_object(self.controller.share_rpcapi, 'delete_share_server') self.mock_object(db_api, 'share_network_delete') self.controller.delete(self.req, share_nw['id']) db_api.share_network_get.assert_called_once_with( self.req.environ['manila.context'], share_nw['id']) db_api.share_instances_get_all_by_share_network.\ assert_called_once_with(self.req.environ['manila.context'], share_nw['id']) self.controller.share_rpcapi.delete_share_server.assert_has_calls([ mock.call(self.req.environ['manila.context'], 'foo'), mock.call(self.req.environ['manila.context'], 'bar')]) db_api.share_network_delete.assert_called_once_with( self.req.environ['manila.context'], share_nw['id']) def test_delete_not_found(self): share_nw = 'fake network id' self.mock_object(db_api, 'share_network_get', mock.Mock(side_effect=exception.ShareNetworkNotFound( share_network_id=share_nw))) self.assertRaises(webob_exc.HTTPNotFound, self.controller.delete, self.req, share_nw) def test_quota_delete_reservation_failed(self): share_nw = fake_share_network.copy() share_nw['share_servers'] = ['foo', 'bar'] share_nw['user_id'] = 'fake_user_id' self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=share_nw)) self.mock_object(db_api, 'share_instances_get_all_by_share_network', mock.Mock(return_value=[])) self.mock_object(self.controller.share_rpcapi, 'delete_share_server') self.mock_object(db_api, 'share_network_delete') self.mock_object(share_networks.QUOTAS, 'reserve', mock.Mock(side_effect=Exception)) self.mock_object(share_networks.QUOTAS, 'commit') self.controller.delete(self.req, share_nw['id']) db_api.share_network_get.assert_called_once_with( self.req.environ['manila.context'], share_nw['id']) db_api.share_instances_get_all_by_share_network.\ assert_called_once_with(self.req.environ['manila.context'], share_nw['id']) self.controller.share_rpcapi.delete_share_server.assert_has_calls([ mock.call(self.req.environ['manila.context'], 'foo'), mock.call(self.req.environ['manila.context'], 'bar')]) db_api.share_network_delete.assert_called_once_with( self.req.environ['manila.context'], share_nw['id']) share_networks.QUOTAS.reserve.assert_called_once_with( self.req.environ['manila.context'], project_id=share_nw['project_id'], share_networks=-1, user_id=share_nw['user_id'] ) self.assertFalse(share_networks.QUOTAS.commit.called) def test_delete_in_use_by_share(self): share_nw = fake_share_network.copy() self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=share_nw)) self.mock_object(db_api, 'share_instances_get_all_by_share_network', mock.Mock(return_value=['foo', 'bar'])) self.assertRaises(webob_exc.HTTPConflict, self.controller.delete, self.req, share_nw['id']) db_api.share_network_get.assert_called_once_with( self.req.environ['manila.context'], share_nw['id']) db_api.share_instances_get_all_by_share_network.\ assert_called_once_with(self.req.environ['manila.context'], share_nw['id']) def test_delete_in_use_by_consistency_group(self): share_nw = fake_share_network.copy() self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=share_nw)) self.mock_object(db_api, 'count_consistency_groups_in_share_network', mock.Mock(return_value=2)) self.assertRaises(webob_exc.HTTPConflict, self.controller.delete, self.req, share_nw['id']) 
db_api.share_network_get.assert_called_once_with( self.req.environ['manila.context'], share_nw['id']) def test_show_nominal(self): share_nw = 'fake network id' with mock.patch.object(db_api, 'share_network_get', mock.Mock(return_value=fake_share_network)): result = self.controller.show(self.req, share_nw) db_api.share_network_get.assert_called_once_with( self.req.environ['manila.context'], share_nw) self._check_share_network_view( result[share_networks.RESOURCE_NAME], fake_share_network) def test_show_not_found(self): share_nw = 'fake network id' test_exception = exception.ShareNetworkNotFound( share_network_id=share_nw) with mock.patch.object(db_api, 'share_network_get', mock.Mock(side_effect=test_exception)): self.assertRaises(webob_exc.HTTPNotFound, self.controller.show, self.req, share_nw) def test_index_no_filters(self): networks = [fake_share_network] with mock.patch.object(db_api, 'share_network_get_all_by_project', mock.Mock(return_value=networks)): result = self.controller.index(self.req) db_api.share_network_get_all_by_project.assert_called_once_with( self.context, self.context.project_id) self.assertEqual(1, len(result[share_networks.RESOURCES_NAME])) self._check_share_network_view_shortened( result[share_networks.RESOURCES_NAME][0], fake_share_network_shortened) def test_index_detailed(self): networks = [fake_share_network] with mock.patch.object(db_api, 'share_network_get_all_by_project', mock.Mock(return_value=networks)): result = self.controller.detail(self.req) db_api.share_network_get_all_by_project.assert_called_once_with( self.context, self.context.project_id) self.assertEqual(1, len(result[share_networks.RESOURCES_NAME])) self._check_share_network_view( result[share_networks.RESOURCES_NAME][0], fake_share_network) @mock.patch.object(db_api, 'share_network_get_all_by_security_service', mock.Mock()) def test_index_filter_by_security_service(self): db_api.share_network_get_all_by_security_service.return_value = [ fake_share_network_with_ss] req = fakes.HTTPRequest.blank( '/share_networks?security_service_id=fake-ss-id') result = self.controller.index(req) db_api.share_network_get_all_by_security_service.\ assert_called_once_with(req.environ['manila.context'], 'fake-ss-id') self.assertEqual(1, len(result[share_networks.RESOURCES_NAME])) self._check_share_network_view_shortened( result[share_networks.RESOURCES_NAME][0], fake_sn_with_ss_shortened) @mock.patch.object(db_api, 'share_network_get_all', mock.Mock()) def test_index_all_tenants_non_admin_context(self): req = fakes.HTTPRequest.blank( '/share_networks?all_tenants=1') self.assertRaises(exception.PolicyNotAuthorized, self.controller.index, req) self.assertFalse(db_api.share_network_get_all.called) @mock.patch.object(db_api, 'share_network_get_all', mock.Mock()) def test_index_all_tenants_admin_context(self): db_api.share_network_get_all.return_value = [fake_share_network] req = fakes.HTTPRequest.blank( '/share_networks?all_tenants=1', use_admin_context=True) result = self.controller.index(req) db_api.share_network_get_all.assert_called_once_with( req.environ['manila.context']) self.assertEqual(1, len(result[share_networks.RESOURCES_NAME])) self._check_share_network_view_shortened( result[share_networks.RESOURCES_NAME][0], fake_share_network_shortened) @mock.patch.object(db_api, 'share_network_get_all_by_project', mock.Mock()) def test_index_filter_by_project_id_non_admin_context(self): req = fakes.HTTPRequest.blank( '/share_networks?project_id=fake project') self.assertRaises(exception.PolicyNotAuthorized, 
self.controller.index, req) self.assertFalse(db_api.share_network_get_all_by_project.called) @mock.patch.object(db_api, 'share_network_get_all_by_project', mock.Mock()) def test_index_filter_by_project_id_admin_context(self): db_api.share_network_get_all_by_project.return_value = [ fake_share_network, fake_share_network_with_ss, ] req = fakes.HTTPRequest.blank( '/share_networks?project_id=fake', use_admin_context=True) result = self.controller.index(req) db_api.share_network_get_all_by_project.assert_called_once_with( req.environ['manila.context'], 'fake') self.assertEqual(1, len(result[share_networks.RESOURCES_NAME])) self._check_share_network_view_shortened( result[share_networks.RESOURCES_NAME][0], fake_sn_with_ss_shortened) @mock.patch.object(db_api, 'share_network_get_all_by_security_service', mock.Mock()) def test_index_filter_by_ss_and_project_id_admin_context(self): db_api.share_network_get_all_by_security_service.return_value = [ fake_share_network, fake_share_network_with_ss, ] req = fakes.HTTPRequest.blank( '/share_networks?security_service_id=fake-ss-id&project_id=fake', use_admin_context=True) result = self.controller.index(req) db_api.share_network_get_all_by_security_service.\ assert_called_once_with(req.environ['manila.context'], 'fake-ss-id') self.assertEqual(1, len(result[share_networks.RESOURCES_NAME])) self._check_share_network_view_shortened( result[share_networks.RESOURCES_NAME][0], fake_sn_with_ss_shortened) @mock.patch.object(db_api, 'share_network_get_all_by_project', mock.Mock()) def test_index_all_filter_opts(self): valid_filter_opts = { 'created_before': '2001-02-02', 'created_since': '1999-01-01', 'neutron_net_id': '1111', 'neutron_subnet_id': '2222', 'network_type': 'local', 'segmentation_id': 2000, 'cidr': '8.0.0.0/12', 'ip_version': 6, 'name': 'test-sn' } db_api.share_network_get_all_by_project.return_value = [ fake_share_network, fake_share_network_with_ss] query_string = '/share-networks?' 
+ parse.urlencode(sorted( [(k, v) for (k, v) in list(valid_filter_opts.items())])) for use_admin_context in [True, False]: req = fakes.HTTPRequest.blank(query_string, use_admin_context=use_admin_context) result = self.controller.index(req) db_api.share_network_get_all_by_project.assert_called_with( req.environ['manila.context'], 'fake') self.assertEqual(1, len(result[share_networks.RESOURCES_NAME])) self._check_share_network_view_shortened( result[share_networks.RESOURCES_NAME][0], fake_sn_with_ss_shortened) @mock.patch.object(db_api, 'share_network_get', mock.Mock()) def test_update_nominal(self): share_nw = 'fake network id' db_api.share_network_get.return_value = fake_share_network body = {share_networks.RESOURCE_NAME: {'name': 'new name'}} with mock.patch.object(db_api, 'share_network_update', mock.Mock(return_value=fake_share_network)): result = self.controller.update(self.req, share_nw, body) db_api.share_network_update.assert_called_once_with( self.req.environ['manila.context'], share_nw, body[share_networks.RESOURCE_NAME]) self._check_share_network_view( result[share_networks.RESOURCE_NAME], fake_share_network) @mock.patch.object(db_api, 'share_network_get', mock.Mock()) def test_update_not_found(self): share_nw = 'fake network id' db_api.share_network_get.side_effect = exception.ShareNetworkNotFound( share_network_id=share_nw) self.assertRaises(webob_exc.HTTPNotFound, self.controller.update, self.req, share_nw, self.body) @mock.patch.object(db_api, 'share_network_get', mock.Mock()) def test_update_invalid_key_in_use(self): share_nw = fake_share_network.copy() share_nw['share_servers'] = [{'id': 1}] db_api.share_network_get.return_value = share_nw body = { share_networks.RESOURCE_NAME: { 'name': 'new name', 'user_id': 'new id', }, } self.assertRaises(webob_exc.HTTPForbidden, self.controller.update, self.req, share_nw['id'], body) @mock.patch.object(db_api, 'share_network_get', mock.Mock()) @mock.patch.object(db_api, 'share_network_update', mock.Mock()) def test_update_valid_keys_in_use(self): share_nw = fake_share_network.copy() share_nw['share_servers'] = [{'id': 1}] updated_share_nw = share_nw.copy() updated_share_nw['name'] = 'new name' updated_share_nw['description'] = 'new description' db_api.share_network_get.return_value = share_nw body = { share_networks.RESOURCE_NAME: { 'name': updated_share_nw['name'], 'description': updated_share_nw['description'], }, } self.controller.update(self.req, share_nw['id'], body) db_api.share_network_get.assert_called_once_with(self.context, share_nw['id']) db_api.share_network_update.assert_called_once_with( self.context, share_nw['id'], body['share_network']) @mock.patch.object(db_api, 'share_network_get', mock.Mock()) def test_update_db_api_exception(self): share_nw = 'fake network id' db_api.share_network_get.return_value = fake_share_network body = {share_networks.RESOURCE_NAME: {'neutron_subnet_id': 'new subnet'}} with mock.patch.object(db_api, 'share_network_update', mock.Mock(side_effect=db_exception.DBError)): self.assertRaises(webob_exc.HTTPBadRequest, self.controller.update, self.req, share_nw, body) def test_action_add_security_service(self): share_network_id = 'fake network id' security_service_id = 'fake ss id' body = {'add_security_service': {'security_service_id': security_service_id}} with mock.patch.object(self.controller, '_add_security_service', mock.Mock()): self.controller.action(self.req, share_network_id, body) self.controller._add_security_service.assert_called_once_with( self.req, share_network_id, 
body['add_security_service']) @mock.patch.object(db_api, 'share_network_get', mock.Mock()) @mock.patch.object(db_api, 'security_service_get', mock.Mock()) def test_action_add_security_service_conflict(self): share_network = fake_share_network.copy() share_network['security_services'] = [{'id': 'security_service_1', 'type': 'ldap'}] security_service = {'id': ' security_service_2', 'type': 'ldap'} body = {'add_security_service': {'security_service_id': security_service['id']}} db_api.security_service_get.return_value = security_service db_api.share_network_get.return_value = share_network with mock.patch.object(share_networks.policy, 'check_policy', mock.Mock()): self.assertRaises(webob_exc.HTTPConflict, self.controller.action, self.req, share_network['id'], body) db_api.share_network_get.assert_called_once_with( self.req.environ['manila.context'], share_network['id']) db_api.security_service_get.assert_called_once_with( self.req.environ['manila.context'], security_service['id']) share_networks.policy.check_policy.assert_called_once_with( self.req.environ['manila.context'], share_networks.RESOURCE_NAME, 'add_security_service', ) def test_action_remove_security_service(self): share_network_id = 'fake network id' security_service_id = 'fake ss id' body = {'remove_security_service': {'security_service_id': security_service_id}} with mock.patch.object(self.controller, '_remove_security_service', mock.Mock()): self.controller.action(self.req, share_network_id, body) self.controller._remove_security_service.assert_called_once_with( self.req, share_network_id, body['remove_security_service']) @mock.patch.object(db_api, 'share_network_get', mock.Mock()) @mock.patch.object(share_networks.policy, 'check_policy', mock.Mock()) def test_action_remove_security_service_forbidden(self): share_network = fake_share_network.copy() share_network['share_servers'] = 'fake share server' db_api.share_network_get.return_value = share_network body = { 'remove_security_service': { 'security_service_id': 'fake id', }, } self.assertRaises(webob_exc.HTTPForbidden, self.controller.action, self.req, share_network['id'], body) db_api.share_network_get.assert_called_once_with( self.req.environ['manila.context'], share_network['id']) share_networks.policy.check_policy.assert_called_once_with( self.req.environ['manila.context'], share_networks.RESOURCE_NAME, 'remove_security_service') def test_action_bad_request(self): share_network_id = 'fake network id' body = {'bad_action': {}} self.assertRaises(webob_exc.HTTPBadRequest, self.controller.action, self.req, share_network_id, body) manila-2.0.0/manila/tests/api/v1/test_shares.py0000664000567000056710000013161012701407107022514 0ustar jenkinsjenkins00000000000000# Copyright 2012 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
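
# The tests below exercise the v1 shares API controller: share creation
# (plain, from snapshots, with share networks and with default share
# types, across several API microversions), show, update and delete,
# and summary/detail listings with search options.
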
import copy import datetime import ddt import mock from oslo_config import cfg from oslo_serialization import jsonutils import six import webob from manila.api import common from manila.api.openstack import api_version_request as api_version from manila.api.v1 import shares from manila.common import constants from manila import context from manila import db from manila import exception from manila.share import api as share_api from manila.share import share_types from manila import test from manila.tests.api.contrib import stubs from manila.tests.api import fakes from manila.tests import db_utils from manila import utils CONF = cfg.CONF @ddt.ddt class ShareAPITest(test.TestCase): """Share API Test.""" def setUp(self): super(self.__class__, self).setUp() self.controller = shares.ShareController() self.mock_object(db, 'availability_zone_get') self.mock_object(share_api.API, 'get_all', stubs.stub_get_all_shares) self.mock_object(share_api.API, 'get', stubs.stub_share_get) self.mock_object(share_api.API, 'update', stubs.stub_share_update) self.mock_object(share_api.API, 'delete', stubs.stub_share_delete) self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get) self.maxDiff = None self.share = { "size": 100, "display_name": "Share Test Name", "display_description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "is_public": False, } self.create_mock = mock.Mock( return_value=stubs.stub_share( '1', display_name=self.share['display_name'], display_description=self.share['display_description'], size=100, share_proto=self.share['share_proto'].upper(), availability_zone=self.share['availability_zone']) ) self.vt = { 'id': 'fake_volume_type_id', 'name': 'fake_volume_type_name', } CONF.set_default("default_share_type", None) def _get_expected_share_detailed_response(self, values=None, admin=False): share = { 'id': '1', 'name': 'displayname', 'availability_zone': 'fakeaz', 'description': 'displaydesc', 'export_location': 'fake_location', 'export_locations': ['fake_location', 'fake_location2'], 'project_id': 'fakeproject', 'host': 'fakehost', 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'share_proto': 'FAKEPROTO', 'metadata': {}, 'size': 1, 'snapshot_id': '2', 'share_network_id': None, 'status': 'fakestatus', 'share_type': '1', 'volume_type': '1', 'snapshot_support': True, 'is_public': False, 'links': [ { 'href': 'http://localhost/v1/fake/shares/1', 'rel': 'self' }, { 'href': 'http://localhost/fake/shares/1', 'rel': 'bookmark' } ], } if values: if 'display_name' in values: values['name'] = values.pop('display_name') if 'display_description' in values: values['description'] = values.pop('display_description') share.update(values) if share.get('share_proto'): share['share_proto'] = share['share_proto'].upper() if admin: share['share_server_id'] = 'fake_share_server_id' return {'share': share} @ddt.data("1.0", "2.0", "2.1") def test_share_create_original(self, microversion): self.mock_object(share_api.API, 'create', self.create_mock) body = {"share": copy.deepcopy(self.share)} req = fakes.HTTPRequest.blank('/shares', version=microversion) res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response(self.share) expected['share'].pop('snapshot_support') self.assertEqual(expected, res_dict) @ddt.data("2.2", "2.3") def test_share_create_with_snapshot_support_without_cg(self, microversion): self.mock_object(share_api.API, 'create', self.create_mock) body = {"share": copy.deepcopy(self.share)} req = 
fakes.HTTPRequest.blank('/shares', version=microversion) res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response(self.share) self.assertEqual(expected, res_dict) @ddt.data("2.4", "2.5") def test_share_create_with_consistency_group(self, microversion): self.mock_object(share_api.API, 'create', self.create_mock) body = {"share": copy.deepcopy(self.share)} req = fakes.HTTPRequest.blank('/shares', version=microversion) res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response(self.share) expected['share']['consistency_group_id'] = None expected['share']['source_cgsnapshot_member_id'] = None if (api_version.APIVersionRequest(microversion) >= api_version.APIVersionRequest('2.5')): expected['share']['task_state'] = None self.assertEqual(expected, res_dict) def test_share_create_with_valid_default_share_type(self): self.mock_object(share_types, 'get_share_type_by_name', mock.Mock(return_value=self.vt)) CONF.set_default("default_share_type", self.vt['name']) self.mock_object(share_api.API, 'create', self.create_mock) body = {"share": copy.deepcopy(self.share)} req = fakes.HTTPRequest.blank('/shares') res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response(self.share) expected['share'].pop('snapshot_support') share_types.get_share_type_by_name.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.vt['name']) self.assertEqual(expected, res_dict) def test_share_create_with_invalid_default_share_type(self): self.mock_object( share_types, 'get_default_share_type', mock.Mock(side_effect=exception.ShareTypeNotFoundByName( self.vt['name'])), ) CONF.set_default("default_share_type", self.vt['name']) req = fakes.HTTPRequest.blank('/shares') self.assertRaises(exception.ShareTypeNotFoundByName, self.controller.create, req, {'share': self.share}) share_types.get_default_share_type.assert_called_once_with() def test_share_create_with_dhss_true_and_network_notexist(self): fake_share_type = { 'id': 'fake_volume_type_id', 'name': 'fake_volume_type_name', 'extra_specs': { 'driver_handles_share_servers': True, } } self.mock_object( share_types, 'get_default_share_type', mock.Mock(return_value=fake_share_type), ) CONF.set_default("default_share_type", fake_share_type['name']) req = fakes.HTTPRequest.blank('/shares') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, {'share': self.share}) share_types.get_default_share_type.assert_called_once_with() def test_share_create_with_share_net(self): shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "share_network_id": "fakenetid" } create_mock = mock.Mock(return_value=stubs.stub_share('1', display_name=shr['name'], display_description=shr['description'], size=shr['size'], share_proto=shr['share_proto'].upper(), availability_zone=shr['availability_zone'], share_network_id=shr['share_network_id'])) self.mock_object(share_api.API, 'create', create_mock) self.mock_object(share_api.API, 'get_share_network', mock.Mock( return_value={'id': 'fakenetid'})) body = {"share": copy.deepcopy(shr)} req = fakes.HTTPRequest.blank('/shares') res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response(shr) expected['share'].pop('snapshot_support') self.assertEqual(expected, res_dict) self.assertEqual("fakenetid", create_mock.call_args[1]['share_network_id']) def 
test_share_create_from_snapshot_without_share_net_no_parent(self): shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "snapshot_id": 333, "share_network_id": None, } create_mock = mock.Mock(return_value=stubs.stub_share('1', display_name=shr['name'], display_description=shr['description'], size=shr['size'], share_proto=shr['share_proto'].upper(), availability_zone=shr['availability_zone'], snapshot_id=shr['snapshot_id'], share_network_id=shr['share_network_id'])) self.mock_object(share_api.API, 'create', create_mock) body = {"share": copy.deepcopy(shr)} req = fakes.HTTPRequest.blank('/shares') res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response(shr) expected['share'].pop('snapshot_support') self.assertEqual(expected, res_dict) def test_share_create_from_snapshot_without_share_net_parent_exists(self): shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "snapshot_id": 333, "share_network_id": None, } parent_share_net = 444 create_mock = mock.Mock(return_value=stubs.stub_share('1', display_name=shr['name'], display_description=shr['description'], size=shr['size'], share_proto=shr['share_proto'].upper(), snapshot_id=shr['snapshot_id'], instance=dict( availability_zone=shr['availability_zone'], share_network_id=shr['share_network_id']))) self.mock_object(share_api.API, 'create', create_mock) self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get) self.mock_object(share_api.API, 'get', mock.Mock( return_value=mock.Mock( instance={'share_network_id': parent_share_net}))) self.mock_object(share_api.API, 'get_share_network', mock.Mock( return_value={'id': parent_share_net})) body = {"share": copy.deepcopy(shr)} req = fakes.HTTPRequest.blank('/shares') res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response(shr) expected['share'].pop('snapshot_support') self.assertEqual(expected, res_dict) self.assertEqual(parent_share_net, create_mock.call_args[1]['share_network_id']) def test_share_create_from_snapshot_with_share_net_equals_parent(self): parent_share_net = 444 shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "snapshot_id": 333, "share_network_id": parent_share_net } create_mock = mock.Mock(return_value=stubs.stub_share('1', display_name=shr['name'], display_description=shr['description'], size=shr['size'], share_proto=shr['share_proto'].upper(), snapshot_id=shr['snapshot_id'], instance=dict( availability_zone=shr['availability_zone'], share_network_id=shr['share_network_id']))) self.mock_object(share_api.API, 'create', create_mock) self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get) self.mock_object(share_api.API, 'get', mock.Mock( return_value=mock.Mock( instance={'share_network_id': parent_share_net}))) self.mock_object(share_api.API, 'get_share_network', mock.Mock( return_value={'id': parent_share_net})) body = {"share": copy.deepcopy(shr)} req = fakes.HTTPRequest.blank('/shares') res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response(shr) expected['share'].pop('snapshot_support') self.assertEqual(expected, res_dict) self.assertEqual(parent_share_net, create_mock.call_args[1]['share_network_id']) def 
test_share_create_from_snapshot_invalid_share_net(self): self.mock_object(share_api.API, 'create') shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "snapshot_id": 333, "share_network_id": 1234 } body = {"share": shr} req = fakes.HTTPRequest.blank('/shares') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) def test_share_creation_fails_with_bad_size(self): shr = {"size": '', "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1"} body = {"share": shr} req = fakes.HTTPRequest.blank('/shares') self.assertRaises(exception.InvalidInput, self.controller.create, req, body) def test_share_create_no_body(self): body = {} req = fakes.HTTPRequest.blank('/shares') self.assertRaises(webob.exc.HTTPUnprocessableEntity, self.controller.create, req, body) def test_share_create_invalid_availability_zone(self): self.mock_object( db, 'availability_zone_get', mock.Mock(side_effect=exception.AvailabilityZoneNotFound(id='id')) ) body = {"share": copy.deepcopy(self.share)} req = fakes.HTTPRequest.blank('/shares') self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, req, body) def test_share_show(self): req = fakes.HTTPRequest.blank('/shares/1') expected = self._get_expected_share_detailed_response() expected['share'].pop('snapshot_support') res_dict = self.controller.show(req, '1') self.assertEqual(expected, res_dict) def test_share_show_with_consistency_group(self): req = fakes.HTTPRequest.blank('/shares/1', version='2.4') expected = self._get_expected_share_detailed_response() expected['share']['consistency_group_id'] = None expected['share']['source_cgsnapshot_member_id'] = None res_dict = self.controller.show(req, '1') self.assertEqual(expected, res_dict) def test_share_show_with_share_type_name(self): req = fakes.HTTPRequest.blank('/shares/1', version='2.6') res_dict = self.controller.show(req, '1') expected = self._get_expected_share_detailed_response() expected['share']['consistency_group_id'] = None expected['share']['source_cgsnapshot_member_id'] = None expected['share']['share_type_name'] = None expected['share']['task_state'] = None self.assertEqual(expected, res_dict) def test_share_show_admin(self): req = fakes.HTTPRequest.blank('/shares/1', use_admin_context=True) expected = self._get_expected_share_detailed_response(admin=True) expected['share'].pop('snapshot_support') res_dict = self.controller.show(req, '1') self.assertEqual(expected, res_dict) def test_share_show_no_share(self): self.mock_object(share_api.API, 'get', stubs.stub_share_get_notfound) req = fakes.HTTPRequest.blank('/shares/1') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, '1') def test_share_delete(self): req = fakes.HTTPRequest.blank('/shares/1') resp = self.controller.delete(req, 1) self.assertEqual(202, resp.status_int) def test_share_delete_in_consistency_group_param_not_provided(self): fake_share = stubs.stub_share('fake_share', consistency_group_id='fake_cg_id') self.mock_object(share_api.API, 'get', mock.Mock(return_value=fake_share)) req = fakes.HTTPRequest.blank('/shares/1') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete, req, 1) def test_share_delete_in_consistency_group(self): fake_share = stubs.stub_share('fake_share', consistency_group_id='fake_cg_id') self.mock_object(share_api.API, 'get', mock.Mock(return_value=fake_share)) req = fakes.HTTPRequest.blank( 
'/shares/1?consistency_group_id=fake_cg_id') resp = self.controller.delete(req, 1) self.assertEqual(202, resp.status_int) def test_share_delete_in_consistency_group_wrong_id(self): fake_share = stubs.stub_share('fake_share', consistency_group_id='fake_cg_id') self.mock_object(share_api.API, 'get', mock.Mock(return_value=fake_share)) req = fakes.HTTPRequest.blank( '/shares/1?consistency_group_id=not_fake_cg_id') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete, req, 1) def test_share_update(self): shr = self.share body = {"share": shr} req = fakes.HTTPRequest.blank('/share/1') res_dict = self.controller.update(req, 1, body) self.assertEqual(shr["display_name"], res_dict['share']["name"]) self.assertEqual(shr["display_description"], res_dict['share']["description"]) self.assertEqual(shr['is_public'], res_dict['share']['is_public']) def test_share_update_with_consistency_group(self): shr = self.share body = {"share": shr} req = fakes.HTTPRequest.blank('/share/1', version="2.4") res_dict = self.controller.update(req, 1, body) self.assertIsNone(res_dict['share']["consistency_group_id"]) self.assertIsNone(res_dict['share']["source_cgsnapshot_member_id"]) def test_share_not_updates_size(self): req = fakes.HTTPRequest.blank('/share/1') res_dict = self.controller.update(req, 1, {"share": self.share}) self.assertNotEqual(res_dict['share']["size"], self.share["size"]) def test_share_delete_no_share(self): self.mock_object(share_api.API, 'get', stubs.stub_share_get_notfound) req = fakes.HTTPRequest.blank('/shares/1') self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, 1) def _share_list_summary_with_search_opts(self, use_admin_context): search_opts = { 'name': 'fake_name', 'status': constants.STATUS_AVAILABLE, 'share_server_id': 'fake_share_server_id', 'share_type_id': 'fake_share_type_id', 'snapshot_id': 'fake_snapshot_id', 'host': 'fake_host', 'share_network_id': 'fake_share_network_id', 'metadata': '%7B%27k1%27%3A+%27v1%27%7D', # serialized k1=v1 'extra_specs': '%7B%27k2%27%3A+%27v2%27%7D', # serialized k2=v2 'sort_key': 'fake_sort_key', 'sort_dir': 'fake_sort_dir', 'limit': '1', 'offset': '1', 'is_public': 'False', } # fake_key should be filtered for non-admin url = '/shares?fake_key=fake_value' for k, v in search_opts.items(): url = url + '&' + k + '=' + v req = fakes.HTTPRequest.blank(url, use_admin_context=use_admin_context) shares = [ {'id': 'id1', 'display_name': 'n1'}, {'id': 'id2', 'display_name': 'n2'}, {'id': 'id3', 'display_name': 'n3'}, ] self.mock_object(share_api.API, 'get_all', mock.Mock(return_value=shares)) result = self.controller.index(req) search_opts_expected = { 'display_name': search_opts['name'], 'status': search_opts['status'], 'share_server_id': search_opts['share_server_id'], 'share_type_id': search_opts['share_type_id'], 'snapshot_id': search_opts['snapshot_id'], 'host': search_opts['host'], 'share_network_id': search_opts['share_network_id'], 'metadata': {'k1': 'v1'}, 'extra_specs': {'k2': 'v2'}, 'is_public': 'False', } if use_admin_context: search_opts_expected.update({'fake_key': 'fake_value'}) share_api.API.get_all.assert_called_once_with( req.environ['manila.context'], sort_key=search_opts['sort_key'], sort_dir=search_opts['sort_dir'], search_opts=search_opts_expected, ) self.assertEqual(1, len(result['shares'])) self.assertEqual(shares[1]['id'], result['shares'][0]['id']) self.assertEqual( shares[1]['display_name'], result['shares'][0]['name']) def test_share_list_summary_with_search_opts_by_non_admin(self): 
self._share_list_summary_with_search_opts(use_admin_context=False) def test_share_list_summary_with_search_opts_by_admin(self): self._share_list_summary_with_search_opts(use_admin_context=True) def test_share_list_summary(self): self.mock_object(share_api.API, 'get_all', stubs.stub_share_get_all_by_project) req = fakes.HTTPRequest.blank('/shares') res_dict = self.controller.index(req) expected = { 'shares': [ { 'name': 'displayname', 'id': '1', 'links': [ { 'href': 'http://localhost/v1/fake/shares/1', 'rel': 'self' }, { 'href': 'http://localhost/fake/shares/1', 'rel': 'bookmark' } ], } ] } self.assertEqual(expected, res_dict) def _share_list_detail_with_search_opts(self, use_admin_context): search_opts = { 'name': 'fake_name', 'status': constants.STATUS_AVAILABLE, 'share_server_id': 'fake_share_server_id', 'share_type_id': 'fake_share_type_id', 'snapshot_id': 'fake_snapshot_id', 'host': 'fake_host', 'share_network_id': 'fake_share_network_id', 'metadata': '%7B%27k1%27%3A+%27v1%27%7D', # serialized k1=v1 'extra_specs': '%7B%27k2%27%3A+%27v2%27%7D', # serialized k2=v2 'sort_key': 'fake_sort_key', 'sort_dir': 'fake_sort_dir', 'limit': '1', 'offset': '1', 'is_public': 'False', } # fake_key should be filtered for non-admin url = '/shares/detail?fake_key=fake_value' for k, v in search_opts.items(): url = url + '&' + k + '=' + v req = fakes.HTTPRequest.blank(url, use_admin_context=use_admin_context) shares = [ {'id': 'id1', 'display_name': 'n1'}, { 'id': 'id2', 'display_name': 'n2', 'status': constants.STATUS_AVAILABLE, 'snapshot_id': 'fake_snapshot_id', 'share_type_id': 'fake_share_type_id', 'instance': {'host': 'fake_host', 'share_network_id': 'fake_share_network_id'}, }, {'id': 'id3', 'display_name': 'n3'}, ] self.mock_object(share_api.API, 'get_all', mock.Mock(return_value=shares)) result = self.controller.detail(req) search_opts_expected = { 'display_name': search_opts['name'], 'status': search_opts['status'], 'share_server_id': search_opts['share_server_id'], 'share_type_id': search_opts['share_type_id'], 'snapshot_id': search_opts['snapshot_id'], 'host': search_opts['host'], 'share_network_id': search_opts['share_network_id'], 'metadata': {'k1': 'v1'}, 'extra_specs': {'k2': 'v2'}, 'is_public': 'False', } if use_admin_context: search_opts_expected.update({'fake_key': 'fake_value'}) share_api.API.get_all.assert_called_once_with( req.environ['manila.context'], sort_key=search_opts['sort_key'], sort_dir=search_opts['sort_dir'], search_opts=search_opts_expected, ) self.assertEqual(1, len(result['shares'])) self.assertEqual(shares[1]['id'], result['shares'][0]['id']) self.assertEqual( shares[1]['display_name'], result['shares'][0]['name']) self.assertEqual( shares[1]['snapshot_id'], result['shares'][0]['snapshot_id']) self.assertEqual( shares[1]['status'], result['shares'][0]['status']) self.assertEqual( shares[1]['share_type_id'], result['shares'][0]['share_type']) self.assertEqual( shares[1]['snapshot_id'], result['shares'][0]['snapshot_id']) self.assertEqual( shares[1]['instance']['host'], result['shares'][0]['host']) self.assertEqual( shares[1]['instance']['share_network_id'], result['shares'][0]['share_network_id']) def test_share_list_detail_with_search_opts_by_non_admin(self): self._share_list_detail_with_search_opts(use_admin_context=False) def test_share_list_detail_with_search_opts_by_admin(self): self._share_list_detail_with_search_opts(use_admin_context=True) def _list_detail_common_expected(self): return { 'shares': [ { 'status': 'fakestatus', 'description': 'displaydesc', 
'export_location': 'fake_location', 'export_locations': ['fake_location', 'fake_location2'], 'availability_zone': 'fakeaz', 'name': 'displayname', 'share_proto': 'FAKEPROTO', 'metadata': {}, 'project_id': 'fakeproject', 'host': 'fakehost', 'id': '1', 'snapshot_id': '2', 'snapshot_support': True, 'share_network_id': None, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'size': 1, 'share_type': '1', 'volume_type': '1', 'is_public': False, 'links': [ { 'href': 'http://localhost/v1/fake/shares/1', 'rel': 'self' }, { 'href': 'http://localhost/fake/shares/1', 'rel': 'bookmark' } ], } ] } def _list_detail_test_common(self, req, expected): self.mock_object(share_api.API, 'get_all', stubs.stub_share_get_all_by_project) res_dict = self.controller.detail(req) self.assertEqual(expected, res_dict) self.assertEqual(res_dict['shares'][0]['volume_type'], res_dict['shares'][0]['share_type']) def test_share_list_detail(self): env = {'QUERY_STRING': 'name=Share+Test+Name'} req = fakes.HTTPRequest.blank('/shares/detail', environ=env) expected = self._list_detail_common_expected() expected['shares'][0].pop('snapshot_support') self._list_detail_test_common(req, expected) def test_share_list_detail_with_consistency_group(self): env = {'QUERY_STRING': 'name=Share+Test+Name'} req = fakes.HTTPRequest.blank('/shares/detail', environ=env, version="2.4") expected = self._list_detail_common_expected() expected['shares'][0]['consistency_group_id'] = None expected['shares'][0]['source_cgsnapshot_member_id'] = None self._list_detail_test_common(req, expected) def test_share_list_detail_with_task_state(self): env = {'QUERY_STRING': 'name=Share+Test+Name'} req = fakes.HTTPRequest.blank('/shares/detail', environ=env, version="2.5") expected = self._list_detail_common_expected() expected['shares'][0]['consistency_group_id'] = None expected['shares'][0]['source_cgsnapshot_member_id'] = None expected['shares'][0]['task_state'] = None self._list_detail_test_common(req, expected) def test_remove_invalid_options(self): ctx = context.RequestContext('fakeuser', 'fakeproject', is_admin=False) search_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'} expected_opts = {'a': 'a', 'c': 'c'} allowed_opts = ['a', 'c'] common.remove_invalid_options(ctx, search_opts, allowed_opts) self.assertEqual(expected_opts, search_opts) def test_remove_invalid_options_admin(self): ctx = context.RequestContext('fakeuser', 'fakeproject', is_admin=True) search_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'} expected_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'} allowed_opts = ['a', 'c'] common.remove_invalid_options(ctx, search_opts, allowed_opts) self.assertEqual(expected_opts, search_opts) def test_validate_cephx_id_invalid_with_period(self): self.assertRaises(webob.exc.HTTPBadRequest, self.controller._validate_cephx_id, "client.manila") def test_validate_cephx_id_invalid_with_non_ascii(self): self.assertRaises(webob.exc.HTTPBadRequest, self.controller._validate_cephx_id, u"bj\u00F6rn") @ddt.data("alice", "alice_bob", "alice bob") def test_validate_cephx_id_valid(self, test_id): self.controller._validate_cephx_id(test_id) def _fake_access_get(self, ctxt, access_id): class Access(object): def __init__(self, **kwargs): self.STATE_NEW = 'fake_new' self.STATE_ACTIVE = 'fake_active' self.STATE_ERROR = 'fake_error' self.params = kwargs self.params['state'] = self.STATE_NEW self.share_id = kwargs.get('share_id') self.id = access_id def __getitem__(self, item): return self.params[item] access = Access(access_id=access_id, share_id='fake_share_id') return access 
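
# _fake_access_get above is a module-level stand-in for
# share_api.API.access_get; the deny-access tests in ShareActionsTest below
# patch it in via self.mock_object(share_api.API, "access_get",
# _fake_access_get). A minimal usage sketch (illustrative only, reusing the
# fake defined above; the access id value is arbitrary):
#
#     access = _fake_access_get(None, None, 'fake_access_id')
#     access['access_id']   # -> 'fake_access_id', via Access.__getitem__
#     access['state']       # -> 'fake_new' (Access.STATE_NEW)
#     access.share_id       # -> 'fake_share_id'
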
@ddt.ddt
class ShareActionsTest(test.TestCase):
    def setUp(self):
        super(self.__class__, self).setUp()
        self.controller = shares.ShareController()
        self.mock_object(share_api.API, 'get', stubs.stub_share_get)

    @ddt.data(
        {'access_type': 'ip', 'access_to': '127.0.0.1'},
        {'access_type': 'user', 'access_to': '1' * 4},
        {'access_type': 'user', 'access_to': '1' * 32},
        {'access_type': 'user', 'access_to': 'fake\\]{.-_\'`;}['},
        {'access_type': 'user', 'access_to': 'MYDOMAIN\\Administrator'},
        {'access_type': 'cert', 'access_to': 'x'},
        {'access_type': 'cert', 'access_to': 'tenant.example.com'},
        {'access_type': 'cert', 'access_to': 'x' * 64},
    )
    def test_allow_access(self, access):
        self.mock_object(share_api.API, 'allow_access',
                         mock.Mock(return_value={'fake': 'fake'}))

        id = 'fake_share_id'
        body = {'os-allow_access': access}
        expected = {'access': {'fake': 'fake'}}
        req = fakes.HTTPRequest.blank('/v1/tenant1/shares/%s/action' % id)
        res = self.controller._allow_access(req, id, body)
        self.assertEqual(expected, res)

    @ddt.data(
        {'access_type': 'error_type', 'access_to': '127.0.0.1'},
        {'access_type': 'ip', 'access_to': 'localhost'},
        {'access_type': 'ip', 'access_to': '127.0.0.*'},
        {'access_type': 'ip', 'access_to': '127.0.0.0/33'},
        {'access_type': 'ip', 'access_to': '127.0.0.256'},
        {'access_type': 'user', 'access_to': '1'},
        {'access_type': 'user', 'access_to': '1' * 3},
        {'access_type': 'user', 'access_to': '1' * 33},
        {'access_type': 'user', 'access_to': 'root^'},
        {'access_type': 'cert', 'access_to': ''},
        {'access_type': 'cert', 'access_to': ' '},
        {'access_type': 'cert', 'access_to': 'x' * 65},
        {'access_type': 'cephx', 'access_to': 'alice'}
    )
    def test_allow_access_error(self, access):
        id = 'fake_share_id'
        body = {'os-allow_access': access}
        req = fakes.HTTPRequest.blank('/v1/tenant1/shares/%s/action' % id)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._allow_access, req, id, body)

    def test_deny_access(self):
        def _stub_deny_access(*args, **kwargs):
            pass

        self.mock_object(share_api.API, "deny_access", _stub_deny_access)
        self.mock_object(share_api.API, "access_get", _fake_access_get)

        id = 'fake_share_id'
        body = {"os-deny_access": {"access_id": 'fake_acces_id'}}
        req = fakes.HTTPRequest.blank('/v1/tenant1/shares/%s/action' % id)
        res = self.controller._deny_access(req, id, body)
        self.assertEqual(202, res.status_int)

    def test_deny_access_not_found(self):
        def _stub_deny_access(*args, **kwargs):
            pass

        self.mock_object(share_api.API, "deny_access", _stub_deny_access)
        self.mock_object(share_api.API, "access_get", _fake_access_get)

        id = 'super_fake_share_id'
        body = {"os-deny_access": {"access_id": 'fake_acces_id'}}
        req = fakes.HTTPRequest.blank('/v1/tenant1/shares/%s/action' % id)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller._deny_access, req, id, body)

    def test_access_list(self):
        def _fake_access_get_all(*args, **kwargs):
            return [{"state": "fakestatus",
                     "id": "fake_share_id",
                     "access_type": "fakeip",
                     "access_to": "127.0.0.1"}]

        self.mock_object(share_api.API, "access_get_all",
                         _fake_access_get_all)
        id = 'fake_share_id'
        body = {"os-access_list": None}
        req = fakes.HTTPRequest.blank('/v1/tenant1/shares/%s/action' % id)
        res_dict = self.controller._access_list(req, id, body)
        expected = _fake_access_get_all()
        self.assertEqual(expected, res_dict['access_list'])

    def test_extend(self):
        id = 'fake_share_id'
        share = stubs.stub_share_get(None, None, id)
        self.mock_object(share_api.API, 'get', mock.Mock(return_value=share))
        self.mock_object(share_api.API, "extend")

        size = '123'
        body = {"os-extend": {'new_size': size}}
        req = fakes.HTTPRequest.blank('/v1/shares/%s/action' % id)

        actual_response = self.controller._extend(req, id, body)

        share_api.API.get.assert_called_once_with(mock.ANY, id)
        share_api.API.extend.assert_called_once_with(
            mock.ANY, share, int(size))
        self.assertEqual(202, actual_response.status_int)

    @ddt.data({"os-extend": ""},
              {"os-extend": {"new_size": "foo"}},
              {"os-extend": {"new_size": {'foo': 'bar'}}})
    def test_extend_invalid_body(self, body):
        id = 'fake_share_id'
        req = fakes.HTTPRequest.blank('/v1/shares/%s/action' % id)

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._extend, req, id, body)

    @ddt.data({'source': exception.InvalidInput,
               'target': webob.exc.HTTPBadRequest},
              {'source': exception.InvalidShare,
               'target': webob.exc.HTTPBadRequest},
              {'source': exception.ShareSizeExceedsAvailableQuota,
               'target': webob.exc.HTTPForbidden})
    @ddt.unpack
    def test_extend_exception(self, source, target):
        id = 'fake_share_id'
        req = fakes.HTTPRequest.blank('/v1/shares/%s/action' % id)
        body = {"os-extend": {'new_size': '123'}}
        self.mock_object(share_api.API, "extend",
                         mock.Mock(side_effect=source('fake')))

        self.assertRaises(target, self.controller._extend, req, id, body)

    def test_shrink(self):
        id = 'fake_share_id'
        share = stubs.stub_share_get(None, None, id)
        self.mock_object(share_api.API, 'get', mock.Mock(return_value=share))
        self.mock_object(share_api.API, "shrink")

        size = '123'
        body = {"os-shrink": {'new_size': size}}
        req = fakes.HTTPRequest.blank('/v1/shares/%s/action' % id)

        actual_response = self.controller._shrink(req, id, body)

        share_api.API.get.assert_called_once_with(mock.ANY, id)
        share_api.API.shrink.assert_called_once_with(
            mock.ANY, share, int(size))
        self.assertEqual(202, actual_response.status_int)

    @ddt.data({"os-shrink": ""},
              {"os-shrink": {"new_size": "foo"}},
              {"os-shrink": {"new_size": {'foo': 'bar'}}})
    def test_shrink_invalid_body(self, body):
        id = 'fake_share_id'
        req = fakes.HTTPRequest.blank('/v1/shares/%s/action' % id)

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._shrink, req, id, body)

    @ddt.data({'source': exception.InvalidInput,
               'target': webob.exc.HTTPBadRequest},
              {'source': exception.InvalidShare,
               'target': webob.exc.HTTPBadRequest})
    @ddt.unpack
    def test_shrink_exception(self, source, target):
        id = 'fake_share_id'
        req = fakes.HTTPRequest.blank('/v1/shares/%s/action' % id)
        body = {"os-shrink": {'new_size': '123'}}
        self.mock_object(share_api.API, "shrink",
                         mock.Mock(side_effect=source('fake')))

        self.assertRaises(target, self.controller._shrink, req, id, body)


@ddt.ddt
class ShareAdminActionsAPITest(test.TestCase):

    def setUp(self):
        super(self.__class__, self).setUp()
        CONF.set_default("default_share_type", None)
        self.flags(rpc_backend='manila.openstack.common.rpc.impl_fake')
        self.share_api = share_api.API()
        self.admin_context = context.RequestContext('admin', 'fake', True)
        self.member_context = context.RequestContext('fake', 'fake')

    def _get_context(self, role):
        return getattr(self, '%s_context' % role)

    def _setup_share_data(self, share=None):
        if share is None:
            share = db_utils.create_share(status=constants.STATUS_AVAILABLE,
                                          size='1', override_defaults=True)
        req = webob.Request.blank('/v2/fake/shares/%s/action' % share['id'])
        return share, req

    def _reset_status(self, ctxt, model, req, db_access_method,
                      valid_code, valid_status=None, body=None):
        if body is None:
            body = {'os-reset_status': {'status': constants.STATUS_ERROR}}
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        req.body = six.b(jsonutils.dumps(body))
        req.environ['manila.context'] = ctxt

        resp = req.get_response(fakes.app())

        # validate response code and model status
        self.assertEqual(valid_code, resp.status_int)

        if valid_code == 404:
            self.assertRaises(exception.NotFound,
                              db_access_method, ctxt, model['id'])
        else:
            actual_model = db_access_method(ctxt, model['id'])
            self.assertEqual(valid_status, actual_model['status'])

    @ddt.data(
        {
            'role': 'admin',
            'valid_code': 202,
            'valid_status': constants.STATUS_ERROR,
        },
        {
            'role': 'member',
            'valid_code': 403,
            'valid_status': constants.STATUS_AVAILABLE,
        },
    )
    @ddt.unpack
    def test_share_reset_status_with_different_roles(self, role, valid_code,
                                                     valid_status):
        share, req = self._setup_share_data()
        ctxt = self._get_context(role)

        self._reset_status(ctxt, share, req, db.share_get, valid_code,
                           valid_status)

    @ddt.data(*fakes.fixture_invalid_reset_status_body)
    def test_share_invalid_reset_status_body(self, body):
        share, req = self._setup_share_data()
        ctxt = self.admin_context

        self._reset_status(ctxt, share, req, db.share_get, 400,
                           constants.STATUS_AVAILABLE, body)

    def test_share_reset_status_for_missing(self):
        fake_share = {'id': 'missing-share-id'}
        req = webob.Request.blank('/v1/fake/shares/%s/action' %
                                  fake_share['id'])

        self._reset_status(self.admin_context, fake_share, req,
                           db.share_snapshot_get, 404)

    def _force_delete(self, ctxt, model, req, db_access_method, valid_code,
                      check_model_in_db=False):
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        req.body = six.b(jsonutils.dumps({'os-force_delete': {}}))
        req.environ['manila.context'] = ctxt

        resp = req.get_response(fakes.app())

        # validate response
        self.assertEqual(valid_code, resp.status_int)

        if valid_code == 202 and check_model_in_db:
            self.assertRaises(exception.NotFound,
                              db_access_method, ctxt, model['id'])

    @ddt.data(
        {'role': 'admin', 'resp_code': 202},
        {'role': 'member', 'resp_code': 403},
    )
    @ddt.unpack
    def test_share_force_delete_with_different_roles(self, role, resp_code):
        share, req = self._setup_share_data()
        ctxt = self._get_context(role)

        self._force_delete(ctxt, share, req, db.share_get, resp_code,
                           check_model_in_db=True)

    def test_share_force_delete_missing(self):
        share, req = self._setup_share_data(share={'id': 'fake'})
        ctxt = self._get_context('admin')

        self._force_delete(ctxt, share, req, db.share_get, 404)
manila-2.0.0/manila/tests/api/v1/test_share_snapshots.py0000664000567000056710000004123012701407107024431 0ustar jenkinsjenkins00000000000000
# Copyright 2012 NetApp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
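
# The ShareSnapshotAPITest cases below follow the same pattern as the share
# tests above: stub the share API layer with mock_object, build a request
# with fakes.HTTPRequest.blank(), call the controller, and compare against
# an expected dict. A minimal sketch of that flow (illustrative only,
# mirroring test_snapshot_show below):
#
#     req = fakes.HTTPRequest.blank('/snapshots/200')
#     res_dict = self.controller.show(req, 200)
#     self.assertEqual(fake_share.expected_snapshot(id=200), res_dict)
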
import ddt import mock from oslo_serialization import jsonutils import six import webob from manila.api.v1 import share_snapshots from manila.common import constants from manila import context from manila import db from manila import exception from manila.share import api as share_api from manila import test from manila.tests.api.contrib import stubs from manila.tests.api import fakes from manila.tests import db_utils from manila.tests import fake_share @ddt.ddt class ShareSnapshotAPITest(test.TestCase): """Share Snapshot API Test.""" def setUp(self): super(self.__class__, self).setUp() self.controller = share_snapshots.ShareSnapshotsController() self.mock_object(share_api.API, 'get', stubs.stub_share_get) self.mock_object(share_api.API, 'get_all_snapshots', stubs.stub_snapshot_get_all_by_project) self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get) self.mock_object(share_api.API, 'snapshot_update', stubs.stub_snapshot_update) self.snp_example = { 'share_id': 100, 'size': 12, 'force': False, 'display_name': 'updated_share_name', 'display_description': 'updated_share_description', } self.maxDiff = None def test_snapshot_show_status_none(self): return_snapshot = { 'share_id': 100, 'name': 'fake_share_name', 'description': 'fake_share_description', 'status': None, } self.mock_object(share_api.API, 'get_snapshot', mock.Mock(return_value=return_snapshot)) req = fakes.HTTPRequest.blank('/snapshots/200') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, '200') @ddt.data('true', 'True', ' True', '1') def test_snapshot_create(self, snapshot_support): self.mock_object(share_api.API, 'create_snapshot', stubs.stub_snapshot_create) body = { 'snapshot': { 'share_id': 'fakeshareid', 'force': False, 'name': 'displaysnapname', 'description': 'displaysnapdesc', } } req = fakes.HTTPRequest.blank('/snapshots') res_dict = self.controller.create(req, body) expected = fake_share.expected_snapshot(id=200) self.assertEqual(expected, res_dict) @ddt.data(0, False) def test_snapshot_create_no_support(self, snapshot_support): self.mock_object(share_api.API, 'create_snapshot') self.mock_object( share_api.API, 'get', mock.Mock(return_value={'snapshot_support': snapshot_support})) body = { 'snapshot': { 'share_id': 100, 'force': False, 'name': 'fake_share_name', 'description': 'fake_share_description', } } req = fakes.HTTPRequest.blank('/snapshots') self.assertRaises( webob.exc.HTTPUnprocessableEntity, self.controller.create, req, body) self.assertFalse(share_api.API.create_snapshot.called) def test_snapshot_create_no_body(self): body = {} req = fakes.HTTPRequest.blank('/snapshots') self.assertRaises(webob.exc.HTTPUnprocessableEntity, self.controller.create, req, body) def test_snapshot_delete(self): self.mock_object(share_api.API, 'delete_snapshot', stubs.stub_snapshot_delete) req = fakes.HTTPRequest.blank('/snapshots/200') resp = self.controller.delete(req, 200) self.assertEqual(202, resp.status_int) def test_snapshot_delete_nofound(self): self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get_notfound) req = fakes.HTTPRequest.blank('/snapshots/200') self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, 200) def test_snapshot_show(self): req = fakes.HTTPRequest.blank('/snapshots/200') res_dict = self.controller.show(req, 200) expected = fake_share.expected_snapshot(id=200) self.assertEqual(expected, res_dict) def test_snapshot_show_nofound(self): self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get_notfound) req = 
fakes.HTTPRequest.blank('/snapshots/200') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, '200') def test_snapshot_list_summary(self): self.mock_object(share_api.API, 'get_all_snapshots', stubs.stub_snapshot_get_all_by_project) req = fakes.HTTPRequest.blank('/snapshots') res_dict = self.controller.index(req) expected = { 'snapshots': [ { 'name': 'displaysnapname', 'id': 2, 'links': [ { 'href': 'http://localhost/v1/fake/' 'snapshots/2', 'rel': 'self' }, { 'href': 'http://localhost/fake/snapshots/2', 'rel': 'bookmark' } ], } ] } self.assertEqual(expected, res_dict) def _snapshot_list_summary_with_search_opts(self, use_admin_context): search_opts = fake_share.search_opts() # fake_key should be filtered for non-admin url = '/snapshots?fake_key=fake_value' for k, v in search_opts.items(): url = url + '&' + k + '=' + v req = fakes.HTTPRequest.blank(url, use_admin_context=use_admin_context) snapshots = [ {'id': 'id1', 'display_name': 'n1', 'status': 'fake_status', 'share_id': 'fake_share_id'}, {'id': 'id2', 'display_name': 'n2', 'status': 'fake_status', 'share_id': 'fake_share_id'}, {'id': 'id3', 'display_name': 'n3', 'status': 'fake_status', 'share_id': 'fake_share_id'}, ] self.mock_object(share_api.API, 'get_all_snapshots', mock.Mock(return_value=snapshots)) result = self.controller.index(req) search_opts_expected = { 'display_name': search_opts['name'], 'status': search_opts['status'], 'share_id': search_opts['share_id'], } if use_admin_context: search_opts_expected.update({'fake_key': 'fake_value'}) share_api.API.get_all_snapshots.assert_called_once_with( req.environ['manila.context'], sort_key=search_opts['sort_key'], sort_dir=search_opts['sort_dir'], search_opts=search_opts_expected, ) self.assertEqual(1, len(result['snapshots'])) self.assertEqual(snapshots[1]['id'], result['snapshots'][0]['id']) self.assertEqual( snapshots[1]['display_name'], result['snapshots'][0]['name']) def test_snapshot_list_summary_with_search_opts_by_non_admin(self): self._snapshot_list_summary_with_search_opts(use_admin_context=False) def test_snapshot_list_summary_with_search_opts_by_admin(self): self._snapshot_list_summary_with_search_opts(use_admin_context=True) def _snapshot_list_detail_with_search_opts(self, use_admin_context): search_opts = fake_share.search_opts() # fake_key should be filtered for non-admin url = '/shares/detail?fake_key=fake_value' for k, v in search_opts.items(): url = url + '&' + k + '=' + v req = fakes.HTTPRequest.blank(url, use_admin_context=use_admin_context) snapshots = [ { 'id': 'id1', 'display_name': 'n1', 'status': 'fake_status_other', 'aggregate_status': 'fake_status', 'share_id': 'fake_share_id', }, { 'id': 'id2', 'display_name': 'n2', 'status': 'fake_status', 'aggregate_status': 'fake_status', 'share_id': 'fake_share_id', }, { 'id': 'id3', 'display_name': 'n3', 'status': 'fake_status_other', 'aggregate_status': 'fake_status', 'share_id': 'fake_share_id', }, ] self.mock_object(share_api.API, 'get_all_snapshots', mock.Mock(return_value=snapshots)) result = self.controller.detail(req) search_opts_expected = { 'display_name': search_opts['name'], 'status': search_opts['status'], 'share_id': search_opts['share_id'], } if use_admin_context: search_opts_expected.update({'fake_key': 'fake_value'}) share_api.API.get_all_snapshots.assert_called_once_with( req.environ['manila.context'], sort_key=search_opts['sort_key'], sort_dir=search_opts['sort_dir'], search_opts=search_opts_expected, ) self.assertEqual(1, len(result['snapshots'])) 
self.assertEqual(snapshots[1]['id'], result['snapshots'][0]['id']) self.assertEqual( snapshots[1]['display_name'], result['snapshots'][0]['name']) self.assertEqual( snapshots[1]['status'], result['snapshots'][0]['status']) self.assertEqual( snapshots[1]['share_id'], result['snapshots'][0]['share_id']) def test_snapshot_list_detail_with_search_opts_by_non_admin(self): self._snapshot_list_detail_with_search_opts(use_admin_context=False) def test_snapshot_list_detail_with_search_opts_by_admin(self): self._snapshot_list_detail_with_search_opts(use_admin_context=True) def test_snapshot_list_detail(self): env = {'QUERY_STRING': 'name=Share+Test+Name'} req = fakes.HTTPRequest.blank('/shares/detail', environ=env) res_dict = self.controller.detail(req) expected_s = fake_share.expected_snapshot(id=2) expected = {'snapshots': [expected_s['snapshot']]} self.assertEqual(expected, res_dict) def test_snapshot_list_status_none(self): snapshots = [ { 'id': 2, 'share_id': 'fakeshareid', 'size': 1, 'status': 'fakesnapstatus', 'name': 'displaysnapname', 'description': 'displaysnapdesc', }, { 'id': 3, 'share_id': 'fakeshareid', 'size': 1, 'status': None, 'name': 'displaysnapname', 'description': 'displaysnapdesc', } ] self.mock_object(share_api.API, 'get_all_snapshots', mock.Mock(return_value=snapshots)) req = fakes.HTTPRequest.blank('/snapshots') result = self.controller.index(req) self.assertEqual(1, len(result['snapshots'])) self.assertEqual(snapshots[0]['id'], result['snapshots'][0]['id']) def test_snapshot_updates_description(self): snp = self.snp_example body = {"snapshot": snp} req = fakes.HTTPRequest.blank('/snapshot/1') res_dict = self.controller.update(req, 1, body) self.assertEqual(snp["display_name"], res_dict['snapshot']["name"]) def test_snapshot_updates_display_descr(self): snp = self.snp_example body = {"snapshot": snp} req = fakes.HTTPRequest.blank('/snapshot/1') res_dict = self.controller.update(req, 1, body) self.assertEqual(snp["display_description"], res_dict['snapshot']["description"]) def test_share_not_updates_size(self): snp = self.snp_example body = {"snapshot": snp} req = fakes.HTTPRequest.blank('/snapshot/1') res_dict = self.controller.update(req, 1, body) self.assertNotEqual(snp["size"], res_dict['snapshot']["size"]) @ddt.ddt class ShareSnapshotAdminActionsAPITest(test.TestCase): def setUp(self): super(self.__class__, self).setUp() self.controller = share_snapshots.ShareSnapshotsController() self.flags(rpc_backend='manila.openstack.common.rpc.impl_fake') self.admin_context = context.RequestContext('admin', 'fake', True) self.member_context = context.RequestContext('fake', 'fake') def _get_context(self, role): return getattr(self, '%s_context' % role) def _setup_snapshot_data(self, snapshot=None): if snapshot is None: share = db_utils.create_share() snapshot = db_utils.create_snapshot( status=constants.STATUS_AVAILABLE, share_id=share['id']) req = fakes.HTTPRequest.blank('/v1/fake/snapshots/%s/action' % snapshot['id']) return snapshot, req def _reset_status(self, ctxt, model, req, db_access_method, valid_code, valid_status=None, body=None): action_name = 'os-reset_status' if body is None: body = {action_name: {'status': constants.STATUS_ERROR}} req.method = 'POST' req.headers['content-type'] = 'application/json' req.body = six.b(jsonutils.dumps(body)) req.environ['manila.context'] = ctxt resp = req.get_response(fakes.app()) # validate response code and model status self.assertEqual(valid_code, resp.status_int) if valid_code == 404: self.assertRaises(exception.NotFound, 
db_access_method, ctxt, model['id']) else: actual_model = db_access_method(ctxt, model['id']) self.assertEqual(valid_status, actual_model['status']) @ddt.data(*fakes.fixture_reset_status_with_different_roles_v1) @ddt.unpack def test_snapshot_reset_status_with_different_roles(self, role, valid_code, valid_status): ctxt = self._get_context(role) snapshot, req = self._setup_snapshot_data() self._reset_status(ctxt, snapshot, req, db.share_snapshot_get, valid_code, valid_status) @ddt.data( {'os-reset_status': {'x-status': 'bad'}}, {'os-reset_status': {'status': 'invalid'}}, ) def test_snapshot_invalid_reset_status_body(self, body): snapshot, req = self._setup_snapshot_data() self._reset_status(self.admin_context, snapshot, req, db.share_snapshot_get, 400, constants.STATUS_AVAILABLE, body) def _force_delete(self, ctxt, model, req, db_access_method, valid_code): action_name = 'os-force_delete' req.method = 'POST' req.headers['content-type'] = 'application/json' req.body = six.b(jsonutils.dumps({action_name: {}})) req.environ['manila.context'] = ctxt resp = req.get_response(fakes.app()) # Validate response self.assertEqual(valid_code, resp.status_int) @ddt.data( {'role': 'admin', 'resp_code': 202}, {'role': 'member', 'resp_code': 403}, ) @ddt.unpack def test_snapshot_force_delete_with_different_roles(self, role, resp_code): ctxt = self._get_context(role) snapshot, req = self._setup_snapshot_data() self._force_delete(ctxt, snapshot, req, db.share_snapshot_get, resp_code) def test_snapshot_force_delete_missing(self): ctxt = self._get_context('admin') snapshot, req = self._setup_snapshot_data(snapshot={'id': 'fake'}) self._force_delete(ctxt, snapshot, req, db.share_snapshot_get, 404) manila-2.0.0/manila/tests/api/v1/test_share_metadata.py0000664000567000056710000003064712701407107024201 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
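
# ShareMetaDataTest below exercises the /shares/<id>/metadata controller
# against a test database record created in setUp(). A minimal sketch of the
# update-item flow it repeats (illustrative only, reusing names defined in
# this module):
#
#     req = fakes.HTTPRequest.blank(self.url + '/key1')
#     req.method = 'PUT'
#     body = {"meta": {"key1": "value1"}}
#     req.body = six.b(jsonutils.dumps(body))
#     req.headers["content-type"] = "application/json"
#     res_dict = self.controller.update(req, self.share_id, 'key1', body)
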
import ddt from oslo_config import cfg from oslo_serialization import jsonutils import six import webob from manila.api.v1 import share_metadata from manila.api.v1 import shares from manila import context from manila import db from manila.share import api from manila import test from manila.tests.api import fakes CONF = cfg.CONF @ddt.ddt class ShareMetaDataTest(test.TestCase): def setUp(self): super(ShareMetaDataTest, self).setUp() self.share_api = api.API() self.share_controller = shares.ShareController() self.controller = share_metadata.ShareMetadataController() self.ctxt = context.RequestContext('admin', 'fake', True) self.origin_metadata = { "key1": "value1", "key2": "value2", "key3": "value3", } self.share = db.share_create(self.ctxt, {}) self.share_id = self.share['id'] self.url = '/shares/%s/metadata' % self.share_id db.share_metadata_update( self.ctxt, self.share_id, self.origin_metadata, delete=False) def test_index(self): req = fakes.HTTPRequest.blank(self.url) res_dict = self.controller.index(req, self.share_id) expected = { 'metadata': { 'key1': 'value1', 'key2': 'value2', 'key3': 'value3', }, } self.assertEqual(expected, res_dict) def test_index_nonexistent_share(self): req = fakes.HTTPRequest.blank(self.url) self.assertRaises(webob.exc.HTTPNotFound, self.controller.index, req, self.url) def test_index_no_data(self): db.share_metadata_update( self.ctxt, self.share_id, {}, delete=True) req = fakes.HTTPRequest.blank(self.url) res_dict = self.controller.index(req, self.share_id) expected = {'metadata': {}} self.assertEqual(expected, res_dict) def test_show(self): req = fakes.HTTPRequest.blank(self.url + '/key2') res_dict = self.controller.show(req, self.share_id, 'key2') expected = {'meta': {'key2': 'value2'}} self.assertEqual(expected, res_dict) def test_show_nonexistent_share(self): req = fakes.HTTPRequest.blank(self.url + '/key2') self.assertRaises( webob.exc.HTTPNotFound, self.controller.show, req, "nonexistent_share", 'key2') def test_show_meta_not_found(self): req = fakes.HTTPRequest.blank(self.url + '/key6') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, self.share_id, 'key6') def test_delete(self): req = fakes.HTTPRequest.blank(self.url + '/key2') req.method = 'DELETE' res = self.controller.delete(req, self.share_id, 'key2') self.assertEqual(200, res.status_int) def test_delete_nonexistent_share(self): req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'DELETE' self.assertRaises( webob.exc.HTTPNotFound, self.controller.delete, req, "nonexistent_share", 'key1') def test_delete_meta_not_found(self): req = fakes.HTTPRequest.blank(self.url + '/key6') req.method = 'DELETE' self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, self.share_id, 'key6') def test_create(self): req = fakes.HTTPRequest.blank('/v1/share_metadata') req.method = 'POST' req.content_type = "application/json" body = {"metadata": {"key9": "value9"}} req.body = six.b(jsonutils.dumps(body)) res_dict = self.controller.create(req, self.share_id, body) expected = self.origin_metadata expected.update(body['metadata']) self.assertEqual({'metadata': expected}, res_dict) def test_create_empty_body(self): req = fakes.HTTPRequest.blank(self.url) req.method = 'POST' req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.share_id, None) def test_create_item_empty_key(self): req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"": "value1"}} req.body = 
six.b(jsonutils.dumps(body)) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.share_id, body) def test_create_item_key_too_long(self): req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {("a" * 260): "value1"}} req.body = six.b(jsonutils.dumps(body)) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.share_id, body) def test_create_nonexistent_share(self): req = fakes.HTTPRequest.blank('/v1/share_metadata') req.method = 'POST' req.content_type = "application/json" body = {"metadata": {"key9": "value9"}} req.body = six.b(jsonutils.dumps(body)) self.assertRaises( webob.exc.HTTPNotFound, self.controller.create, req, "nonexistent_share", body) def test_update_all(self): req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = { 'metadata': { 'key10': 'value10', 'key99': 'value99', }, } req.body = six.b(jsonutils.dumps(expected)) res_dict = self.controller.update_all(req, self.share_id, expected) self.assertEqual(expected, res_dict) def test_update_all_empty_container(self): req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = {'metadata': {}} req.body = six.b(jsonutils.dumps(expected)) res_dict = self.controller.update_all(req, self.share_id, expected) self.assertEqual(expected, res_dict) def test_update_all_malformed_container(self): req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = {'meta': {}} req.body = six.b(jsonutils.dumps(expected)) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update_all, req, self.share_id, expected) @ddt.data(['asdf'], {'key': None}, {None: 'value'}, {None: None}) def test_update_all_malformed_data(self, metadata): req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = {'metadata': metadata} req.body = six.b(jsonutils.dumps(expected)) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update_all, req, self.share_id, expected) def test_update_all_nonexistent_share(self): req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" body = {'metadata': {'key10': 'value10'}} req.body = six.b(jsonutils.dumps(body)) self.assertRaises(webob.exc.HTTPNotFound, self.controller.update_all, req, '100', body) def test_update_item(self): req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = six.b(jsonutils.dumps(body)) req.headers["content-type"] = "application/json" res_dict = self.controller.update(req, self.share_id, 'key1', body) expected = {'meta': {'key1': 'value1'}} self.assertEqual(expected, res_dict) def test_update_item_nonexistent_share(self): req = fakes.HTTPRequest.blank('/v1.1/fake/shares/asdf/metadata/key1') req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = six.b(jsonutils.dumps(body)) req.headers["content-type"] = "application/json" self.assertRaises( webob.exc.HTTPNotFound, self.controller.update, req, "nonexistent_share", 'key1', body) def test_update_item_empty_body(self): req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.share_id, 'key1', None) def test_update_item_empty_key(self): req 
= fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"": "value1"}} req.body = six.b(jsonutils.dumps(body)) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.share_id, '', body) def test_update_item_key_too_long(self): req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {("a" * 260): "value1"}} req.body = six.b(jsonutils.dumps(body)) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.share_id, ("a" * 260), body) def test_update_item_value_too_long(self): req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"key1": ("a" * 1025)}} req.body = six.b(jsonutils.dumps(body)) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.share_id, "key1", body) def test_update_item_too_many_keys(self): req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"key1": "value1", "key2": "value2"}} req.body = six.b(jsonutils.dumps(body)) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.share_id, 'key1', body) def test_update_item_body_uri_mismatch(self): req = fakes.HTTPRequest.blank(self.url + '/bad') req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = six.b(jsonutils.dumps(body)) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.share_id, 'bad', body) def test_invalid_metadata_items_on_create(self): req = fakes.HTTPRequest.blank(self.url) req.method = 'POST' req.headers["content-type"] = "application/json" # test for long key data = {"metadata": {"a" * 260: "value1"}} req.body = six.b(jsonutils.dumps(data)) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.share_id, data) # test for long value data = {"metadata": {"key": "v" * 1025}} req.body = six.b(jsonutils.dumps(data)) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.share_id, data) # test for empty key. data = {"metadata": {"": "value1"}} req.body = six.b(jsonutils.dumps(data)) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.share_id, data) manila-2.0.0/manila/tests/api/v1/test_share_servers.py0000664000567000056710000003212012701407107024076 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
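
# ShareServerAPITest below drives ShareServerController.index() with small
# fake request classes whose GET dict carries the filter under test (host,
# status, project_id, share_network). A minimal sketch of that pattern
# (illustrative only, reusing the fakes defined in this module):
#
#     class FakeRequestWithStatus(FakeRequestAdmin):
#         GET = {'status': constants.STATUS_ERROR}
#
#     result = self.controller.index(FakeRequestWithStatus)
#     # only the fake share server in STATUS_ERROR is expected in the result
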
import mock from webob import exc from manila.api.v1 import share_servers from manila.common import constants from manila import context from manila.db import api as db_api from manila import exception from manila import policy from manila import test fake_share_server_list = { 'share_servers': [ { 'status': constants.STATUS_ACTIVE, 'updated_at': None, 'host': 'fake_host', 'share_network_id': 'fake_sn_id', 'share_network_name': 'fake_sn_name', 'project_id': 'fake_project_id', 'id': 'fake_server_id', }, { 'status': constants.STATUS_ERROR, 'updated_at': None, 'host': 'fake_host_2', 'share_network_id': 'fake_sn_id_2', 'share_network_name': 'fake_sn_id_2', 'project_id': 'fake_project_id_2', 'id': 'fake_server_id_2', }, ] } fake_share_server_get_result = { 'share_server': { 'status': constants.STATUS_ACTIVE, 'created_at': None, 'updated_at': None, 'host': 'fake_host', 'share_network_name': 'fake_sn_name', 'share_network_id': 'fake_sn_id', 'project_id': 'fake_project_id', 'id': 'fake_server_id', 'backend_details': { 'fake_key_1': 'fake_value_1', 'fake_key_2': 'fake_value_2', } } } share_server_backend_details = { 'fake_key_1': 'fake_value_1', 'fake_key_2': 'fake_value_2', } fake_share_server_backend_details_get_result = { 'details': share_server_backend_details } CONTEXT = context.get_admin_context() class FakeShareServer(object): def __init__(self, *args, **kwargs): super(FakeShareServer, self).__init__() self.id = kwargs.get('id', 'fake_server_id') if 'created_at' in kwargs: self.created_at = kwargs.get('created_at', None) self.updated_at = kwargs.get('updated_at', None) self.host = kwargs.get('host', 'fake_host') self.share_network = kwargs.get('share_network', { 'name': 'fake_sn_name', 'id': 'fake_sn_id', 'project_id': 'fake_project_id'}) self.share_network_id = kwargs.get('share_network_id', self.share_network['id']) self.status = kwargs.get('status', constants.STATUS_ACTIVE) self.project_id = self.share_network['project_id'] self.backend_details = share_server_backend_details def __getitem__(self, item): return getattr(self, item) def fake_share_server_get_all(): fake_share_servers = [ FakeShareServer(), FakeShareServer(id='fake_server_id_2', host='fake_host_2', share_network={ 'name': None, 'id': 'fake_sn_id_2', 'project_id': 'fake_project_id_2'}, status=constants.STATUS_ERROR) ] return fake_share_servers def fake_share_server_get(): return FakeShareServer(created_at=None) class FakeRequestAdmin(object): environ = {"manila.context": CONTEXT} GET = {} class FakeRequestWithHost(FakeRequestAdmin): GET = {'host': fake_share_server_list['share_servers'][0]['host']} class FakeRequestWithStatus(FakeRequestAdmin): GET = {'status': constants.STATUS_ERROR} class FakeRequestWithProjectId(FakeRequestAdmin): GET = {'project_id': fake_share_server_get_all()[0].project_id} class FakeRequestWithShareNetworkName(FakeRequestAdmin): GET = { 'share_network': fake_share_server_get_all()[0].share_network['name'], } class FakeRequestWithShareNetworkId(FakeRequestAdmin): GET = { 'share_network': fake_share_server_get_all()[0].share_network['id'], } class FakeRequestWithFakeFilter(FakeRequestAdmin): GET = {'fake_key': 'fake_value'} class ShareServerAPITest(test.TestCase): def setUp(self): super(ShareServerAPITest, self).setUp() self.controller = share_servers.ShareServerController() self.resource_name = self.controller.resource_name self.mock_object(policy, 'check_policy', mock.Mock(return_value=True)) self.mock_object(db_api, 'share_server_get_all', mock.Mock(return_value=fake_share_server_get_all())) def 
test_index_no_filters(self): result = self.controller.index(FakeRequestAdmin) policy.check_policy.assert_called_once_with( CONTEXT, self.resource_name, 'index') db_api.share_server_get_all.assert_called_once_with(CONTEXT) self.assertEqual(fake_share_server_list, result) def test_index_host_filter(self): result = self.controller.index(FakeRequestWithHost) policy.check_policy.assert_called_once_with( CONTEXT, self.resource_name, 'index') db_api.share_server_get_all.assert_called_once_with(CONTEXT) self.assertEqual([fake_share_server_list['share_servers'][0]], result['share_servers']) def test_index_status_filter(self): result = self.controller.index(FakeRequestWithStatus) policy.check_policy.assert_called_once_with( CONTEXT, self.resource_name, 'index') db_api.share_server_get_all.assert_called_once_with(CONTEXT) self.assertEqual([fake_share_server_list['share_servers'][1]], result['share_servers']) def test_index_project_id_filter(self): result = self.controller.index(FakeRequestWithProjectId) policy.check_policy.assert_called_once_with( CONTEXT, self.resource_name, 'index') db_api.share_server_get_all.assert_called_once_with(CONTEXT) self.assertEqual([fake_share_server_list['share_servers'][0]], result['share_servers']) def test_index_share_network_filter_by_name(self): result = self.controller.index(FakeRequestWithShareNetworkName) policy.check_policy.assert_called_once_with( CONTEXT, self.resource_name, 'index') db_api.share_server_get_all.assert_called_once_with(CONTEXT) self.assertEqual([fake_share_server_list['share_servers'][0]], result['share_servers']) def test_index_share_network_filter_by_id(self): result = self.controller.index(FakeRequestWithShareNetworkId) policy.check_policy.assert_called_once_with( CONTEXT, self.resource_name, 'index') db_api.share_server_get_all.assert_called_once_with(CONTEXT) self.assertEqual([fake_share_server_list['share_servers'][0]], result['share_servers']) def test_index_fake_filter(self): result = self.controller.index(FakeRequestWithFakeFilter) policy.check_policy.assert_called_once_with( CONTEXT, self.resource_name, 'index') db_api.share_server_get_all.assert_called_once_with(CONTEXT) self.assertEqual(0, len(result['share_servers'])) def test_show(self): self.mock_object(db_api, 'share_server_get', mock.Mock(return_value=fake_share_server_get())) result = self.controller.show( FakeRequestAdmin, fake_share_server_get_result['share_server']['id']) policy.check_policy.assert_called_once_with( CONTEXT, self.resource_name, 'show') db_api.share_server_get.assert_called_once_with( CONTEXT, fake_share_server_get_result['share_server']['id']) self.assertEqual(fake_share_server_get_result['share_server'], result['share_server']) def test_details(self): self.mock_object(db_api, 'share_server_get', mock.Mock(return_value=fake_share_server_get())) result = self.controller.details( FakeRequestAdmin, fake_share_server_get_result['share_server']['id']) policy.check_policy.assert_called_once_with( CONTEXT, self.resource_name, 'details') db_api.share_server_get.assert_called_once_with( CONTEXT, fake_share_server_get_result['share_server']['id']) self.assertEqual(fake_share_server_backend_details_get_result, result) def test_delete_active_server(self): share_server = FakeShareServer(status=constants.STATUS_ACTIVE) self.mock_object(db_api, 'share_server_get', mock.Mock(return_value=share_server)) self.mock_object(self.controller.share_api, 'delete_share_server') self.controller.delete( FakeRequestAdmin, fake_share_server_get_result['share_server']['id']) 
policy.check_policy.assert_called_once_with( CONTEXT, self.resource_name, 'delete') db_api.share_server_get.assert_called_once_with( CONTEXT, fake_share_server_get_result['share_server']['id']) self.controller.share_api.delete_share_server.assert_called_once_with( CONTEXT, share_server) def test_delete_error_server(self): share_server = FakeShareServer(status=constants.STATUS_ERROR) self.mock_object(db_api, 'share_server_get', mock.Mock(return_value=share_server)) self.mock_object(self.controller.share_api, 'delete_share_server') self.controller.delete( FakeRequestAdmin, fake_share_server_get_result['share_server']['id']) policy.check_policy.assert_called_once_with( CONTEXT, self.resource_name, 'delete') db_api.share_server_get.assert_called_once_with( CONTEXT, fake_share_server_get_result['share_server']['id']) self.controller.share_api.delete_share_server.assert_called_once_with( CONTEXT, share_server) def test_delete_used_server(self): share_server_id = fake_share_server_get_result['share_server']['id'] def raise_not_share_server_in_use(*args, **kwargs): raise exception.ShareServerInUse(share_server_id=share_server_id) share_server = fake_share_server_get() self.mock_object(db_api, 'share_server_get', mock.Mock(return_value=share_server)) self.mock_object(self.controller.share_api, 'delete_share_server', mock.Mock(side_effect=raise_not_share_server_in_use)) self.assertRaises(exc.HTTPConflict, self.controller.delete, FakeRequestAdmin, share_server_id) db_api.share_server_get.assert_called_once_with(CONTEXT, share_server_id) self.controller.share_api.delete_share_server.assert_called_once_with( CONTEXT, share_server) def test_delete_not_found(self): share_server_id = fake_share_server_get_result['share_server']['id'] def raise_not_found(*args, **kwargs): raise exception.ShareServerNotFound( share_server_id=share_server_id) self.mock_object(db_api, 'share_server_get', mock.Mock(side_effect=raise_not_found)) self.assertRaises(exc.HTTPNotFound, self.controller.delete, FakeRequestAdmin, share_server_id) db_api.share_server_get.assert_called_once_with( CONTEXT, share_server_id) policy.check_policy.assert_called_once_with( CONTEXT, self.resource_name, 'delete') def test_delete_creating_server(self): share_server = FakeShareServer(status=constants.STATUS_CREATING) self.mock_object(db_api, 'share_server_get', mock.Mock(return_value=share_server)) self.assertRaises(exc.HTTPForbidden, self.controller.delete, FakeRequestAdmin, share_server['id']) policy.check_policy.assert_called_once_with( CONTEXT, self.resource_name, 'delete') def test_delete_deleting_server(self): share_server = FakeShareServer(status=constants.STATUS_DELETING) self.mock_object(db_api, 'share_server_get', mock.Mock(return_value=share_server)) self.assertRaises(exc.HTTPForbidden, self.controller.delete, FakeRequestAdmin, share_server['id']) policy.check_policy.assert_called_once_with( CONTEXT, self.resource_name, 'delete') manila-2.0.0/manila/tests/test_network.py0000664000567000056710000001031712701407107021621 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import ddt from oslo_config import cfg from oslo_utils import importutils from manila import exception from manila import network from manila import test CONF = cfg.CONF @ddt.ddt class APITestCase(test.TestCase): def setUp(self): super(self.__class__, self).setUp() self.mock_object(importutils, 'import_class') def test_init_api_with_default_config_group_name(self): network.API() importutils.import_class.assert_called_once_with( CONF.network_api_class) importutils.import_class.return_value.assert_called_once_with( config_group_name=None, label='user') def test_init_api_with_custom_config_group_name(self): group_name = 'FOO_GROUP_NAME' network.API(config_group_name=group_name) importutils.import_class.assert_called_once_with( getattr(CONF, group_name).network_api_class) importutils.import_class.return_value.assert_called_once_with( config_group_name=group_name, label='user') def test_init_api_with_custom_config_group_name_and_label(self): group_name = 'FOO_GROUP_NAME' label = 'custom_label' network.API(config_group_name=group_name, label=label) importutils.import_class.assert_called_once_with( getattr(CONF, group_name).network_api_class) importutils.import_class.return_value.assert_called_once_with( config_group_name=group_name, label=label) @ddt.ddt class NetworkBaseAPITestCase(test.TestCase): def setUp(self): super(self.__class__, self).setUp() self.db_driver = 'fake_driver' self.mock_object(importutils, 'import_module') def test_inherit_network_base_api_no_redefinitions(self): class FakeNetworkAPI(network.NetworkBaseAPI): pass self.assertRaises(TypeError, FakeNetworkAPI) def test_inherit_network_base_api_deallocate_not_redefined(self): class FakeNetworkAPI(network.NetworkBaseAPI): def allocate_network(self, *args, **kwargs): pass self.assertRaises(TypeError, FakeNetworkAPI) def test_inherit_network_base_api_allocate_not_redefined(self): class FakeNetworkAPI(network.NetworkBaseAPI): def deallocate_network(self, *args, **kwargs): pass self.assertRaises(TypeError, FakeNetworkAPI) def test_inherit_network_base_api(self): class FakeNetworkAPI(network.NetworkBaseAPI): def allocate_network(self, *args, **kwargs): pass def deallocate_network(self, *args, **kwargs): pass result = FakeNetworkAPI() self.assertTrue(hasattr(result, '_verify_share_network')) self.assertTrue(hasattr(result, 'allocate_network')) self.assertTrue(hasattr(result, 'deallocate_network')) def test__verify_share_network_ok(self): class FakeNetworkAPI(network.NetworkBaseAPI): def allocate_network(self, *args, **kwargs): pass def deallocate_network(self, *args, **kwargs): pass result = FakeNetworkAPI() result._verify_share_network('foo_id', {'id': 'bar_id'}) def test__verify_share_network_fail(self): class FakeNetworkAPI(network.NetworkBaseAPI): def allocate_network(self, *args, **kwargs): pass def deallocate_network(self, *args, **kwargs): pass result = FakeNetworkAPI() self.assertRaises( exception.NetworkBadConfigurationException, result._verify_share_network, 'foo_id', None) manila-2.0.0/manila/tests/fake_volume.py0000664000567000056710000000360312701407107021366 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg CONF = cfg.CONF class FakeVolume(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_vol_id') self.status = kwargs.pop('status', 'available') self.device = kwargs.pop('device', '') for key, value in kwargs.items(): setattr(self, key, value) def __getitem__(self, attr): return getattr(self, attr) class FakeVolumeSnapshot(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_volsnap_id') self.status = kwargs.pop('status', 'available') for key, value in kwargs.items(): setattr(self, key, value) def __getitem__(self, attr): return getattr(self, attr) class API(object): """Fake Volume API.""" def get(self, *args, **kwargs): pass def create_snapshot_force(self, *args, **kwargs): pass def get_snapshot(self, *args, **kwargs): pass def delete_snapshot(self, *args, **kwargs): pass def create(self, *args, **kwargs): pass def extend(self, *args, **kwargs): pass def get_all(self, search_opts): pass def delete(self, volume_id): pass def get_all_snapshots(self, search_opts): pass manila-2.0.0/manila/tests/consistency_group/0000775000567000056710000000000012701407265022277 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/consistency_group/__init__.py0000664000567000056710000000000012701407107024371 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/tests/consistency_group/test_api.py0000664000567000056710000014432312701407107024463 0ustar jenkinsjenkins00000000000000# Copyright 2015 Alex Meade # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
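# Unit tests for the consistency group API (manila.consistency_group.api):
# CG and cgsnapshot create, delete, update, and listing flows.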
"""Unit tests for the Share API module.""" import copy import datetime import ddt import mock from oslo_config import cfg from oslo_utils import timeutils from manila.common import constants import manila.consistency_group.api as cg_api from manila import context from manila import db as db_driver from manila import exception from manila.share import share_types from manila import test from manila.tests.api.contrib import stubs CONF = cfg.CONF def fake_cg(id, **kwargs): cg = { 'id': id, 'user_id': 'fakeuser', 'project_id': 'fakeproject', 'status': constants.STATUS_CREATING, 'name': None, 'description': None, 'host': None, 'source_cgsnapshot_id': None, 'share_network_id': None, 'share_types': None, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), } cg.update(kwargs) return cg def fake_cgsnapshot(id, **kwargs): snap = { 'id': id, 'user_id': 'fakeuser', 'project_id': 'fakeproject', 'status': constants.STATUS_CREATING, 'name': None, 'description': None, 'consistency_group_id': None, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), } snap.update(kwargs) return snap @ddt.ddt class CGAPITestCase(test.TestCase): def setUp(self): super(CGAPITestCase, self).setUp() self.context = context.get_admin_context() self.scheduler_rpcapi = mock.Mock() self.share_rpcapi = mock.Mock() self.share_api = mock.Mock() self.api = cg_api.API() self.mock_object(self.api, 'share_rpcapi', self.share_rpcapi) self.mock_object(self.api, 'share_api', self.share_api) self.mock_object(self.api, 'scheduler_rpcapi', self.scheduler_rpcapi) dt_utc = datetime.datetime.utcnow() self.mock_object(timeutils, 'utcnow', mock.Mock(return_value=dt_utc)) def test_create_empty_request(self): cg = fake_cg('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) expected_values = cg.copy() for name in ('id', 'host', 'created_at'): expected_values.pop(name, None) self.mock_object(db_driver, 'consistency_group_create', mock.Mock(return_value=cg)) self.api.create(self.context) db_driver.consistency_group_create.assert_called_once_with( self.context, expected_values) def test_create_request_spec(self): """Ensure the correct values are sent to the scheduler.""" cg = fake_cg('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) expected_values = cg.copy() for name in ('id', 'host', 'created_at'): expected_values.pop(name, None) expected_request_spec = { 'consistency_group_id': cg['id'], } expected_request_spec.update(cg) del expected_request_spec['id'] del expected_request_spec['created_at'] del expected_request_spec['host'] expected_request_spec['share_types'] = [] self.mock_object(db_driver, 'consistency_group_create', mock.Mock(return_value=cg)) self.api.create(self.context) self.scheduler_rpcapi.create_consistency_group.assert_called_once_with( self.context, cg_id=cg['id'], request_spec=expected_request_spec, filter_properties={} ) def test_create_with_name(self): fake_name = 'fake_name' cg = fake_cg('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) expected_values = cg.copy() for name in ('id', 'host', 'created_at'): expected_values.pop(name, None) expected_values['name'] = fake_name self.mock_object(db_driver, 'consistency_group_create', mock.Mock(return_value=cg)) self.mock_object(db_driver, 'share_network_get') self.api.create(self.context, name=fake_name) db_driver.consistency_group_create.assert_called_once_with( self.context, expected_values) 
self.scheduler_rpcapi.create_consistency_group.assert_called_once_with( self.context, cg_id=cg['id'], request_spec=mock.ANY, filter_properties={} ) def test_create_with_description(self): fake_desc = 'fake_desc' cg = fake_cg('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) expected_values = cg.copy() for name in ('id', 'host', 'created_at'): expected_values.pop(name, None) expected_values['description'] = fake_desc self.mock_object(db_driver, 'consistency_group_create', mock.Mock(return_value=cg)) self.api.create(self.context, description=fake_desc) db_driver.consistency_group_create.assert_called_once_with( self.context, expected_values) def test_create_with_multiple_share_types(self): fake_share_type = {'name': 'default', 'extra_specs': { 'driver_handles_share_servers': 'False'}, 'is_public': True, 'id': 'c01990c1-448f-435a-9de6-c7c894bb6df9'} fake_share_type_2 = {'name': 'default2', 'extra_specs': { 'driver_handles_share_servers': 'False'}, 'is_public': True, 'id': 'c01990c1-448f-435a-9de6-c7c894bb7df9'} fake_share_types = [fake_share_type, fake_share_type_2] self.mock_object(share_types, 'get_share_type') cg = fake_cg('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) expected_values = cg.copy() for name in ('id', 'host', 'created_at'): expected_values.pop(name, None) expected_values['share_types'] = fake_share_types self.mock_object(db_driver, 'consistency_group_create', mock.Mock(return_value=cg)) self.mock_object(db_driver, 'share_network_get') self.api.create(self.context, share_type_ids=fake_share_types) db_driver.consistency_group_create.assert_called_once_with( self.context, expected_values) def test_create_with_share_type_not_found(self): fake_share_type = {'name': 'default', 'extra_specs': { 'driver_handles_share_servers': 'False'}, 'is_public': True, 'id': 'c01990c1-448f-435a-9de6-c7c894bb6df9'} fake_share_types = [fake_share_type] self.mock_object(share_types, 'get_share_type', mock.Mock(side_effect=exception.ShareTypeNotFound( share_type_id=fake_share_type['id']))) cg = fake_cg('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) expected_values = cg.copy() for name in ('id', 'host', 'created_at'): expected_values.pop(name, None) expected_values['share_types'] = fake_share_types self.mock_object(db_driver, 'consistency_group_create', mock.Mock(return_value=cg)) self.assertRaises(exception.InvalidInput, self.api.create, self.context, share_type_ids=[fake_share_type['id']]) def test_create_driver_handles_share_servers_is_false_with_net_id(self): fake_share_type = {'name': 'default', 'extra_specs': { 'driver_handles_share_servers': 'False'}, 'is_public': False, 'id': 'c01990c1-448f-435a-9de6-c7c894bb6df9'} fake_share_types = [fake_share_type] self.mock_object(share_types, 'get_share_type') self.assertRaises(exception.InvalidInput, self.api.create, self.context, share_type_ids=fake_share_types, share_network_id="fake_share_network") def test_create_with_conflicting_share_types(self): fake_share_type = {'name': 'default', 'extra_specs': { 'driver_handles_share_servers': 'True'}, 'is_public': True, 'id': 'c01990c1-448f-435a-9de6-c7c894bb6df9'} fake_share_type_2 = {'name': 'default2', 'extra_specs': { 'driver_handles_share_servers': 'False'}, 'is_public': True, 'id': 'c01990c1-448f-435a-9de6-c7c894bb7df9'} fake_share_types = [fake_share_type, fake_share_type_2] self.mock_object(share_types, 
'get_share_type', mock.Mock(side_effect=[fake_share_type, fake_share_type_2])) self.assertRaises(exception.InvalidInput, self.api.create, self.context, share_type_ids=fake_share_types) def test_create_with_conflicting_share_type_and_share_network(self): fake_share_type = {'name': 'default', 'extra_specs': { 'driver_handles_share_servers': 'False'}, 'is_public': True, 'id': 'c01990c1-448f-435a-9de6-c7c894bb6df9'} fake_share_types = [fake_share_type] self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=fake_share_type)) self.assertRaises(exception.InvalidInput, self.api.create, self.context, share_type_ids=fake_share_types, share_network_id="fake_sn") def test_create_with_source_cgsnapshot_id(self): snap = fake_cgsnapshot("fake_source_cgsnapshot_id", status=constants.STATUS_AVAILABLE) fake_share_type_mapping = {'share_type_id': "fake_share_type_id"} orig_cg = fake_cg('fakeorigid', user_id=self.context.user_id, project_id=self.context.project_id, share_types=[fake_share_type_mapping], status=constants.STATUS_AVAILABLE, host='fake_original_host') cg = fake_cg('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, share_types=[fake_share_type_mapping], status=constants.STATUS_CREATING, host='fake_original_host') expected_values = cg.copy() for name in ('id', 'created_at'): expected_values.pop(name, None) expected_values['source_cgsnapshot_id'] = snap['id'] expected_values['share_types'] = ["fake_share_type_id"] self.mock_object(db_driver, 'cgsnapshot_get', mock.Mock(return_value=snap)) self.mock_object(db_driver, 'consistency_group_get', mock.Mock(return_value=orig_cg)) self.mock_object(db_driver, 'consistency_group_create', mock.Mock(return_value=cg)) self.mock_object(share_types, 'get_share_type', mock.Mock(return_value={"id": "fake_share_type_id"})) self.mock_object(db_driver, 'share_network_get') self.mock_object(db_driver, 'cgsnapshot_members_get_all', mock.Mock(return_value=[])) self.api.create(self.context, source_cgsnapshot_id=snap['id']) db_driver.consistency_group_create.assert_called_once_with( self.context, expected_values) self.share_rpcapi.create_consistency_group.\ assert_called_once_with(self.context, cg, orig_cg['host']) def test_create_with_source_cgsnapshot_id_with_member(self): snap = fake_cgsnapshot("fake_source_cgsnapshot_id", status=constants.STATUS_AVAILABLE) share = stubs.stub_share('fakeshareid') member = stubs.stub_cgsnapshot_member('fake_member_id') fake_share_type_mapping = {'share_type_id': "fake_share_type_id"} orig_cg = fake_cg('fakeorigid', user_id=self.context.user_id, project_id=self.context.project_id, share_types=[fake_share_type_mapping], status=constants.STATUS_AVAILABLE) cg = fake_cg('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, share_types=[fake_share_type_mapping], status=constants.STATUS_CREATING) expected_values = cg.copy() for name in ('id', 'created_at'): expected_values.pop(name, None) expected_values['source_cgsnapshot_id'] = snap['id'] expected_values['share_types'] = ["fake_share_type_id"] self.mock_object(db_driver, 'cgsnapshot_get', mock.Mock(return_value=snap)) self.mock_object(db_driver, 'consistency_group_get', mock.Mock(return_value=orig_cg)) self.mock_object(db_driver, 'consistency_group_create', mock.Mock(return_value=cg)) self.mock_object(share_types, 'get_share_type', mock.Mock(return_value={"id": "fake_share_type_id"})) self.mock_object(db_driver, 'share_network_get') self.mock_object(db_driver, 'share_instance_get', mock.Mock(return_value=share)) 
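        # A single snapshot member is returned below, so creating the CG from
        # the snapshot is expected to trigger a share_api.create call for it.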
self.mock_object(db_driver, 'cgsnapshot_members_get_all', mock.Mock(return_value=[member])) self.mock_object(self.share_api, 'create') self.api.create(self.context, source_cgsnapshot_id=snap['id']) db_driver.consistency_group_create.assert_called_once_with( self.context, expected_values) self.assertTrue(self.share_api.create.called) self.share_rpcapi.create_consistency_group.\ assert_called_once_with(self.context, cg, orig_cg['host']) def test_create_with_source_cgsnapshot_id_with_members_error(self): snap = fake_cgsnapshot("fake_source_cgsnapshot_id", status=constants.STATUS_AVAILABLE) member = stubs.stub_cgsnapshot_member('fake_member_id') member_2 = stubs.stub_cgsnapshot_member('fake_member2_id') share = stubs.stub_share('fakeshareid') fake_share_type_mapping = {'share_type_id': "fake_share_type_id"} orig_cg = fake_cg('fakeorigid', user_id=self.context.user_id, project_id=self.context.project_id, share_types=[fake_share_type_mapping], status=constants.STATUS_AVAILABLE) cg = fake_cg('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, share_types=[fake_share_type_mapping], status=constants.STATUS_CREATING) expected_values = cg.copy() for name in ('id', 'created_at'): expected_values.pop(name, None) expected_values['source_cgsnapshot_id'] = snap['id'] expected_values['share_types'] = ["fake_share_type_id"] self.mock_object(db_driver, 'cgsnapshot_get', mock.Mock(return_value=snap)) self.mock_object(db_driver, 'consistency_group_get', mock.Mock(return_value=orig_cg)) self.mock_object(db_driver, 'share_network_get') self.mock_object(db_driver, 'share_instance_get', mock.Mock(return_value=share)) self.mock_object(db_driver, 'consistency_group_create', mock.Mock(return_value=cg)) self.mock_object(share_types, 'get_share_type', mock.Mock(return_value={"id": "fake_share_type_id"})) self.mock_object(db_driver, 'cgsnapshot_members_get_all', mock.Mock(return_value=[member, member_2])) self.mock_object(self.share_api, 'create', mock.Mock(side_effect=[None, exception.Error])) self.mock_object(db_driver, 'consistency_group_destroy') self.assertRaises(exception.Error, self.api.create, self.context, source_cgsnapshot_id=snap['id']) db_driver.consistency_group_create.assert_called_once_with( self.context, expected_values) self.assertEqual(2, self.share_api.create.call_count) self.assertEqual(1, db_driver.consistency_group_destroy.call_count) def test_create_with_source_cgsnapshot_id_error_snapshot_status(self): snap = fake_cgsnapshot("fake_source_cgsnapshot_id", status=constants.STATUS_ERROR) self.mock_object(db_driver, 'cgsnapshot_get', mock.Mock(return_value=snap)) self.assertRaises(exception.InvalidCGSnapshot, self.api.create, self.context, source_cgsnapshot_id=snap['id']) def test_create_with_source_cgsnapshot_id_snap_not_found(self): snap = fake_cgsnapshot("fake_source_cgsnapshot_id", status=constants.STATUS_ERROR) self.mock_object(db_driver, 'cgsnapshot_get', mock.Mock(side_effect=exception.CGSnapshotNotFound( cgsnapshot_id='fake_source_cgsnapshot_id' ))) self.assertRaises(exception.CGSnapshotNotFound, self.api.create, self.context, source_cgsnapshot_id=snap['id']) def test_create_with_multiple_fields(self): fake_desc = 'fake_desc' fake_name = 'fake_name' cg = fake_cg('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) expected_values = cg.copy() for name in ('id', 'host', 'created_at'): expected_values.pop(name, None) expected_values['name'] = fake_name expected_values['description'] = fake_desc 
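        # Both name and description should be passed through unchanged to
        # db_driver.consistency_group_create.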
self.mock_object(db_driver, 'consistency_group_create', mock.Mock(return_value=cg)) self.api.create(self.context, name=fake_name, description=fake_desc) db_driver.consistency_group_create.assert_called_once_with( self.context, expected_values) def test_create_with_error_on_creation(self): cg = fake_cg('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) expected_values = cg.copy() for name in ('id', 'host', 'created_at'): expected_values.pop(name, None) self.mock_object(db_driver, 'consistency_group_create', mock.Mock(side_effect=exception.Error)) self.assertRaises(exception.Error, self.api.create, self.context) db_driver.consistency_group_create.assert_called_once_with( self.context, expected_values) def test_delete_creating_no_host(self): cg = fake_cg('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) self.mock_object(db_driver, 'consistency_group_destroy') self.api.delete(self.context, cg) db_driver.consistency_group_destroy.assert_called_once_with( mock.ANY, cg['id']) def test_delete_creating_with_host(self): cg = fake_cg('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING, host="fake_host") self.assertRaises(exception.InvalidConsistencyGroup, self.api.delete, self.context, cg) def test_delete_available(self): cg = fake_cg('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_AVAILABLE, host="fake_host") deleted_cg = copy.deepcopy(cg) deleted_cg['status'] = constants.STATUS_DELETING self.mock_object(db_driver, 'consistency_group_update', mock.Mock(return_value=deleted_cg)) self.mock_object(db_driver, 'count_shares_in_consistency_group', mock.Mock(return_value=0)) self.api.delete(self.context, cg) db_driver.consistency_group_update.assert_called_once_with( self.context, cg['id'], {'status': constants.STATUS_DELETING}) self.share_rpcapi.delete_consistency_group.assert_called_once_with( self.context, deleted_cg ) def test_delete_error_with_host(self): cg = fake_cg('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_ERROR, host="fake_host") deleted_cg = copy.deepcopy(cg) deleted_cg['status'] = constants.STATUS_DELETING self.mock_object(self.api, 'share_rpcapi') self.mock_object(db_driver, 'consistency_group_update', mock.Mock(return_value=deleted_cg)) self.mock_object(db_driver, 'count_shares_in_consistency_group', mock.Mock(return_value=0)) self.api.delete(self.context, cg) db_driver.consistency_group_update.assert_called_once_with( self.context, cg['id'], {'status': constants.STATUS_DELETING}) self.api.share_rpcapi.delete_consistency_group.assert_called_once_with( self.context, deleted_cg ) def test_delete_error_without_host(self): cg = fake_cg('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_ERROR) self.mock_object(db_driver, 'consistency_group_destroy') self.api.delete(self.context, cg) db_driver.consistency_group_destroy.assert_called_once_with( mock.ANY, cg['id']) def test_delete_with_shares(self): cg = fake_cg('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_AVAILABLE, host="fake_host") self.mock_object(db_driver, 'count_shares_in_consistency_group', mock.Mock(return_value=1)) self.assertRaises(exception.InvalidConsistencyGroup, self.api.delete, self.context, cg) def test_delete_with_cgsnapshots(self): cg = 
fake_cg('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_AVAILABLE, host="fake_host") self.mock_object(db_driver, 'count_cgsnapshots_in_consistency_group', mock.Mock(return_value=1)) self.assertRaises(exception.InvalidConsistencyGroup, self.api.delete, self.context, cg) def test_update_no_values(self): cg = fake_cg('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) expected_values = {} self.mock_object(db_driver, 'consistency_group_update', mock.Mock(return_value=cg)) self.api.update(self.context, cg, expected_values) db_driver.consistency_group_update.assert_called_once_with( self.context, cg['id'], expected_values) def test_update_with_name(self): fake_name = 'fake_name' cg = fake_cg('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) expected_values = {'description': fake_name} self.mock_object(db_driver, 'consistency_group_update', mock.Mock(return_value=cg)) self.api.update(self.context, cg, expected_values) db_driver.consistency_group_update.assert_called_once_with( self.context, cg['id'], expected_values) def test_update_with_description(self): fake_desc = 'fake_desc' cg = fake_cg('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) expected_values = {'description': fake_desc} self.mock_object(db_driver, 'consistency_group_update', mock.Mock(return_value=cg)) self.api.update(self.context, cg, expected_values) db_driver.consistency_group_update.assert_called_once_with( self.context, cg['id'], expected_values) def test_get(self): expected_cg = fake_cg('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) self.mock_object(db_driver, 'consistency_group_get', mock.Mock(return_value=expected_cg)) actual_cg = self.api.get(self.context, expected_cg['id']) self.assertEqual(expected_cg, actual_cg) def test_get_all_no_cgs(self): self.mock_object(db_driver, 'consistency_group_get_all', mock.Mock(return_value=[])) actual_cg = self.api.get_all(self.context) self.assertEqual([], actual_cg) def test_get_all(self): expected_cgs = [fake_cg('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING)] self.mock_object(db_driver, 'consistency_group_get_all_by_project', mock.Mock(return_value=expected_cgs)) actual_cg = self.api.get_all(self.context, detailed=True) self.assertEqual(expected_cgs, actual_cg) def test_get_all_all_tenants_not_admin(self): cxt = context.RequestContext(user_id=None, project_id=None, is_admin=False) expected_cgs = [fake_cg('fakeid', user_id=cxt.user_id, project_id=cxt.project_id, status=constants.STATUS_CREATING)] self.mock_object(db_driver, 'consistency_group_get_all_by_project', mock.Mock(return_value=expected_cgs)) actual_cgs = self.api.get_all(cxt, search_opts={'all_tenants': True}) self.assertEqual(expected_cgs, actual_cgs) def test_get_all_all_tenants_as_admin(self): expected_cgs = [fake_cg('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING)] self.mock_object(db_driver, 'consistency_group_get_all', mock.Mock(return_value=expected_cgs)) actual_cgs = self.api.get_all(self.context, search_opts={'all_tenants': True}) self.assertEqual(expected_cgs, actual_cgs) db_driver.consistency_group_get_all.assert_called_once_with( self.context, detailed=True) def 
test_create_cgsnapshot_minimal_request_no_members(self): cg = fake_cg('fake_cg_id', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_AVAILABLE) snap = fake_cgsnapshot('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, consistency_group_id=cg['id'], status=constants.STATUS_CREATING) expected_values = snap.copy() for name in ('id', 'created_at'): expected_values.pop(name, None) self.mock_object(db_driver, 'consistency_group_get', mock.Mock(return_value=cg)) self.mock_object(db_driver, 'cgsnapshot_create', mock.Mock(return_value=snap)) self.mock_object(db_driver, 'share_get_all_by_consistency_group_id', mock.Mock(return_value=[])) self.api.create_cgsnapshot(self.context, consistency_group_id=cg['id']) db_driver.consistency_group_get.assert_called_once_with( self.context, cg['id'] ) db_driver.cgsnapshot_create.assert_called_once_with( self.context, expected_values) self.share_rpcapi.create_cgsnapshot.assert_called_once_with( self.context, snap, cg['host'] ) def test_create_cgsnapshot_minimal_request_no_members_with_name(self): fake_name = 'fake_name' cg = fake_cg('fake_cg_id', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_AVAILABLE) snap = fake_cgsnapshot('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, consistency_group_id=cg['id'], name=fake_name, status=constants.STATUS_CREATING) expected_values = snap.copy() for name in ('id', 'created_at'): expected_values.pop(name, None) self.mock_object(db_driver, 'consistency_group_get', mock.Mock(return_value=cg)) self.mock_object(db_driver, 'cgsnapshot_create', mock.Mock(return_value=snap)) self.mock_object(db_driver, 'share_get_all_by_consistency_group_id', mock.Mock(return_value=[])) self.api.create_cgsnapshot(self.context, consistency_group_id=cg['id'], name=fake_name) db_driver.consistency_group_get.assert_called_once_with( self.context, cg['id'] ) db_driver.cgsnapshot_create.assert_called_once_with( self.context, expected_values) self.share_rpcapi.create_cgsnapshot.assert_called_once_with( self.context, snap, cg['host'] ) def test_create_cgsnapshot_minimal_request_no_members_with_desc(self): fake_description = 'fake_description' cg = fake_cg('fake_cg_id', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_AVAILABLE) snap = fake_cgsnapshot('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, consistency_group_id=cg['id'], description=fake_description, status=constants.STATUS_CREATING) expected_values = snap.copy() for name in ('id', 'created_at'): expected_values.pop(name, None) self.mock_object(db_driver, 'consistency_group_get', mock.Mock(return_value=cg)) self.mock_object(db_driver, 'cgsnapshot_create', mock.Mock(return_value=snap)) self.mock_object(db_driver, 'share_get_all_by_consistency_group_id', mock.Mock(return_value=[])) self.api.create_cgsnapshot(self.context, consistency_group_id=cg['id'], description=fake_description) db_driver.consistency_group_get.assert_called_once_with( self.context, cg['id'] ) db_driver.cgsnapshot_create.assert_called_once_with( self.context, expected_values) self.share_rpcapi.create_cgsnapshot.assert_called_once_with( self.context, snap, cg['host'] ) def test_create_cgsnapshot_cg_does_not_exist(self): cg = fake_cg('fake_cg_id', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) snap = fake_cgsnapshot('fakeid', user_id=self.context.user_id, 
project_id=self.context.project_id, consistency_group_id=cg['id'], status=constants.STATUS_CREATING) expected_values = snap.copy() for name in ('id', 'created_at'): expected_values.pop(name, None) self.mock_object(db_driver, 'consistency_group_get', mock.Mock(return_value=cg)) self.mock_object(db_driver, 'cgsnapshot_create', mock.Mock(return_value=snap)) self.mock_object(db_driver, 'share_get_all_by_consistency_group_id', mock.Mock(return_value=[])) self.assertRaises(exception.InvalidConsistencyGroup, self.api.create_cgsnapshot, self.context, consistency_group_id=cg['id']) db_driver.consistency_group_get.assert_called_once_with( self.context, cg['id'] ) def test_create_cgsnapshot_cg_in_creating(self): self.mock_object(db_driver, 'consistency_group_get', mock.Mock( side_effect=exception.ConsistencyGroupNotFound( consistency_group_id='fake_id' ))) self.assertRaises(exception.ConsistencyGroupNotFound, self.api.create_cgsnapshot, self.context, consistency_group_id="fake_id") db_driver.consistency_group_get.assert_called_once_with( self.context, "fake_id" ) def test_create_cgsnapshot_with_member(self): cg = fake_cg('fake_cg_id', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_AVAILABLE) snap = fake_cgsnapshot('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, consistency_group_id=cg['id'], status=constants.STATUS_CREATING) share = stubs.stub_share('fake_share_id', status=constants.STATUS_AVAILABLE) expected_values = snap.copy() for name in ('id', 'created_at'): expected_values.pop(name, None) expected_member_values = { 'cgsnapshot_id': snap['id'], 'user_id': self.context.user_id, 'project_id': self.context.project_id, 'status': constants.STATUS_CREATING, 'size': share['size'], 'share_proto': share['share_proto'], 'share_type_id': share['share_type_id'], 'share_id': share['id'], 'share_instance_id': mock.ANY, } self.mock_object(db_driver, 'consistency_group_get', mock.Mock(return_value=cg)) self.mock_object(db_driver, 'cgsnapshot_create', mock.Mock(return_value=snap)) self.mock_object(db_driver, 'cgsnapshot_member_create', mock.Mock()) self.mock_object(db_driver, 'share_get_all_by_consistency_group_id', mock.Mock(return_value=[share])) self.api.create_cgsnapshot(self.context, consistency_group_id=cg['id']) db_driver.consistency_group_get.assert_called_once_with( self.context, cg['id'] ) db_driver.cgsnapshot_create.assert_called_once_with( self.context, expected_values) db_driver.cgsnapshot_member_create.assert_called_once_with( self.context, expected_member_values ) self.share_rpcapi.create_cgsnapshot.assert_called_once_with( self.context, snap, cg['host'] ) def test_create_cgsnapshot_with_member_share_in_creating(self): cg = fake_cg('fake_cg_id', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_AVAILABLE) share = stubs.stub_share('fake_share_id', status=constants.STATUS_CREATING) self.mock_object(db_driver, 'consistency_group_get', mock.Mock(return_value=cg)) self.mock_object(db_driver, 'share_get_all_by_consistency_group_id', mock.Mock(return_value=[share])) self.assertRaises(exception.InvalidConsistencyGroup, self.api.create_cgsnapshot, self.context, consistency_group_id=cg['id']) db_driver.consistency_group_get.assert_called_once_with( self.context, cg['id'] ) def test_create_cgsnapshot_with_two_members(self): cg = fake_cg('fake_cg_id', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_AVAILABLE) snap = fake_cgsnapshot('fakeid', 
user_id=self.context.user_id, project_id=self.context.project_id, consistency_group_id=cg['id'], status=constants.STATUS_CREATING) share = stubs.stub_share('fake_share_id', status=constants.STATUS_AVAILABLE) share_2 = stubs.stub_share('fake_share2_id', status=constants.STATUS_AVAILABLE) expected_values = snap.copy() for name in ('id', 'created_at'): expected_values.pop(name, None) expected_member_1_values = { 'cgsnapshot_id': snap['id'], 'user_id': self.context.user_id, 'project_id': self.context.project_id, 'status': constants.STATUS_CREATING, 'size': share['size'], 'share_proto': share['share_proto'], 'share_type_id': share['share_type_id'], 'share_id': share['id'], 'share_instance_id': mock.ANY, } expected_member_2_values = { 'cgsnapshot_id': snap['id'], 'user_id': self.context.user_id, 'project_id': self.context.project_id, 'status': constants.STATUS_CREATING, 'size': share_2['size'], 'share_proto': share_2['share_proto'], 'share_type_id': share_2['share_type_id'], 'share_id': share_2['id'], 'share_instance_id': mock.ANY, } self.mock_object(db_driver, 'consistency_group_get', mock.Mock(return_value=cg)) self.mock_object(db_driver, 'cgsnapshot_create', mock.Mock(return_value=snap)) self.mock_object(db_driver, 'share_get_all_by_consistency_group_id', mock.Mock(return_value=[share, share_2])) self.mock_object(db_driver, 'cgsnapshot_member_create', mock.Mock()) self.api.create_cgsnapshot(self.context, consistency_group_id=cg['id']) db_driver.consistency_group_get.assert_called_once_with( self.context, cg['id'] ) db_driver.cgsnapshot_create.assert_called_once_with( self.context, expected_values) db_driver.cgsnapshot_member_create.assert_any_call( self.context, expected_member_1_values ) db_driver.cgsnapshot_member_create.assert_any_call( self.context, expected_member_2_values ) self.share_rpcapi.create_cgsnapshot.assert_called_once_with( self.context, snap, cg['host'] ) def test_create_cgsnapshot_error_creating_member(self): cg = fake_cg('fake_cg_id', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_AVAILABLE) snap = fake_cgsnapshot('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, consistency_group_id=cg['id'], status=constants.STATUS_CREATING) share = stubs.stub_share('fake_share_id', status=constants.STATUS_AVAILABLE) expected_values = snap.copy() for name in ('id', 'created_at'): expected_values.pop(name, None) expected_member_values = { 'cgsnapshot_id': snap['id'], 'user_id': self.context.user_id, 'project_id': self.context.project_id, 'status': constants.STATUS_CREATING, 'size': share['size'], 'share_proto': share['share_proto'], 'share_type_id': share['share_type_id'], 'share_id': share['id'], 'share_instance_id': mock.ANY, } self.mock_object(db_driver, 'consistency_group_get', mock.Mock(return_value=cg)) self.mock_object(db_driver, 'cgsnapshot_create', mock.Mock(return_value=snap)) self.mock_object(db_driver, 'cgsnapshot_destroy') self.mock_object(db_driver, 'cgsnapshot_member_create', mock.Mock(side_effect=exception.Error)) self.mock_object(db_driver, 'share_get_all_by_consistency_group_id', mock.Mock(return_value=[share])) self.assertRaises(exception.Error, self.api.create_cgsnapshot, self.context, consistency_group_id=cg['id']) db_driver.consistency_group_get.assert_called_once_with( self.context, cg['id'] ) db_driver.cgsnapshot_create.assert_called_once_with( self.context, expected_values) db_driver.cgsnapshot_member_create.assert_called_once_with( self.context, expected_member_values ) 
db_driver.cgsnapshot_destroy.assert_called_once_with( self.context, snap['id'] ) def test_delete_cgsnapshot(self): cg = fake_cg('fake_id', host="fake_host") snap = fake_cgsnapshot('fake_cgsnap_id', consistency_group_id='fake_id', status=constants.STATUS_AVAILABLE) self.mock_object(db_driver, 'consistency_group_get', mock.Mock(return_value=cg)) self.mock_object(db_driver, 'cgsnapshot_update') self.api.delete_cgsnapshot(self.context, snap) db_driver.consistency_group_get.assert_called_once_with( self.context, "fake_id" ) db_driver.cgsnapshot_update.assert_called_once_with( self.context, snap['id'], {'status': constants.STATUS_DELETING} ) self.share_rpcapi.delete_cgsnapshot.assert_called_once_with( self.context, snap, cg['host']) def test_delete_cgsnapshot_cg_does_not_exist(self): snap = fake_cgsnapshot('fake_cgsnap_id', consistency_group_id='fake_id') self.mock_object(db_driver, 'consistency_group_get', mock.Mock( side_effect=exception.ConsistencyGroupNotFound( consistency_group_id='fake_id' ))) self.assertRaises(exception.ConsistencyGroupNotFound, self.api.delete_cgsnapshot, self.context, snap) db_driver.consistency_group_get.assert_called_once_with( self.context, "fake_id" ) def test_delete_cgsnapshot_creating_status(self): snap = fake_cgsnapshot('fake_cgsnap_id', consistency_group_id='fake_id', status=constants.STATUS_CREATING) self.mock_object(db_driver, 'consistency_group_get') self.assertRaises(exception.InvalidCGSnapshot, self.api.delete_cgsnapshot, self.context, snap) db_driver.consistency_group_get.assert_called_once_with( self.context, "fake_id" ) def test_update_cgsnapshot_no_values(self): snap = fake_cgsnapshot('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) expected_values = {} self.mock_object(db_driver, 'cgsnapshot_update', mock.Mock(return_value=snap)) self.api.update_cgsnapshot(self.context, snap, expected_values) db_driver.cgsnapshot_update.assert_called_once_with( self.context, snap['id'], expected_values) def test_update_cgsnapshot_with_name(self): fake_name = 'fake_name' snap = fake_cgsnapshot('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) expected_values = {'description': fake_name} self.mock_object(db_driver, 'cgsnapshot_update', mock.Mock(return_value=snap)) self.api.update_cgsnapshot(self.context, snap, expected_values) db_driver.cgsnapshot_update.assert_called_once_with( self.context, snap['id'], expected_values) def test_cgsnapshot_get(self): expected_snap = fake_cgsnapshot('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) self.mock_object(db_driver, 'cgsnapshot_get', mock.Mock(return_value=expected_snap)) actual_cg = self.api.get_cgsnapshot(self.context, expected_snap['id']) self.assertEqual(expected_snap, actual_cg) def test_cgsnapshot_get_all_no_cgs(self): self.mock_object(db_driver, 'cgsnapshot_get_all', mock.Mock(return_value=[])) actual_cg = self.api.get_all_cgsnapshots(self.context) self.assertEqual([], actual_cg) def test_cgsnapshot_get_all(self): expected_snaps = [fake_cg('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING)] self.mock_object(db_driver, 'cgsnapshot_get_all_by_project', mock.Mock(return_value=expected_snaps)) actual_cg = self.api.get_all_cgsnapshots(self.context, detailed=True) self.assertEqual(expected_snaps, actual_cg) def test_cgsnapshot_get_all_all_tenants_not_admin(self): cxt = 
context.RequestContext(user_id=None, project_id=None, is_admin=False) expected_snaps = [fake_cg('fakeid', user_id=cxt.user_id, project_id=cxt.project_id, status=constants.STATUS_CREATING)] self.mock_object(db_driver, 'cgsnapshot_get_all_by_project', mock.Mock(return_value=expected_snaps)) actual_cgs = self.api.get_all_cgsnapshots( cxt, search_opts={'all_tenants': True}) self.assertEqual(expected_snaps, actual_cgs) def test_cgsnapshot_get_all_all_tenants_as_admin(self): expected_snaps = [fake_cg('fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING)] self.mock_object(db_driver, 'cgsnapshot_get_all', mock.Mock(return_value=expected_snaps)) actual_cgs = self.api.get_all_cgsnapshots( self.context, search_opts={'all_tenants': True}) self.assertEqual(expected_snaps, actual_cgs) db_driver.cgsnapshot_get_all.assert_called_once_with( self.context, detailed=True) def test_get_all_cgsnapshot_members(self): self.mock_object(db_driver, 'cgsnapshot_members_get_all', mock.Mock(return_value=[])) self.api.get_all_cgsnapshot_members(self.context, 'fake_id') db_driver.cgsnapshot_members_get_all.assert_called_once_with( self.context, 'fake_id' ) manila-2.0.0/manila/tests/test_exception.py0000664000567000056710000005227312701407107022135 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2014 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
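# Unit tests for manila.exception: default message handling, kwargs
# formatting, and the HTTP response codes attached to each exception class.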
import ddt import six from manila import exception from manila import test class FakeNotifier(object): """Acts like the manila.openstack.common.notifier.api module.""" ERROR = 88 def __init__(self): self.provided_publisher = None self.provided_event = None self.provided_priority = None self.provided_payload = None def notify(self, context, publisher, event, priority, payload): self.provided_publisher = publisher self.provided_event = event self.provided_priority = priority self.provided_payload = payload @ddt.ddt class ManilaExceptionTestCase(test.TestCase): def test_default_error_msg(self): class FakeManilaException(exception.ManilaException): message = "default message" exc = FakeManilaException() self.assertEqual('default message', six.text_type(exc)) def test_error_msg(self): self.assertEqual('test', six.text_type(exception.ManilaException('test'))) def test_default_error_msg_with_kwargs(self): class FakeManilaException(exception.ManilaException): message = "default message: %(code)s" exc = FakeManilaException(code=500) self.assertEqual('default message: 500', six.text_type(exc)) def test_error_msg_exception_with_kwargs(self): # NOTE(dprince): disable format errors for this test self.flags(fatal_exception_format_errors=False) class FakeManilaException(exception.ManilaException): message = "default message: %(misspelled_code)s" exc = FakeManilaException(code=500) self.assertEqual('default message: %(misspelled_code)s', six.text_type(exc)) def test_default_error_code(self): class FakeManilaException(exception.ManilaException): code = 404 exc = FakeManilaException() self.assertEqual(404, exc.kwargs['code']) def test_error_code_from_kwarg(self): class FakeManilaException(exception.ManilaException): code = 500 exc = FakeManilaException(code=404) self.assertEqual(404, exc.kwargs['code']) def test_error_msg_is_exception_to_string(self): msg = 'test message' exc1 = Exception(msg) exc2 = exception.ManilaException(exc1) self.assertEqual(msg, exc2.msg) def test_exception_kwargs_to_string(self): msg = 'test message' exc1 = Exception(msg) exc2 = exception.ManilaException(kwarg1=exc1) self.assertEqual(msg, exc2.kwargs['kwarg1']) def test_exception_multi_kwargs_to_string(self): exc = exception.ManilaException( 'fake_msg', foo=Exception('foo_msg'), bar=Exception('bar_msg')) self.assertEqual('fake_msg', exc.msg) self.assertEqual('foo_msg', exc.kwargs['foo']) self.assertEqual('bar_msg', exc.kwargs['bar']) self.assertNotIn('fake_msg', exc.kwargs) self.assertNotIn('foo_msg', exc.kwargs) self.assertNotIn('bar_msg', exc.kwargs) @ddt.data("test message.", "test message....", ".") def test_exception_not_redundant_period(self, msg): exc1 = Exception(msg) exc2 = exception.ManilaException(exc1) self.assertEqual(msg, exc2.msg) def test_exception_redundant_period(self): msg = "test message.." exc1 = Exception(msg) exc2 = exception.ManilaException(exc1) self.assertEqual("test message.", exc2.msg) def test_replication_exception(self): # Verify response code for exception.ReplicationException reason = "Something bad happened." 
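        # ReplicationException should report the generic 500 code and include
        # the supplied reason in its message.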
e = exception.ReplicationException(reason=reason) self.assertEqual(500, e.code) self.assertIn(reason, e.msg) class ManilaExceptionResponseCode400(test.TestCase): def test_invalid(self): # Verify response code for exception.Invalid e = exception.Invalid() self.assertEqual(400, e.code) def test_invalid_input(self): # Verify response code for exception.InvalidInput reason = "fake_reason" e = exception.InvalidInput(reason=reason) self.assertEqual(400, e.code) self.assertIn(reason, e.msg) def test_invalid_request(self): # Verify response code for exception.InvalidRequest e = exception.InvalidRequest() self.assertEqual(400, e.code) def test_invalid_results(self): # Verify response code for exception.InvalidResults e = exception.InvalidResults() self.assertEqual(400, e.code) def test_invalid_uuid(self): # Verify response code for exception.InvalidUUID uuid = "fake_uuid" e = exception.InvalidUUID(uuid=uuid) self.assertEqual(400, e.code) self.assertIn(uuid, e.msg) def test_invalid_content_type(self): # Verify response code for exception.InvalidContentType content_type = "fake_content_type" e = exception.InvalidContentType(content_type=content_type) self.assertEqual(400, e.code) self.assertIn(content_type, e.msg) def test_invalid_parameter_value(self): # Verify response code for exception.InvalidParameterValue err = "fake_err" e = exception.InvalidParameterValue(err=err) self.assertEqual(400, e.code) self.assertIn(err, e.msg) def test_invalid_reservation_expiration(self): # Verify response code for exception.InvalidReservationExpiration expire = "fake_expire" e = exception.InvalidReservationExpiration(expire=expire) self.assertEqual(400, e.code) self.assertIn(expire, e.msg) def test_invalid_quota_value(self): # Verify response code for exception.InvalidQuotaValue unders = '-1' e = exception.InvalidQuotaValue(unders=unders) self.assertEqual(400, e.code) def test_invalid_share(self): # Verify response code for exception.InvalidShare reason = "fake_reason" e = exception.InvalidShare(reason=reason) self.assertEqual(400, e.code) self.assertIn(reason, e.msg) def test_invalid_share_access(self): # Verify response code for exception.InvalidShareAccess reason = "fake_reason" e = exception.InvalidShareAccess(reason=reason) self.assertEqual(400, e.code) self.assertIn(reason, e.msg) def test_invalid_share_snapshot(self): # Verify response code for exception.InvalidShareSnapshot reason = "fake_reason" e = exception.InvalidShareSnapshot(reason=reason) self.assertEqual(400, e.code) self.assertIn(reason, e.msg) def test_invalid_share_metadata(self): # Verify response code for exception.InvalidShareMetadata e = exception.InvalidShareMetadata() self.assertEqual(400, e.code) def test_invalid_share_metadata_size(self): # Verify response code for exception.InvalidShareMetadataSize e = exception.InvalidShareMetadataSize() self.assertEqual(400, e.code) def test_invalid_volume(self): # Verify response code for exception.InvalidVolume e = exception.InvalidVolume() self.assertEqual(400, e.code) def test_invalid_share_type(self): # Verify response code for exception.InvalidShareType reason = "fake_reason" e = exception.InvalidShareType(reason=reason) self.assertEqual(400, e.code) self.assertIn(reason, e.msg) def test_manage_invalid_share_snapshot(self): # Verify response code for exception.ManageInvalidShareSnapshot reason = "fake_reason" e = exception.ManageInvalidShareSnapshot(reason=reason) self.assertEqual(400, e.code) self.assertIn(reason, e.msg) def test_unmanage_invalid_share_snapshot(self): # Verify response code for 
exception.UnmanageInvalidShareSnapshot reason = "fake_reason" e = exception.UnmanageInvalidShareSnapshot(reason=reason) self.assertEqual(400, e.code) self.assertIn(reason, e.msg) class ManilaExceptionResponseCode403(test.TestCase): def test_not_authorized(self): # Verify response code for exception.NotAuthorized e = exception.NotAuthorized() self.assertEqual(403, e.code) def test_admin_required(self): # Verify response code for exception.AdminRequired e = exception.AdminRequired() self.assertEqual(403, e.code) def test_policy_not_authorized(self): # Verify response code for exception.PolicyNotAuthorized action = "fake_action" e = exception.PolicyNotAuthorized(action=action) self.assertEqual(403, e.code) self.assertIn(action, e.msg) class ManilaExceptionResponseCode404(test.TestCase): def test_not_found(self): # Verify response code for exception.NotFound e = exception.NotFound() self.assertEqual(404, e.code) def test_share_network_not_found(self): # Verify response code for exception.ShareNetworkNotFound share_network_id = "fake_share_network_id" e = exception.ShareNetworkNotFound(share_network_id=share_network_id) self.assertEqual(404, e.code) self.assertIn(share_network_id, e.msg) def test_share_server_not_found(self): # Verify response code for exception.ShareServerNotFound share_server_id = "fake_share_server_id" e = exception.ShareServerNotFound(share_server_id=share_server_id) self.assertEqual(404, e.code) self.assertIn(share_server_id, e.msg) def test_share_server_not_found_by_filters(self): # Verify response code for exception.ShareServerNotFoundByFilters filters_description = "host = fakeHost" e = exception.ShareServerNotFoundByFilters( filters_description=filters_description) self.assertEqual(404, e.code) self.assertIn(filters_description, e.msg) def test_service_not_found(self): # Verify response code for exception.ServiceNotFound service_id = "fake_service_id" e = exception.ServiceNotFound(service_id=service_id) self.assertEqual(404, e.code) self.assertIn(service_id, e.msg) def test_host_not_found(self): # Verify response code for exception.HostNotFound host = "fake_host" e = exception.HostNotFound(host=host) self.assertEqual(404, e.code) self.assertIn(host, e.msg) def test_scheduler_host_filter_not_found(self): # Verify response code for exception.SchedulerHostFilterNotFound filter_name = "fake_filter_name" e = exception.SchedulerHostFilterNotFound(filter_name=filter_name) self.assertEqual(404, e.code) self.assertIn(filter_name, e.msg) def test_scheduler_host_weigher_not_found(self): # Verify response code for exception.SchedulerHostWeigherNotFound weigher_name = "fake_weigher_name" e = exception.SchedulerHostWeigherNotFound(weigher_name=weigher_name) self.assertEqual(404, e.code) self.assertIn(weigher_name, e.msg) def test_host_binary_not_found(self): # Verify response code for exception.HostBinaryNotFound host = "fake_host" binary = "fake_binary" e = exception.HostBinaryNotFound(binary=binary, host=host) self.assertEqual(404, e.code) self.assertIn(binary, e.msg) self.assertIn(host, e.msg) def test_quota_not_found(self): # Verify response code for exception.QuotaNotFound e = exception.QuotaNotFound() self.assertEqual(404, e.code) def test_quota_resource_unknown(self): # Verify response code for exception.QuotaResourceUnknown unknown = "fake_quota_resource" e = exception.QuotaResourceUnknown(unknown=unknown) self.assertEqual(404, e.code) def test_project_quota_not_found(self): # Verify response code for exception.ProjectQuotaNotFound project_id = "fake_tenant_id" e = 
exception.ProjectQuotaNotFound(project_id=project_id) self.assertEqual(404, e.code) def test_quota_class_not_found(self): # Verify response code for exception.QuotaClassNotFound class_name = "FakeQuotaClass" e = exception.QuotaClassNotFound(class_name=class_name) self.assertEqual(404, e.code) def test_quota_usage_not_found(self): # Verify response code for exception.QuotaUsageNotFound project_id = "fake_tenant_id" e = exception.QuotaUsageNotFound(project_id=project_id) self.assertEqual(404, e.code) def test_reservation_not_found(self): # Verify response code for exception.ReservationNotFound uuid = "fake_uuid" e = exception.ReservationNotFound(uuid=uuid) self.assertEqual(404, e.code) def test_migration_not_found(self): # Verify response code for exception.MigrationNotFound migration_id = "fake_migration_id" e = exception.MigrationNotFound(migration_id=migration_id) self.assertEqual(404, e.code) self.assertIn(migration_id, e.msg) def test_migration_not_found_by_status(self): # Verify response code for exception.MigrationNotFoundByStatus status = "fake_status" instance_id = "fake_instance_id" e = exception.MigrationNotFoundByStatus(status=status, instance_id=instance_id) self.assertEqual(404, e.code) self.assertIn(status, e.msg) self.assertIn(instance_id, e.msg) def test_file_not_found(self): # Verify response code for exception.FileNotFound file_path = "fake_file_path" e = exception.FileNotFound(file_path=file_path) self.assertEqual(404, e.code) self.assertIn(file_path, e.msg) def test_config_not_found(self): # Verify response code for exception.ConfigNotFound path = "fake_path" e = exception.ConfigNotFound(path=path) self.assertEqual(404, e.code) self.assertIn(path, e.msg) def test_paste_app_not_found(self): # Verify response code for exception.PasteAppNotFound name = "fake_name" path = "fake_path" e = exception.PasteAppNotFound(name=name, path=path) self.assertEqual(404, e.code) self.assertIn(name, e.msg) self.assertIn(path, e.msg) def test_share_snapshot_not_found(self): # Verify response code for exception.ShareSnapshotNotFound snapshot_id = "fake_snapshot_id" e = exception.ShareSnapshotNotFound(snapshot_id=snapshot_id) self.assertEqual(404, e.code) self.assertIn(snapshot_id, e.msg) def test_share_metadata_not_found(self): # verify response code for exception.ShareMetadataNotFound e = exception.ShareMetadataNotFound() self.assertEqual(404, e.code) def test_security_service_not_found(self): # verify response code for exception.SecurityServiceNotFound security_service_id = "fake_security_service_id" e = exception.SecurityServiceNotFound( security_service_id=security_service_id) self.assertEqual(404, e.code) self.assertIn(security_service_id, e.msg) def test_volume_not_found(self): # verify response code for exception.VolumeNotFound volume_id = "fake_volume_id" e = exception.VolumeNotFound(volume_id=volume_id) self.assertEqual(404, e.code) self.assertIn(volume_id, e.msg) def test_volume_snapshot_not_found(self): # verify response code for exception.VolumeSnapshotNotFound snapshot_id = "fake_snapshot_id" e = exception.VolumeSnapshotNotFound(snapshot_id=snapshot_id) self.assertEqual(404, e.code) self.assertIn(snapshot_id, e.msg) def test_share_type_not_found(self): # verify response code for exception.ShareTypeNotFound share_type_id = "fake_share_type_id" e = exception.ShareTypeNotFound(share_type_id=share_type_id) self.assertEqual(404, e.code) self.assertIn(share_type_id, e.msg) def test_share_type_not_found_by_name(self): # verify response code for exception.ShareTypeNotFoundByName 
share_type_name = "fake_share_type_name" e = exception.ShareTypeNotFoundByName( share_type_name=share_type_name) self.assertEqual(404, e.code) self.assertIn(share_type_name, e.msg) def test_share_type_extra_specs_not_found(self): # verify response code for exception.ShareTypeExtraSpecsNotFound share_type_id = "fake_share_type_id" extra_specs_key = "fake_extra_specs_key" e = exception.ShareTypeExtraSpecsNotFound( share_type_id=share_type_id, extra_specs_key=extra_specs_key) self.assertEqual(404, e.code) self.assertIn(share_type_id, e.msg) self.assertIn(extra_specs_key, e.msg) def test_instance_not_found(self): # verify response code for exception.InstanceNotFound instance_id = "fake_instance_id" e = exception.InstanceNotFound(instance_id=instance_id) self.assertEqual(404, e.code) self.assertIn(instance_id, e.msg) def test_share_replica_not_found_exception(self): # Verify response code for exception.ShareReplicaNotFound replica_id = "FAKE_REPLICA_ID" e = exception.ShareReplicaNotFound(replica_id=replica_id) self.assertEqual(404, e.code) self.assertIn(replica_id, e.msg) def test_storage_resource_not_found(self): # verify response code for exception.StorageResourceNotFound name = "fake_name" e = exception.StorageResourceNotFound(name=name) self.assertEqual(404, e.code) self.assertIn(name, e.msg) def test_snapshot_resource_not_found(self): # verify response code for exception.SnapshotResourceNotFound name = "fake_name" e = exception.SnapshotResourceNotFound(name=name) self.assertEqual(404, e.code) self.assertIn(name, e.msg) def test_snapshot_instance_not_found(self): # verify response code for exception.ShareSnapshotInstanceNotFound instance_id = 'fake_instance_id' e = exception.ShareSnapshotInstanceNotFound(instance_id=instance_id) self.assertEqual(404, e.code) self.assertIn(instance_id, e.msg) def test_export_location_not_found(self): # verify response code for exception.ExportLocationNotFound uuid = "fake-export-location-uuid" e = exception.ExportLocationNotFound(uuid=uuid) self.assertEqual(404, e.code) self.assertIn(uuid, e.msg) def test_share_resource_not_found(self): # verify response code for exception.ShareResourceNotFound share_id = "fake_share_id" e = exception.ShareResourceNotFound(share_id=share_id) self.assertEqual(404, e.code) self.assertIn(share_id, e.msg) def test_share_not_found(self): # verify response code for exception.ShareNotFound share_id = "fake_share_id" e = exception.ShareNotFound(share_id=share_id) self.assertEqual(404, e.code) self.assertIn(share_id, e.msg) class ManilaExceptionResponseCode413(test.TestCase): def test_quota_error(self): # verify response code for exception.QuotaError e = exception.QuotaError() self.assertEqual(413, e.code) def test_share_size_exceeds_available_quota(self): # verify response code for exception.ShareSizeExceedsAvailableQuota e = exception.ShareSizeExceedsAvailableQuota() self.assertEqual(413, e.code) def test_share_limit_exceeded(self): # verify response code for exception.ShareLimitExceeded allowed = 776 # amount of allowed shares e = exception.ShareLimitExceeded(allowed=allowed) self.assertEqual(413, e.code) self.assertIn(str(allowed), e.msg) def test_snapshot_limit_exceeded(self): # verify response code for exception.SnapshotLimitExceeded allowed = 777 # amount of allowed snapshots e = exception.SnapshotLimitExceeded(allowed=allowed) self.assertEqual(413, e.code) self.assertIn(str(allowed), e.msg) def test_share_networks_limit_exceeded(self): # verify response code for exception.ShareNetworksLimitExceeded allowed = 778 # amount of 
allowed share networks e = exception.ShareNetworksLimitExceeded(allowed=allowed) self.assertEqual(413, e.code) self.assertIn(str(allowed), e.msg) def test_port_limit_exceeded(self): # verify response code for exception.PortLimitExceeded e = exception.PortLimitExceeded() self.assertEqual(413, e.code) manila-2.0.0/manila/policy.py0000664000567000056710000000660312701407107017231 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Policy Engine For Manila""" import functools from oslo_config import cfg from oslo_policy import policy from manila import exception CONF = cfg.CONF _ENFORCER = None def reset(): global _ENFORCER if _ENFORCER: _ENFORCER.clear() _ENFORCER = None def init(policy_path=None): global _ENFORCER if not _ENFORCER: _ENFORCER = policy.Enforcer(CONF) if policy_path: _ENFORCER.policy_path = policy_path _ENFORCER.load_rules() def enforce(context, action, target, do_raise=True): """Verifies that the action is valid on the target in this context. :param context: manila context :param action: string representing the action to be checked this should be colon separated for clarity. i.e. ``compute:create_instance``, ``compute:attach_volume``, ``volume:attach_volume`` :param object: dictionary representing the object of the action for object creation this should be a dictionary representing the location of the object e.g. ``{'project_id': context.project_id}`` :raises manila.exception.PolicyNotAuthorized: if verification fails. """ init() if not isinstance(context, dict): context = context.to_dict() # Add the exception arguments if asked to do a raise extra = {} if do_raise: extra.update(exc=exception.PolicyNotAuthorized, action=action, do_raise=do_raise) return _ENFORCER.enforce(action, target, context, **extra) def check_is_admin(roles): """Whether or not roles contains 'admin' role according to policy setting. """ init() # include project_id on target to avoid KeyError if context_is_admin # policy definition is missing, and default admin_or_owner rule # attempts to apply. Since our credentials dict does not include a # project_id, this target can never match as a generic rule. 
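# --- Illustrative sketch: consuming the policy helpers ------------------------
# How enforce()/check_policy()/wrap_check_policy() are typically used: the
# decorator enforces the rule "<resource>:<method_name>" against the request
# context before the wrapped method body runs, while check_policy() is the
# explicit form. The 'ShareAPI' class below is a hypothetical stand-in, not the
# real manila share API.

from manila import policy


class ShareAPI(object):

    @policy.wrap_check_policy('share')
    def delete(self, context, share, force=False):
        # Reaching this point means "share:delete" was authorized for the
        # given context and target share.
        return share['id']

    def get(self, context, share):
        # Equivalent explicit form of the decorator above.
        policy.check_policy(context, 'share', 'get', target_obj=share)
        return share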
target = {'project_id': ''} credentials = {'roles': roles} return _ENFORCER.enforce("context_is_admin", target, credentials) def wrap_check_policy(resource): """Check policy corresponding to the wrapped methods prior to execution.""" def check_policy_wraper(func): @functools.wraps(func) def wrapped(self, context, target_obj, *args, **kwargs): check_policy(context, resource, func.__name__, target_obj) return func(self, context, target_obj, *args, **kwargs) return wrapped return check_policy_wraper def check_policy(context, resource, action, target_obj=None): target = { 'project_id': context.project_id, 'user_id': context.user_id, } target.update(target_obj or {}) _action = '%s:%s' % (resource, action) enforce(context, _action, target) manila-2.0.0/manila/data/0000775000567000056710000000000012701407265016271 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/data/utils.py0000664000567000056710000001260712701407107020004 0ustar jenkinsjenkins00000000000000# Copyright 2015, Hitachi Data Systems. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_log import log import six from manila import utils LOG = log.getLogger(__name__) class Copy(object): def __init__(self, src, dest, ignore_list): self.src = src self.dest = dest self.total_size = 0 self.current_size = 0 self.files = [] self.dirs = [] self.current_copy = None self.ignore_list = ignore_list self.cancelled = False def get_progress(self): if self.current_copy is not None: try: size, err = utils.execute("stat", "-c", "%s", self.current_copy['file_path'], run_as_root=True) size = int(size) except utils.processutils.ProcessExecutionError: size = 0 total_progress = 0 if self.total_size > 0: total_progress = self.current_size * 100 / self.total_size current_file_progress = 0 if self.current_copy['size'] > 0: current_file_progress = size * 100 / self.current_copy['size'] current_file_path = self.current_copy['file_path'] progress = { 'total_progress': total_progress, 'current_file_path': current_file_path, 'current_file_progress': current_file_progress } return progress else: return {'total_progress': 100} def cancel(self): self.cancelled = True def run(self): self.get_total_size(self.src) self.copy_data(self.src) self.copy_stats(self.src) LOG.info(six.text_type(self.get_progress())) def get_total_size(self, path): if self.cancelled: return out, err = utils.execute( "ls", "-pA1", "--group-directories-first", path, run_as_root=True) for line in out.split('\n'): if self.cancelled: return if len(line) == 0: continue src_item = os.path.join(path, line) if line[-1] == '/': if line[0:-1] in self.ignore_list: continue self.get_total_size(src_item) else: if line in self.ignore_list: continue size, err = utils.execute("stat", "-c", "%s", src_item, run_as_root=True) self.total_size += int(size) def copy_data(self, path): if self.cancelled: return out, err = utils.execute( "ls", "-pA1", "--group-directories-first", path, run_as_root=True) for line in out.split('\n'): if self.cancelled: return if len(line) == 0: continue src_item = os.path.join(path, 
line) dest_item = src_item.replace(self.src, self.dest) if line[-1] == '/': if line[0:-1] in self.ignore_list: continue utils.execute("mkdir", "-p", dest_item, run_as_root=True) self.copy_data(src_item) else: if line in self.ignore_list: continue size, err = utils.execute("stat", "-c", "%s", src_item, run_as_root=True) self.current_copy = {'file_path': dest_item, 'size': int(size)} utils.execute("cp", "-P", "--preserve=all", src_item, dest_item, run_as_root=True) self.current_size += int(size) LOG.info(six.text_type(self.get_progress())) def copy_stats(self, path): if self.cancelled: return out, err = utils.execute( "ls", "-pA1", "--group-directories-first", path, run_as_root=True) for line in out.split('\n'): if self.cancelled: return if len(line) == 0: continue src_item = os.path.join(path, line) dest_item = src_item.replace(self.src, self.dest) # NOTE(ganso): Should re-apply attributes for folders. if line[-1] == '/': if line[0:-1] in self.ignore_list: continue self.copy_stats(src_item) utils.execute("chmod", "--reference=%s" % src_item, dest_item, run_as_root=True) utils.execute("touch", "--reference=%s" % src_item, dest_item, run_as_root=True) utils.execute("chown", "--reference=%s" % src_item, dest_item, run_as_root=True) manila-2.0.0/manila/data/__init__.py0000664000567000056710000000000012701407107020363 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/data/helper.py0000664000567000056710000001763412701407107020130 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Hitachi Data Systems. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Helper class for Data Service operations.""" import os from oslo_config import cfg from oslo_log import log from manila.common import constants from manila import exception from manila.i18n import _, _LW from manila.share import rpcapi as share_rpc from manila import utils LOG = log.getLogger(__name__) data_helper_opts = [ cfg.IntOpt( 'data_access_wait_access_rules_timeout', default=180, help="Time to wait for access rules to be allowed/denied on backends " "when migrating a share (seconds)."), cfg.StrOpt( 'data_node_access_ip', default=None, help="The IP of the node interface connected to the admin network. 
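# --- Illustrative sketch: using data_utils.Copy --------------------------------
# Typical use of the Copy class defined above: build it for two mounted share
# instance paths, run it (usually from a separate thread so get_progress() and
# cancel() stay responsive), and poll progress. The paths and ignore list below
# are hypothetical; running a real copy requires root privileges because Copy
# shells out via utils.execute(..., run_as_root=True).

from manila.data import utils as data_utils

copy = data_utils.Copy(
    '/tmp/instance-src',       # mounted source share instance
    '/tmp/instance-dest',      # mounted destination share instance
    ['lost+found'])            # entries skipped by every pass

# copy.run()                   # size pass, data pass, then metadata pass
print(copy.get_progress())     # {'total_progress': 100} before any copy starts
# copy.cancel()                # sets the flag checked by every loop iteration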
" "Used for allowing access to the mounting shares."), cfg.StrOpt( 'data_node_access_cert', default=None, help="The certificate installed in the data node in order to " "allow access to certificate authentication-based shares."), ] CONF = cfg.CONF CONF.register_opts(data_helper_opts) class DataServiceHelper(object): def __init__(self, context, db, share): self.db = db self.share = share self.context = context self.share_rpc = share_rpc.ShareAPI() self.wait_access_rules_timeout = ( CONF.data_access_wait_access_rules_timeout) def _allow_data_access(self, access, share_instance_id, dest_share_instance_id=None): values = { 'share_id': self.share['id'], 'access_type': access['access_type'], 'access_level': access['access_level'], 'access_to': access['access_to'] } share_access_list = self.db.share_access_get_all_by_type_and_access( self.context, self.share['id'], access['access_type'], access['access_to']) for access in share_access_list: self._change_data_access_to_instance( share_instance_id, access, allow=False) access_ref = self.db.share_access_create(self.context, values) self._change_data_access_to_instance( share_instance_id, access_ref, allow=True) if dest_share_instance_id: self._change_data_access_to_instance( dest_share_instance_id, access_ref, allow=True) return access_ref def deny_access_to_data_service(self, access_ref, share_instance_id): self._change_data_access_to_instance( share_instance_id, access_ref, allow=False) # NOTE(ganso): Cleanup methods do not throw exceptions, since the # exceptions that should be thrown are the ones that call the cleanup def cleanup_data_access(self, access_ref, share_instance_id): try: self.deny_access_to_data_service(access_ref, share_instance_id) except Exception: LOG.warning(_LW("Could not cleanup access rule of share %s."), self.share['id']) def cleanup_temp_folder(self, instance_id, mount_path): try: path = os.path.join(mount_path, instance_id) if os.path.exists(path): os.rmdir(path) self._check_dir_not_exists(path) except Exception: LOG.warning(_LW("Could not cleanup instance %(instance_id)s " "temporary folders for data copy of " "share %(share_id)s."), { 'instance_id': instance_id, 'share_id': self.share['id']}) def cleanup_unmount_temp_folder(self, unmount_template, mount_path, share_instance_id): try: self.unmount_share_instance(unmount_template, mount_path, share_instance_id) except Exception: LOG.warning(_LW("Could not unmount folder of instance" " %(instance_id)s for data copy of " "share %(share_id)s."), { 'instance_id': share_instance_id, 'share_id': self.share['id']}) def _change_data_access_to_instance( self, instance_id, access_ref, allow=False): self.db.share_instance_update_access_status( self.context, instance_id, constants.STATUS_OUT_OF_SYNC) instance = self.db.share_instance_get( self.context, instance_id, with_share_data=True) if allow: self.share_rpc.allow_access(self.context, instance, access_ref) else: self.share_rpc.deny_access(self.context, instance, access_ref) utils.wait_for_access_update( self.context, self.db, instance, self.wait_access_rules_timeout) def allow_access_to_data_service(self, share, share_instance_id, dest_share_instance_id): if share['share_proto'].upper() == 'GLUSTERFS': access_to = CONF.data_node_access_cert access_type = 'cert' if not access_to: msg = _("Data Node Certificate not specified. Cannot mount " "instances for data copy of share %(share_id)s. 
" "Aborting.") % {'share_id': share['id']} raise exception.ShareDataCopyFailed(reason=msg) else: access_to = CONF.data_node_access_ip access_type = 'ip' if not access_to: msg = _("Data Node Admin Network IP not specified. Cannot " "mount instances for data copy of share %(share_id)s. " "Aborting.") % {'share_id': share['id']} raise exception.ShareDataCopyFailed(reason=msg) access = {'access_type': access_type, 'access_level': constants.ACCESS_LEVEL_RW, 'access_to': access_to} access_ref = self._allow_data_access(access, share_instance_id, dest_share_instance_id) return access_ref @utils.retry(exception.NotFound, 0.1, 10, 0.1) def _check_dir_exists(self, path): if not os.path.exists(path): raise exception.NotFound("Folder %s could not be found." % path) @utils.retry(exception.Found, 0.1, 10, 0.1) def _check_dir_not_exists(self, path): if os.path.exists(path): raise exception.Found("Folder %s was found." % path) def mount_share_instance(self, mount_template, mount_path, share_instance_id): path = os.path.join(mount_path, share_instance_id) if not os.path.exists(path): os.makedirs(path) self._check_dir_exists(path) mount_command = mount_template % {'path': path} utils.execute(*(mount_command.split()), run_as_root=True) def unmount_share_instance(self, unmount_template, mount_path, share_instance_id): path = os.path.join(mount_path, share_instance_id) unmount_command = unmount_template % {'path': path} utils.execute(*(unmount_command.split()), run_as_root=True) try: if os.path.exists(path): os.rmdir(path) self._check_dir_not_exists(path) except Exception: LOG.warning(_LW("Folder %s could not be removed."), path) manila-2.0.0/manila/data/manager.py0000664000567000056710000002733512701407107020262 0ustar jenkinsjenkins00000000000000# Copyright 2015, Hitachi Data Systems. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Data Service """ import os from oslo_config import cfg from oslo_log import log import six from manila.i18n import _, _LE, _LI, _LW from manila.common import constants from manila import context from manila.data import helper from manila.data import utils as data_utils from manila import exception from manila import manager from manila.share import rpcapi as share_rpc LOG = log.getLogger(__name__) data_opts = [ cfg.StrOpt( 'migration_tmp_location', default='/tmp/', help="Temporary path to create and mount shares during migration."), ] CONF = cfg.CONF CONF.register_opts(data_opts) class DataManager(manager.Manager): """Receives requests to handle data and sends responses.""" RPC_API_VERSION = '1.0' def __init__(self, service_name=None, *args, **kwargs): super(DataManager, self).__init__(*args, **kwargs) self.busy_tasks_shares = {} def init_host(self): ctxt = context.get_admin_context() shares = self.db.share_get_all(ctxt) for share in shares: if share['task_state'] in constants.BUSY_COPYING_STATES: self.db.share_update( ctxt, share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_ERROR}) def migration_start(self, context, ignore_list, share_id, share_instance_id, dest_share_instance_id, migration_info_src, migration_info_dest, notify): LOG.info(_LI( "Received request to migrate share content from share instance " "%(instance_id)s to instance %(dest_instance_id)s."), {'instance_id': share_instance_id, 'dest_instance_id': dest_share_instance_id}) share_ref = self.db.share_get(context, share_id) share_rpcapi = share_rpc.ShareAPI() mount_path = CONF.migration_tmp_location try: copy = data_utils.Copy( os.path.join(mount_path, share_instance_id), os.path.join(mount_path, dest_share_instance_id), ignore_list) self._copy_share_data( context, copy, share_ref, share_instance_id, dest_share_instance_id, migration_info_src, migration_info_dest) except exception.ShareDataCopyCancelled: share_rpcapi.migration_complete( context, share_ref, share_instance_id, dest_share_instance_id) return except Exception: self.db.share_update( context, share_id, {'task_state': constants.TASK_STATE_DATA_COPYING_ERROR}) msg = _("Failed to copy contents from instance %(src)s to " "instance %(dest)s.") % {'src': share_instance_id, 'dest': dest_share_instance_id} LOG.exception(msg) share_rpcapi.migration_complete( context, share_ref, share_instance_id, dest_share_instance_id) raise exception.ShareDataCopyFailed(reason=msg) finally: self.busy_tasks_shares.pop(share_id, None) LOG.info(_LI( "Completed copy operation of migrating share content from share " "instance %(instance_id)s to instance %(dest_instance_id)s."), {'instance_id': share_instance_id, 'dest_instance_id': dest_share_instance_id}) if notify: LOG.info(_LI( "Notifying source backend that migrating share content from" " share instance %(instance_id)s to instance " "%(dest_instance_id)s completed."), {'instance_id': share_instance_id, 'dest_instance_id': dest_share_instance_id}) share_rpcapi.migration_complete( context, share_ref, share_instance_id, dest_share_instance_id) def data_copy_cancel(self, context, share_id): LOG.info(_LI("Received request to cancel share migration " "of share %s."), share_id) copy = self.busy_tasks_shares.get(share_id) if copy: copy.cancel() else: msg = _("Data copy for migration of share %s cannot be cancelled" " at this moment.") % share_id LOG.error(msg) raise exception.InvalidShare(reason=msg) def data_copy_get_progress(self, context, share_id): LOG.info(_LI("Received request to get share migration information " "of share 
%s."), share_id) copy = self.busy_tasks_shares.get(share_id) if copy: result = copy.get_progress() LOG.info(_LI("Obtained following share migration information " "of share %(share)s: %(info)s."), {'share': share_id, 'info': six.text_type(result)}) return result else: msg = _("Migration of share %s data copy progress cannot be " "obtained at this moment.") % share_id LOG.error(msg) raise exception.InvalidShare(reason=msg) def _copy_share_data( self, context, copy, src_share, share_instance_id, dest_share_instance_id, migration_info_src, migration_info_dest): copied = False mount_path = CONF.migration_tmp_location self.db.share_update( context, src_share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_STARTING}) helper_src = helper.DataServiceHelper(context, self.db, src_share) helper_dest = helper_src access_ref_src = helper_src.allow_access_to_data_service( src_share, share_instance_id, dest_share_instance_id) access_ref_dest = access_ref_src def _call_cleanups(items): for item in items: if 'unmount_src' == item: helper_src.cleanup_unmount_temp_folder( migration_info_src['unmount'], mount_path, share_instance_id) elif 'temp_folder_src' == item: helper_src.cleanup_temp_folder(share_instance_id, mount_path) elif 'temp_folder_dest' == item: helper_dest.cleanup_temp_folder(dest_share_instance_id, mount_path) elif 'access_src' == item: helper_src.cleanup_data_access(access_ref_src, share_instance_id) elif 'access_dest' == item: helper_dest.cleanup_data_access(access_ref_dest, dest_share_instance_id) try: helper_src.mount_share_instance( migration_info_src['mount'], mount_path, share_instance_id) except Exception: msg = _("Share migration failed attempting to mount " "share instance %s.") % share_instance_id LOG.exception(msg) _call_cleanups(['temp_folder_src', 'access_dest', 'access_src']) raise exception.ShareDataCopyFailed(reason=msg) try: helper_dest.mount_share_instance( migration_info_dest['mount'], mount_path, dest_share_instance_id) except Exception: msg = _("Share migration failed attempting to mount " "share instance %s.") % dest_share_instance_id LOG.exception(msg) _call_cleanups(['temp_folder_dest', 'unmount_src', 'temp_folder_src', 'access_dest', 'access_src']) raise exception.ShareDataCopyFailed(reason=msg) self.busy_tasks_shares[src_share['id']] = copy self.db.share_update( context, src_share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_IN_PROGRESS}) try: copy.run() self.db.share_update( context, src_share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_COMPLETING}) if copy.get_progress()['total_progress'] == 100: copied = True except Exception: LOG.exception(_LE("Failed to copy data from share instance " "%(share_instance_id)s to " "%(dest_share_instance_id)s."), {'share_instance_id': share_instance_id, 'dest_share_instance_id': dest_share_instance_id}) try: helper_src.unmount_share_instance(migration_info_src['unmount'], mount_path, share_instance_id) except Exception: LOG.exception(_LE("Could not unmount folder of instance" " %s after its data copy."), share_instance_id) try: helper_dest.unmount_share_instance( migration_info_dest['unmount'], mount_path, dest_share_instance_id) except Exception: LOG.exception(_LE("Could not unmount folder of instance" " %s after its data copy."), dest_share_instance_id) try: helper_src.deny_access_to_data_service( access_ref_src, share_instance_id) except Exception: LOG.exception(_LE("Could not deny access to instance" " %s after its data copy."), share_instance_id) try: helper_dest.deny_access_to_data_service( 
access_ref_dest, dest_share_instance_id) except Exception: LOG.exception(_LE("Could not deny access to instance" " %s after its data copy."), dest_share_instance_id) if copy and copy.cancelled: self.db.share_update( context, src_share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_CANCELLED}) LOG.warning(_LW("Copy of data from share instance " "%(src_instance)s to share instance " "%(dest_instance)s was cancelled."), {'src_instance': share_instance_id, 'dest_instance': dest_share_instance_id}) raise exception.ShareDataCopyCancelled( src_instance=share_instance_id, dest_instance=dest_share_instance_id) elif not copied: msg = _("Copying data from share instance %(instance_id)s " "to %(dest_instance_id)s did not succeed.") % ( {'instance_id': share_instance_id, 'dest_instance_id': dest_share_instance_id}) raise exception.ShareDataCopyFailed(reason=msg) self.db.share_update( context, src_share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_COMPLETED}) LOG.debug("Copy of data from share instance %(src_instance)s to " "share instance %(dest_instance)s was successful.", {'src_instance': share_instance_id, 'dest_instance': dest_share_instance_id}) manila-2.0.0/manila/data/rpcapi.py0000664000567000056710000000447712701407107020130 0ustar jenkinsjenkins00000000000000# Copyright 2015, Hitachi Data Systems. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Client side of the data manager RPC API. """ from oslo_config import cfg import oslo_messaging as messaging from manila import rpc CONF = cfg.CONF class DataAPI(object): """Client side of the data RPC API. API version history: 1.0 - Initial version, Add migration_start(), data_copy_cancel(), data_copy_get_progress() """ BASE_RPC_API_VERSION = '1.0' def __init__(self): super(DataAPI, self).__init__() target = messaging.Target(topic=CONF.data_topic, version=self.BASE_RPC_API_VERSION) self.client = rpc.get_client(target, version_cap='1.0') def migration_start(self, context, share_id, ignore_list, share_instance_id, dest_share_instance_id, migration_info_src, migration_info_dest, notify): call_context = self.client.prepare(version='1.0') call_context.cast( context, 'migration_start', share_id=share_id, ignore_list=ignore_list, share_instance_id=share_instance_id, dest_share_instance_id=dest_share_instance_id, migration_info_src=migration_info_src, migration_info_dest=migration_info_dest, notify=notify) def data_copy_cancel(self, context, share_id): call_context = self.client.prepare(version='1.0') call_context.call(context, 'data_copy_cancel', share_id=share_id) def data_copy_get_progress(self, context, share_id): call_context = self.client.prepare(version='1.0') return call_context.call(context, 'data_copy_get_progress', share_id=share_id) manila-2.0.0/manila/share/0000775000567000056710000000000012701407265016462 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/hook.py0000664000567000056710000001341412701407107017772 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. 
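# --- Illustrative sketch: using the DataAPI RPC client -------------------------
# How the DataAPI client defined in manila/data/rpcapi.py above is consumed by
# the share service: migration_start() is a fire-and-forget cast handled by
# DataManager.migration_start(), while data_copy_get_progress() and
# data_copy_cancel() are synchronous calls. The context, ids and migration info
# dicts below are hypothetical placeholders.

from manila.data import rpcapi as data_rpcapi


def start_share_data_copy(context, share, src_instance_id, dest_instance_id,
                          src_info, dest_info):
    data_api = data_rpcapi.DataAPI()
    # Asynchronous: returns immediately, the copy runs in the data service.
    data_api.migration_start(
        context, share['id'], ['lost+found'], src_instance_id,
        dest_instance_id, src_info, dest_info, notify=True)


def report_copy_progress(context, share):
    data_api = data_rpcapi.DataAPI()
    # Synchronous: blocks until the data service answers with a progress dict
    # such as {'total_progress': 42, ...}.
    return data_api.data_copy_get_progress(context, share['id'])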
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Module with hook interface for actions performed by share driver. All available hooks are placed in manila/share/hooks dir. Hooks are used by share services and can serve several use cases such as any kind of notification and performing additional backend-specific actions. """ import abc from oslo_config import cfg from oslo_log import log import six from manila import context as ctxt from manila.i18n import _LW hook_options = [ cfg.BoolOpt( "enable_pre_hooks", default=False, help="Whether to enable pre hooks or not.", deprecated_group='DEFAULT'), cfg.BoolOpt( "enable_post_hooks", default=False, help="Whether to enable post hooks or not.", deprecated_group='DEFAULT'), cfg.BoolOpt( "enable_periodic_hooks", default=False, help="Whether to enable periodic hooks or not.", deprecated_group='DEFAULT'), cfg.BoolOpt( "suppress_pre_hooks_errors", default=False, help="Whether to suppress pre hook errors (allow driver perform " "actions) or not.", deprecated_group='DEFAULT'), cfg.BoolOpt( "suppress_post_hooks_errors", default=False, help="Whether to suppress post hook errors (allow driver's results " "to pass through) or not.", deprecated_group='DEFAULT'), cfg.FloatOpt( "periodic_hooks_interval", default=300.0, help="Interval in seconds between execution of periodic hooks. " "Used when option 'enable_periodic_hooks' is set to True. " "Default is 300.", deprecated_group='DEFAULT'), ] CONF = cfg.CONF CONF.register_opts(hook_options) LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class HookBase(object): def get_config_option(self, key): if self.configuration: return self.configuration.safe_get(key) return CONF.get(key) def __init__(self, configuration, host): self.host = host self.configuration = configuration if self.configuration: self.configuration.append_config_values(hook_options) self.pre_hooks_enabled = self.get_config_option("enable_pre_hooks") self.post_hooks_enabled = self.get_config_option("enable_post_hooks") self.periodic_hooks_enabled = self.get_config_option( "enable_periodic_hooks") self.suppress_pre_hooks_errors = self.get_config_option( "suppress_pre_hooks_errors") self.suppress_post_hooks_errors = self.get_config_option( "suppress_post_hooks_errors") def execute_pre_hook(self, context=None, func_name=None, *args, **kwargs): """Hook called before driver's action.""" if not self.pre_hooks_enabled: return LOG.debug("Running 'pre hook'.") context = context or ctxt.get_admin_context() try: pre_data = self._execute_pre_hook( context=context, func_name=func_name, *args, **kwargs) except Exception as e: if self.suppress_pre_hooks_errors: LOG.warning(_LW("\nSuppressed exception in pre hook. 
%s\n"), e) pre_data = e else: raise return pre_data def execute_post_hook(self, context=None, func_name=None, pre_hook_data=None, driver_action_results=None, *args, **kwargs): """Hook called after driver's action.""" if not self.post_hooks_enabled: return LOG.debug("Running 'post hook'.") context = context or ctxt.get_admin_context() try: post_data = self._execute_post_hook( context=context, func_name=func_name, pre_hook_data=pre_hook_data, driver_action_results=driver_action_results, *args, **kwargs) except Exception as e: if self.suppress_post_hooks_errors: LOG.warning( _LW("\nSuppressed exception in post hook. %s\n"), e) post_data = e else: raise return post_data def execute_periodic_hook(self, context, periodic_hook_data, *args, **kwargs): """Hook called on periodic basis.""" if not self.periodic_hooks_enabled: return LOG.debug("Running 'periodic hook'.") context = context or ctxt.get_admin_context() return self._execute_periodic_hook( context, periodic_hook_data, *args, **kwargs) @abc.abstractmethod def _execute_pre_hook(self, context, func_name, *args, **kwargs): """Redefine this method for pre hook action.""" @abc.abstractmethod def _execute_post_hook(self, context, func_name, pre_hook_data, driver_action_results, *args, **kwargs): """Redefine this method for post hook action.""" @abc.abstractmethod def _execute_periodic_hook(self, context, periodic_hook_data, *args, **kwargs): """Redefine this method for periodic hook action.""" manila-2.0.0/manila/share/drivers_private_data.py0000664000567000056710000001407012701407107023232 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Module provides possibility for share drivers to store private information related to common Manila models like Share or Snapshot. """ import abc from oslo_config import cfg from oslo_utils import importutils from oslo_utils import uuidutils import six from manila.db import api as db_api from manila.i18n import _ private_data_opts = [ cfg.StrOpt( 'drivers_private_storage_class', default='manila.share.drivers_private_data.SqlStorageDriver', help='The full class name of the Private Data Driver class to use.'), ] CONF = cfg.CONF @six.add_metaclass(abc.ABCMeta) class StorageDriver(object): def __init__(self, context, backend_host): # Backend shouldn't access data stored by another backend self.backend_host = backend_host self.context = context @abc.abstractmethod def get(self, entity_id, key, default): """Backend implementation for DriverPrivateData.get() method. Should return all keys for given 'entity_id' if 'key' is None. Otherwise should return value for provided 'key'. If values for provided 'entity_id' or 'key' not found, should return 'default'. See DriverPrivateData.get() method for more details. """ @abc.abstractmethod def update(self, entity_id, details, delete_existing): """Backend implementation for DriverPrivateData.update() method. 
Should update details for given 'entity_id' with behaviour defined by 'delete_existing' boolean flag. See DriverPrivateData.update() method for more details. """ @abc.abstractmethod def delete(self, entity_id, key): """Backend implementation for DriverPrivateData.delete() method. Should return delete all keys if 'key' is None. Otherwise should delete value for provided 'key'. See DriverPrivateData.update() method for more details. """ class SqlStorageDriver(StorageDriver): def update(self, entity_id, details, delete_existing): return db_api.driver_private_data_update( self.context, self.backend_host, entity_id, details, delete_existing ) def get(self, entity_id, key, default): return db_api.driver_private_data_get( self.context, self.backend_host, entity_id, key, default ) def delete(self, entity_id, key): return db_api.driver_private_data_delete( self.context, self.backend_host, entity_id, key ) class DriverPrivateData(object): def __init__(self, storage=None, *args, **kwargs): """Init method. :param storage: None or inheritor of StorageDriver abstract class :param config_group: Optional -- Config group used for loading settings :param context: Optional -- Current context :param backend_host: Optional -- Driver host """ config_group_name = kwargs.get('config_group') CONF.register_opts(private_data_opts, group=config_group_name) if storage is not None: self._storage = storage elif 'context' in kwargs and 'backend_host' in kwargs: if config_group_name: conf = getattr(CONF, config_group_name) else: conf = CONF storage_class = conf.drivers_private_storage_class cls = importutils.import_class(storage_class) self._storage = cls(kwargs.get('context'), kwargs.get('backend_host')) else: msg = _("You should provide 'storage' parameter or" " 'context' and 'backend_host' parameters.") raise ValueError(msg) def get(self, entity_id, key=None, default=None): """Get one, list or all key-value pairs. :param entity_id: Model UUID :param key: Key string or list of keys :param default: Default value for case when key(s) not found :returns: string or dict """ self._validate_entity_id(entity_id) return self._storage.get(entity_id, key, default) def update(self, entity_id, details, delete_existing=False): """Update or create specified key-value pairs. :param entity_id: Model UUID :param details: dict with key-value pairs data. Keys and values should be strings. :param delete_existing: boolean flag which determines behaviour for existing key-value pairs: True - remove all existing key-value pairs False (default) - leave as is """ self._validate_entity_id(entity_id) if not isinstance(details, dict): msg = (_("Provided details %s is not valid dict.") % six.text_type(details)) raise ValueError(msg) return self._storage.update( entity_id, details, delete_existing) def delete(self, entity_id, key=None): """Delete one, list or all key-value pairs. :param entity_id: Model UUID :param key: Key string or list of keys """ self._validate_entity_id(entity_id) return self._storage.delete(entity_id, key) @staticmethod def _validate_entity_id(entity_id): if not uuidutils.is_uuid_like(entity_id): msg = (_("Provided entity_id %s is not valid UUID.") % six.text_type(entity_id)) raise ValueError(msg) manila-2.0.0/manila/share/utils.py0000664000567000056710000000553312701407112020171 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # Copyright (c) 2015 Rushil Chugh # All Rights Reserved. 
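# --- Illustrative sketch: using DriverPrivateData from a share driver ----------
# The class above gives drivers a per-backend key-value store keyed by the UUID
# of a Manila model (share, snapshot, ...); the storage backend defaults to
# SqlStorageDriver via the 'drivers_private_storage_class' option. The context,
# backend host, UUID and key below are hypothetical placeholders.

from manila.share import drivers_private_data


def remember_export_id(context, share_id, export_id):
    store = drivers_private_data.DriverPrivateData(
        context=context, backend_host='hostA@backend1')
    # Create or refresh a key-value pair for this share.
    store.update(share_id, {'export_id': export_id})
    # Read it back, falling back to None when absent.
    return store.get(share_id, key='export_id', default=None)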
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Share-related Utilities and helpers.""" from oslo_log import log LOG = log.getLogger(__name__) DEFAULT_POOL_NAME = '_pool0' def extract_host(host, level='backend', use_default_pool_name=False): """Extract Host, Backend or Pool information from host string. :param host: String for host, which could include host@backend#pool info :param level: Indicate which level of information should be extracted from host string. Level can be 'host', 'backend', 'pool', or 'backend_name', default value is 'backend' :param use_default_pool_name: This flag specifies what to do if level == 'pool' and there is no 'pool' info encoded in host string. default_pool_name=True will return DEFAULT_POOL_NAME, otherwise it will return None. Default value of this parameter is False. :return: expected level of information For example: host = 'HostA@BackendB#PoolC' ret = extract_host(host, 'host') # ret is 'HostA' ret = extract_host(host, 'backend') # ret is 'HostA@BackendB' ret = extract_host(host, 'pool') # ret is 'PoolC' ret = extract_host(host, 'backend_name') # ret is 'BackendB' host = 'HostX@BackendY' ret = extract_host(host, 'pool') # ret is None ret = extract_host(host, 'pool', True) # ret is '_pool0' """ if level == 'host': # Make sure pool is not included hst = host.split('#')[0] return hst.split('@')[0] if level == 'backend_name': hst = host.split('#')[0] return hst.split('@')[1] elif level == 'backend': return host.split('#')[0] elif level == 'pool': lst = host.split('#') if len(lst) == 2: return lst[1] elif use_default_pool_name is True: return DEFAULT_POOL_NAME else: return None def append_host(host, pool): """Encode pool into host info.""" if not host or not pool: return host new_host = "#".join([host, pool]) return new_host manila-2.0.0/manila/share/__init__.py0000664000567000056710000000200312701407107020561 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Importing full names to not pollute the namespace and cause possible # collisions with use of 'from manila.share import ' elsewhere. 
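# --- Illustrative sketch: host string round trip -------------------------------
# extract_host() and append_host() defined above are inverses for the pool part
# of the 'host@backend#pool' convention used by the scheduler and drivers. The
# host string below is a hypothetical example; the assertions follow directly
# from the docstring of extract_host().

from manila.share import utils as share_utils

host = share_utils.append_host('HostA@BackendB', 'PoolC')
assert host == 'HostA@BackendB#PoolC'
assert share_utils.extract_host(host, 'host') == 'HostA'
assert share_utils.extract_host(host, 'backend') == 'HostA@BackendB'
assert share_utils.extract_host(host, 'backend_name') == 'BackendB'
assert share_utils.extract_host(host, 'pool') == 'PoolC'
assert share_utils.extract_host('HostX@BackendY', 'pool',
                                use_default_pool_name=True) == '_pool0'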
import oslo_utils.importutils as import_utils from manila.common import config CONF = config.CONF API = import_utils.import_class(CONF.share_api_class) manila-2.0.0/manila/share/drivers/0000775000567000056710000000000012701407265020140 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/zfssa/0000775000567000056710000000000012701407265021266 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/zfssa/__init__.py0000664000567000056710000000000012701407107023360 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/zfssa/zfssashare.py0000664000567000056710000003452412701407107024014 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ ZFS Storage Appliance Manila Share Driver """ import base64 from oslo_config import cfg from oslo_log import log from oslo_utils import units import six from manila import exception from manila.i18n import _ from manila.i18n import _LE from manila.share import driver from manila.share.drivers.zfssa import zfssarest ZFSSA_OPTS = [ cfg.StrOpt('zfssa_host', help='ZFSSA management IP address.'), cfg.StrOpt('zfssa_data_ip', help='IP address for data.'), cfg.StrOpt('zfssa_auth_user', help='ZFSSA management authorized username.'), cfg.StrOpt('zfssa_auth_password', help='ZFSSA management authorized userpassword.'), cfg.StrOpt('zfssa_pool', help='ZFSSA storage pool name.'), cfg.StrOpt('zfssa_project', help='ZFSSA project name.'), cfg.StrOpt('zfssa_nas_checksum', default='fletcher4', help='Controls checksum used for data blocks.'), cfg.StrOpt('zfssa_nas_compression', default='off', help='Data compression-off, lzjb, gzip-2, gzip, gzip-9.'), cfg.StrOpt('zfssa_nas_logbias', default='latency', help='Controls behavior when servicing synchronous writes.'), cfg.StrOpt('zfssa_nas_mountpoint', default='', help='Location of project in ZFS/SA.'), cfg.StrOpt('zfssa_nas_quota_snap', default='true', help='Controls whether a share quota includes snapshot.'), cfg.StrOpt('zfssa_nas_rstchown', default='true', help='Controls whether file ownership can be changed.'), cfg.StrOpt('zfssa_nas_vscan', default='false', help='Controls whether the share is scanned for viruses.'), cfg.StrOpt('zfssa_rest_timeout', help='REST connection timeout (in seconds).') ] cfg.CONF.register_opts(ZFSSA_OPTS) LOG = log.getLogger(__name__) def factory_zfssa(): return zfssarest.ZFSSAApi() class ZFSSAShareDriver(driver.ShareDriver): """ZFSSA share driver: Supports NFS and CIFS protocols. Uses ZFSSA RESTful API to create shares and snapshots on backend. API version history: 1.0 - Initial version. 1.0.1 - Add share shrink/extend feature. 
""" VERSION = '1.0.1' PROTOCOL = 'NFS_CIFS' def __init__(self, *args, **kwargs): super(ZFSSAShareDriver, self).__init__(False, *args, **kwargs) self.configuration.append_config_values(ZFSSA_OPTS) self.zfssa = None self._stats = None self.mountpoint = '/export/' lcfg = self.configuration required = [ 'zfssa_host', 'zfssa_data_ip', 'zfssa_auth_user', 'zfssa_auth_password', 'zfssa_pool', 'zfssa_project' ] for prop in required: if not getattr(lcfg, prop, None): exception_msg = _('%s is required in manila.conf') % prop LOG.error(exception_msg) raise exception.InvalidParameterValue(exception_msg) self.default_args = { 'compression': lcfg.zfssa_nas_compression, 'logbias': lcfg.zfssa_nas_logbias, 'checksum': lcfg.zfssa_nas_checksum, 'vscan': lcfg.zfssa_nas_vscan, 'rstchown': lcfg.zfssa_nas_rstchown, } self.share_args = { 'sharedav': 'off', 'shareftp': 'off', 'sharesftp': 'off', 'sharetftp': 'off', 'root_permissions': '777', 'sharenfs': 'sec=sys', 'sharesmb': 'off', 'quota_snap': self.configuration.zfssa_nas_quota_snap, 'reservation_snap': self.configuration.zfssa_nas_quota_snap, } def do_setup(self, context): """Login, create project, no sharing option enabled.""" lcfg = self.configuration LOG.debug("Connecting to host: %s.", lcfg.zfssa_host) self.zfssa = factory_zfssa() self.zfssa.set_host(lcfg.zfssa_host, timeout=lcfg.zfssa_rest_timeout) creds = '%s:%s' % (lcfg.zfssa_auth_user, lcfg.zfssa_auth_password) auth_str = base64.encodestring(six.b(creds))[:-1] self.zfssa.login(auth_str) if lcfg.zfssa_nas_mountpoint == '': self.mountpoint += lcfg.zfssa_project else: self.mountpoint += lcfg.zfssa_nas_mountpoint arg = { 'name': lcfg.zfssa_project, 'sharesmb': 'off', 'sharenfs': 'off', 'mountpoint': self.mountpoint, } arg.update(self.default_args) self.zfssa.create_project(lcfg.zfssa_pool, lcfg.zfssa_project, arg) self.zfssa.enable_service('nfs') self.zfssa.enable_service('smb') def check_for_setup_error(self): """Check for properly configured pool, project.""" lcfg = self.configuration LOG.debug("Verifying pool %s.", lcfg.zfssa_pool) self.zfssa.verify_pool(lcfg.zfssa_pool) LOG.debug("Verifying project %s.", lcfg.zfssa_project) self.zfssa.verify_project(lcfg.zfssa_pool, lcfg.zfssa_project) def _export_location(self, share): """Export share's location based on protocol used.""" lcfg = self.configuration arg = { 'host': lcfg.zfssa_data_ip, 'mountpoint': self.mountpoint, 'name': share['id'], } location = '' proto = share['share_proto'] if proto == 'NFS': location = ("%(host)s:%(mountpoint)s/%(name)s" % arg) elif proto == 'CIFS': location = ("\\\\%(host)s\\%(name)s" % arg) else: exception_msg = _('Protocol %s is not supported.') % proto LOG.error(exception_msg) raise exception.InvalidParameterValue(exception_msg) LOG.debug("Export location: %s.", location) return location def create_arg(self, size): size = units.Gi * int(size) arg = { 'quota': size, 'reservation': size, } arg.update(self.share_args) return arg def create_share(self, context, share, share_server=None): """Create a share and export it based on protocol used. The created share inherits properties from its project. 
""" lcfg = self.configuration arg = self.create_arg(share['size']) arg.update(self.default_args) arg.update({'name': share['id']}) if share['share_proto'] == 'CIFS': arg.update({'sharesmb': 'on'}) LOG.debug("ZFSSAShareDriver.create_share: id=%(name)s, size=%(quota)s", {'name': arg['name'], 'quota': arg['quota']}) self.zfssa.create_share(lcfg.zfssa_pool, lcfg.zfssa_project, arg) return self._export_location(share) def delete_share(self, context, share, share_server=None): """Delete a share. Shares with existing snapshots can't be deleted. """ LOG.debug("ZFSSAShareDriver.delete_share: id=%s", share['id']) lcfg = self.configuration self.zfssa.delete_share(lcfg.zfssa_pool, lcfg.zfssa_project, share['id']) def create_snapshot(self, context, snapshot, share_server=None): """Creates a snapshot of the snapshot['share_id'].""" LOG.debug("ZFSSAShareDriver.create_snapshot: " "id=%(snap)s share=%(share)s", {'snap': snapshot['id'], 'share': snapshot['share_id']}) lcfg = self.configuration self.zfssa.create_snapshot(lcfg.zfssa_pool, lcfg.zfssa_project, snapshot['share_id'], snapshot['id']) def create_share_from_snapshot(self, context, share, snapshot, share_server=None): """Create a share from a snapshot - clone a snapshot.""" lcfg = self.configuration LOG.debug("ZFSSAShareDriver.create_share_from_snapshot: clone=%s", share['id']) LOG.debug("ZFSSAShareDriver.create_share_from_snapshot: snapshot=%s", snapshot['id']) arg = self.create_arg(share['size']) details = { 'share': share['id'], 'project': lcfg.zfssa_project, } arg.update(details) if share['share_proto'] == 'CIFS': arg.update({'sharesmb': 'on'}) self.zfssa.clone_snapshot(lcfg.zfssa_pool, lcfg.zfssa_project, snapshot, share, arg) return self._export_location(share) def delete_snapshot(self, context, snapshot, share_server=None): """Delete a snapshot. Snapshots with existing clones cannot be deleted. """ LOG.debug("ZFSSAShareDriver.delete_snapshot: id=%s", snapshot['id']) lcfg = self.configuration has_clones = self.zfssa.has_clones(lcfg.zfssa_pool, lcfg.zfssa_project, snapshot['share_id'], snapshot['id']) if has_clones: LOG.error(_LE("snapshot %s: has clones"), snapshot['id']) raise exception.ShareSnapshotIsBusy(snapshot_name=snapshot['id']) self.zfssa.delete_snapshot(lcfg.zfssa_pool, lcfg.zfssa_project, snapshot['share_id'], snapshot['id']) def ensure_share(self, context, share, share_server=None): lcfg = self.configuration details = self.zfssa.get_share(lcfg.zfssa_pool, lcfg.zfssa_project, share['id']) if not details: msg = (_("Share %s doesn't exists.") % share['id']) raise exception.ManilaException(msg) def shrink_share(self, share, new_size, share_server=None): """Shrink a share to new_size.""" lcfg = self.configuration details = self.zfssa.get_share(lcfg.zfssa_pool, lcfg.zfssa_project, share['id']) used_space = details['space_data'] new_size_byte = int(new_size) * units.Gi if used_space > new_size_byte: LOG.error(_LE('%(used).1fGB of share %(id)s is already used. 
' 'Cannot shrink to %(newsize)dGB.'), {'used': float(used_space) / units.Gi, 'id': share['id'], 'newsize': new_size}) raise exception.ShareShrinkingPossibleDataLoss( share_id=share['id']) arg = self.create_arg(new_size) self.zfssa.modify_share(lcfg.zfssa_pool, lcfg.zfssa_project, share['id'], arg) def extend_share(self, share, new_size, share_server=None): """Extend a share to new_size.""" lcfg = self.configuration free_space = self.zfssa.get_project_stats(lcfg.zfssa_pool, lcfg.zfssa_project) diff_space = int(new_size - share['size']) * units.Gi if diff_space > free_space: msg = (_('There is not enough free space in project %s') % (lcfg.zfssa_project)) LOG.error(msg) raise exception.ShareExtendingError(share_id=share['id'], reason=msg) arg = self.create_arg(new_size) self.zfssa.modify_share(lcfg.zfssa_pool, lcfg.zfssa_project, share['id'], arg) def allow_access(self, context, share, access, share_server=None): """Allows access to an NFS share for the specified IP.""" LOG.debug("ZFSSAShareDriver.allow_access: share=%s", share['id']) lcfg = self.configuration if share['share_proto'] == 'NFS': self.zfssa.allow_access_nfs(lcfg.zfssa_pool, lcfg.zfssa_project, share['id'], access) def deny_access(self, context, share, access, share_server=None): """Deny access to an NFS share for the specified IP.""" LOG.debug("ZFSSAShareDriver.deny_access: share=%s", share['id']) lcfg = self.configuration if share['share_proto'] == 'NFS': self.zfssa.deny_access_nfs(lcfg.zfssa_pool, lcfg.zfssa_project, share['id'], access) elif share['share_proto'] == 'CIFS': return def _update_share_stats(self): """Retrieve stats info from a share.""" backend_name = self.configuration.safe_get('share_backend_name') data = dict( share_backend_name=backend_name or self.__class__.__name__, vendor_name='Oracle', driver_version=self.VERSION, storage_protocol=self.PROTOCOL) lcfg = self.configuration (avail, used) = self.zfssa.get_pool_stats(lcfg.zfssa_pool) if avail: data['free_capacity_gb'] = int(avail) / units.Gi if used: total = int(avail) + int(used) data['total_capacity_gb'] = total / units.Gi else: data['total_capacity_gb'] = 0 else: data['free_capacity_gb'] = 0 data['total_capacity_gb'] = 0 super(ZFSSAShareDriver, self)._update_share_stats(data) def get_network_allocations_number(self): """Returns number of network allocations for creating VIFs.""" return 0 manila-2.0.0/manila/share/drivers/zfssa/restclient.py0000664000567000056710000003111712701407107024012 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ ZFS Storage Appliance REST API Client Programmatic Interface TODO(diemtran): this module needs to be placed in a library common to OpenStack services. When this happens, the file should be removed from Manila code base and imported from the relevant library. 
""" import time from oslo_serialization import jsonutils import six from six.moves import http_client # pylint: disable=E0611,F0401 from six.moves.urllib import error as urlerror from six.moves.urllib import request as urlrequest def log_debug_msg(obj, message): if obj.log_function: obj.log_function(message) class Status(object): """Result HTTP Status.""" #: Request return OK OK = http_client.OK # pylint: disable=invalid-name #: New resource created successfully CREATED = http_client.CREATED #: Command accepted ACCEPTED = http_client.ACCEPTED #: Command returned OK but no data will be returned NO_CONTENT = http_client.NO_CONTENT #: Bad Request BAD_REQUEST = http_client.BAD_REQUEST #: User is not authorized UNAUTHORIZED = http_client.UNAUTHORIZED #: The request is not allowed FORBIDDEN = http_client.FORBIDDEN #: The requested resource was not found NOT_FOUND = http_client.NOT_FOUND #: The request is not allowed NOT_ALLOWED = http_client.METHOD_NOT_ALLOWED #: Request timed out TIMEOUT = http_client.REQUEST_TIMEOUT #: Invalid request CONFLICT = http_client.CONFLICT #: Service Unavailable BUSY = http_client.SERVICE_UNAVAILABLE class RestResult(object): """Result from a REST API operation.""" def __init__(self, logfunc=None, response=None, err=None): """Initialize a RestResult containing the results from a REST call. :param logfunc: debug log function. :param response: HTTP response. :param err: HTTP error. """ self.response = response self.log_function = logfunc self.error = err self.data = "" self.status = 0 if self.response: self.status = self.response.getcode() result = self.response.read() while result: self.data += result result = self.response.read() if self.error: self.status = self.error.code self.data = http_client.responses[self.status] log_debug_msg(self, 'Response code: %s' % self.status) log_debug_msg(self, 'Response data: %s' % self.data) def get_header(self, name): """Get an HTTP header with the given name from the results. :param name: HTTP header name. :return: The header value or None if no value is found. """ if self.response is None: return None info = self.response.info() return info.getheader(name) class RestClientError(Exception): """Exception for ZFS REST API client errors.""" def __init__(self, status, name="ERR_INTERNAL", message=None): """Create a REST Response exception. :param status: HTTP response status. :param name: The name of the REST API error type. :param message: Descriptive error message returned from REST call. """ super(RestClientError, self).__init__(message) self.code = status self.name = name self.msg = message if status in http_client.responses: self.msg = http_client.responses[status] def __str__(self): return "%d %s %s" % (self.code, self.name, self.msg) class RestClientURL(object): # pylint: disable=R0902 """ZFSSA urllib client.""" def __init__(self, url, logfunc=None, **kwargs): """Initialize a REST client. :param url: The ZFSSA REST API URL. :key session: HTTP Cookie value of x-auth-session obtained from a normal BUI login. :key timeout: Time in seconds to wait for command to complete. (Default is 60 seconds). 
""" self.url = url self.log_function = logfunc self.local = kwargs.get("local", False) self.base_path = kwargs.get("base_path", "/api") self.timeout = kwargs.get("timeout", 60) self.headers = None if kwargs.get('session'): self.headers['x-auth-session'] = kwargs.get('session') self.headers = {"content-type": "application/json"} self.do_logout = False self.auth_str = None def _path(self, path, base_path=None): """Build rest url path.""" if path.startswith("http://") or path.startswith("https://"): return path if base_path is None: base_path = self.base_path if not path.startswith(base_path) and not ( self.local and ("/api" + path).startswith(base_path)): path = "%s%s" % (base_path, path) if self.local and path.startswith("/api"): path = path[4:] return self.url + path def _authorize(self): """Performs authorization setting x-auth-session.""" self.headers['authorization'] = 'Basic %s' % self.auth_str if 'x-auth-session' in self.headers: del self.headers['x-auth-session'] try: result = self.post("/access/v1") del self.headers['authorization'] if result.status == http_client.CREATED: self.headers['x-auth-session'] = \ result.get_header('x-auth-session') self.do_logout = True log_debug_msg(self, ('ZFSSA version: %s') % result.get_header('x-zfssa-version')) elif result.status == http_client.NOT_FOUND: raise RestClientError(result.status, name="ERR_RESTError", message=("REST Not Available:" "Please Upgrade")) except RestClientError as err: del self.headers['authorization'] raise err def login(self, auth_str): """Login to an appliance using a user name and password. Start a session like what is done logging into the BUI. This is not a requirement to run REST commands, since the protocol is stateless. What is does is set up a cookie session so that some server side caching can be done. If login is used remember to call logout when finished. :param auth_str: Authorization string (base64). """ self.auth_str = auth_str self._authorize() def logout(self): """Logout of an appliance.""" result = None try: result = self.delete("/access/v1", base_path="/api") except RestClientError: pass self.headers.clear() self.do_logout = False return result def islogin(self): """return if client is login.""" return self.do_logout @staticmethod def mkpath(*args, **kwargs): """Make a path?query string for making a REST request. :cmd_params args: The path part. :cmd_params kwargs: The query part. """ buf = six.StringIO() query = "?" for arg in args: buf.write("/") buf.write(arg) for k in kwargs: buf.write(query) if query == "?": query = "&" buf.write(k) buf.write("=") buf.write(kwargs[k]) return buf.getvalue() # pylint: disable=R0912 def request(self, path, request, body=None, **kwargs): """Make an HTTP request and return the results. :param path: Path used with the initialized URL to make a request. :param request: HTTP request type (GET, POST, PUT, DELETE). :param body: HTTP body of request. :key accept: Set HTTP 'Accept' header with this value. :key base_path: Override the base_path for this request. :key content: Set HTTP 'Content-Type' header with this value. 
""" out_hdrs = dict.copy(self.headers) if kwargs.get("accept"): out_hdrs['accept'] = kwargs.get("accept") if body: if isinstance(body, dict): body = six.text_type(jsonutils.dumps(body)) if body and len(body): out_hdrs['content-length'] = len(body) zfssaurl = self._path(path, kwargs.get("base_path")) req = urlrequest.Request(zfssaurl, body, out_hdrs) req.get_method = lambda: request maxreqretries = kwargs.get("maxreqretries", 10) retry = 0 response = None log_debug_msg(self, 'Request: %s %s' % (request, zfssaurl)) log_debug_msg(self, 'Out headers: %s' % out_hdrs) if body and body != '': log_debug_msg(self, 'Body: %s' % body) while retry < maxreqretries: try: response = urlrequest.urlopen(req, timeout=self.timeout) except urlerror.HTTPError as err: if err.code == http_client.NOT_FOUND: log_debug_msg(self, 'REST Not Found: %s' % err.code) else: log_debug_msg(self, ('REST Not Available: %s') % err.code) if (err.code == http_client.SERVICE_UNAVAILABLE and retry < maxreqretries): retry += 1 time.sleep(1) log_debug_msg(self, ('Server Busy retry request: %s') % retry) continue if ((err.code == http_client.UNAUTHORIZED or err.code == http_client.INTERNAL_SERVER_ERROR) and '/access/v1' not in zfssaurl): try: log_debug_msg(self, ('Authorizing request: ' '%(zfssaurl)s' 'retry: %(retry)d .') % {'zfssaurl': zfssaurl, 'retry': retry}) self._authorize() req.add_header('x-auth-session', self.headers['x-auth-session']) except RestClientError: log_debug_msg(self, ('Cannot authorize.')) retry += 1 time.sleep(1) continue return RestResult(self.log_function, err=err) except urlerror.URLError as err: log_debug_msg(self, ('URLError: %s') % err.reason) raise RestClientError(-1, name="ERR_URLError", message=err.reason) break if ((response and response.getcode() == http_client.SERVICE_UNAVAILABLE) and retry >= maxreqretries): raise RestClientError(response.getcode(), name="ERR_HTTPError", message="REST Not Available: Disabled") return RestResult(self.log_function, response=response) def get(self, path, **kwargs): """Make an HTTP GET request. :param path: Path to resource. """ return self.request(path, "GET", **kwargs) def post(self, path, body="", **kwargs): """Make an HTTP POST request. :param path: Path to resource. :param body: Post data content. """ return self.request(path, "POST", body, **kwargs) def put(self, path, body="", **kwargs): """Make an HTTP PUT request. :param path: Path to resource. :param body: Put data content. """ return self.request(path, "PUT", body, **kwargs) def delete(self, path, **kwargs): """Make an HTTP DELETE request. :param path: Path to resource that will be deleted. """ return self.request(path, "DELETE", **kwargs) def head(self, path, **kwargs): """Make an HTTP HEAD request. :param path: Path to resource. """ return self.request(path, "HEAD", **kwargs) manila-2.0.0/manila/share/drivers/zfssa/zfssarest.py0000664000567000056710000004106112701407107023661 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ ZFS Storage Appliance Proxy """ from oslo_log import log from oslo_serialization import jsonutils from manila import exception from manila.i18n import _ from manila.i18n import _LE from manila.share.drivers.zfssa import restclient LOG = log.getLogger(__name__) def factory_restclient(url, logfunc, **kwargs): return restclient.RestClientURL(url, logfunc, **kwargs) class ZFSSAApi(object): """ZFSSA API proxy class.""" pools_path = '/api/storage/v1/pools' pool_path = pools_path + '/%s' projects_path = pool_path + '/projects' project_path = projects_path + '/%s' shares_path = project_path + '/filesystems' share_path = shares_path + '/%s' snapshots_path = share_path + '/snapshots' snapshot_path = snapshots_path + '/%s' clone_path = snapshot_path + '/clone' service_path = '/api/service/v1/services/%s/enable' def __init__(self): self.host = None self.url = None self.rclient = None def __del__(self): if self.rclient: del self.rclient def rest_get(self, path, expected): ret = self.rclient.get(path) if ret.status != expected: exception_msg = (_('Rest call to %(host)s %(path)s failed.' 'Status: %(status)d Message: %(data)s') % {'host': self.host, 'path': path, 'status': ret.status, 'data': ret.data}) LOG.error(exception_msg) raise exception.ShareBackendException(msg=exception_msg) return ret def _is_pool_owned(self, pdata): """returns True if the pool's owner is the same as the host.""" svc = '/api/system/v1/version' ret = self.rest_get(svc, restclient.Status.OK) vdata = jsonutils.loads(ret.data) return (vdata['version']['asn'] == pdata['pool']['asn'] and vdata['version']['nodename'] == pdata['pool']['owner']) def set_host(self, host, timeout=None): self.host = host self.url = "https://%s:215" % self.host self.rclient = factory_restclient(self.url, LOG.debug, timeout=timeout) def login(self, auth_str): """Login to the appliance.""" if self.rclient and not self.rclient.islogin(): self.rclient.login(auth_str) def enable_service(self, service): """Enable the specified service.""" svc = self.service_path % service ret = self.rclient.put(svc) if ret.status != restclient.Status.ACCEPTED: exception_msg = (_("Cannot enable %s service.") % service) raise exception.ShareBackendException(msg=exception_msg) def verify_avail_space(self, pool, project, share, size): """Check if there is enough space available to a new share.""" self.verify_project(pool, project) avail = self.get_project_stats(pool, project) if avail < size: exception_msg = (_('Error creating ' 'share: %(share)s on ' 'pool: %(pool)s. ' 'Not enough space.') % {'share': share, 'pool': pool}) raise exception.ShareBackendException(msg=exception_msg) def get_pool_stats(self, pool): """Get space_available and used properties of a pool. returns (avail, used). 
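        Values come from the 'usage' section of the pool's REST payload
        (presumably raw byte counts as reported by the appliance); an
        InvalidInput error is raised if the request fails or the pool is
        not owned by the configured host.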
""" svc = self.pool_path % pool ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: exception_msg = (_('Error getting pool stats: ' 'pool: %(pool)s ' 'return code: %(ret.status)d ' 'message: %(ret.data)s.') % {'pool': pool, 'ret.status': ret.status, 'ret.data': ret.data}) raise exception.InvalidInput(reason=exception_msg) val = jsonutils.loads(ret.data) if not self._is_pool_owned(val): exception_msg = (_('Error pool ownership: ' 'pool %(pool)s is not owned ' 'by %(host)s.') % {'pool': pool, 'host': self.host}) raise exception.InvalidInput(reason=pool) avail = val['pool']['usage']['available'] used = val['pool']['usage']['used'] return avail, used def get_project_stats(self, pool, project): """Get space_available of a project. Used to check whether a project has enough space (after reservation) or not. """ svc = self.project_path % (pool, project) ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: exception_msg = (_('Error getting project stats: ' 'pool: %(pool)s ' 'project: %(project)s ' 'return code: %(ret.status)d ' 'message: %(ret.data)s.') % {'pool': pool, 'project': project, 'ret.status': ret.status, 'ret.data': ret.data}) raise exception.InvalidInput(reason=exception_msg) val = jsonutils.loads(ret.data) avail = val['project']['space_available'] return avail def create_project(self, pool, project, arg): """Create a project on a pool. Check first whether the pool exists.""" self.verify_pool(pool) svc = self.project_path % (pool, project) ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: svc = self.projects_path % pool ret = self.rclient.post(svc, arg) if ret.status != restclient.Status.CREATED: exception_msg = (_('Error creating project: ' '%(project)s on ' 'pool: %(pool)s ' 'return code: %(ret.status)d ' 'message: %(ret.data)s.') % {'project': project, 'pool': pool, 'ret.status': ret.status, 'ret.data': ret.data}) raise exception.ShareBackendException(msg=exception_msg) def verify_pool(self, pool): """Checks whether pool exists.""" svc = self.pool_path % pool self.rest_get(svc, restclient.Status.OK) def verify_project(self, pool, project): """Checks whether project exists.""" svc = self.project_path % (pool, project) ret = self.rest_get(svc, restclient.Status.OK) return ret def create_share(self, pool, project, share): """Create a share in the specified pool and project.""" self.verify_avail_space(pool, project, share, share['quota']) svc = self.share_path % (pool, project, share['name']) ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: svc = self.shares_path % (pool, project) ret = self.rclient.post(svc, share) if ret.status != restclient.Status.CREATED: exception_msg = (_('Error creating ' 'share: %(name)s ' 'return code: %(ret.status)d ' 'message: %(ret.data)s.') % {'name': share['name'], 'ret.status': ret.status, 'ret.data': ret.data}) raise exception.ShareBackendException(msg=exception_msg) else: exception_msg = (_('Share with name %s already exists.') % share['name']) raise exception.ShareBackendException(msg=exception_msg) def get_share(self, pool, project, share): """Return share properties.""" svc = self.share_path % (pool, project, share) ret = self.rest_get(svc, restclient.Status.OK) val = jsonutils.loads(ret.data) return val['filesystem'] def modify_share(self, pool, project, share, arg): """Modify a set of properties of a share.""" svc = self.share_path % (pool, project, share) ret = self.rclient.put(svc, arg) if ret.status != restclient.Status.ACCEPTED: exception_msg = (_('Error modifying %(arg)s ' ' of 
share %(id)s.') % {'arg': arg, 'id': share}) raise exception.ShareBackendException(msg=exception_msg) def delete_share(self, pool, project, share): """Delete a share. The function assumes the share has no clone or snapshot. """ svc = self.share_path % (pool, project, share) ret = self.rclient.delete(svc) if ret.status != restclient.Status.NO_CONTENT: exception_msg = (_LE('Error deleting ' 'share: %(share)s to ' 'pool: %(pool)s ' 'project: %(project)s ' 'return code: %(ret.status)d ' 'message: %(ret.data)s.'), {'share': share, 'pool': pool, 'project': project, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) def create_snapshot(self, pool, project, share, snapshot): """Create a snapshot of the given share.""" svc = self.snapshots_path % (pool, project, share) arg = {'name': snapshot} ret = self.rclient.post(svc, arg) if ret.status != restclient.Status.CREATED: exception_msg = (_('Error creating ' 'snapshot: %(snapshot)s on ' 'share: %(share)s to ' 'pool: %(pool)s ' 'project: %(project)s ' 'return code: %(ret.status)d ' 'message: %(ret.data)s.') % {'snapshot': snapshot, 'share': share, 'pool': pool, 'project': project, 'ret.status': ret.status, 'ret.data': ret.data}) raise exception.ShareBackendException(msg=exception_msg) def delete_snapshot(self, pool, project, share, snapshot): """Delete a snapshot that has no clone.""" svc = self.snapshot_path % (pool, project, share, snapshot) ret = self.rclient.delete(svc) if ret.status != restclient.Status.NO_CONTENT: exception_msg = (_('Error deleting ' 'snapshot: %(snapshot)s on ' 'share: %(share)s to ' 'pool: %(pool)s ' 'project: %(project)s ' 'return code: %(ret.status)d ' 'message: %(ret.data)s.') % {'snapshot': snapshot, 'share': share, 'pool': pool, 'project': project, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.ShareBackendException(msg=exception_msg) def clone_snapshot(self, pool, project, snapshot, clone, arg): """Create a new share from the given snapshot.""" self.verify_avail_space(pool, project, clone['id'], clone['size']) svc = self.clone_path % (pool, project, snapshot['share_id'], snapshot['id']) ret = self.rclient.put(svc, arg) if ret.status != restclient.Status.CREATED: exception_msg = (_('Error cloning ' 'snapshot: %(snapshot)s on ' 'share: %(share)s of ' 'Pool: %(pool)s ' 'project: %(project)s ' 'return code: %(ret.status)d ' 'message: %(ret.data)s.') % {'snapshot': snapshot['id'], 'share': snapshot['share_id'], 'pool': pool, 'project': project, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.ShareBackendException(msg=exception_msg) def has_clones(self, pool, project, share, snapshot): """Check whether snapshot has existing clones.""" svc = self.snapshot_path % (pool, project, share, snapshot) ret = self.rest_get(svc, restclient.Status.OK) val = jsonutils.loads(ret.data) return val['snapshot']['numclones'] != 0 def allow_access_nfs(self, pool, project, share, access): """Allow an IP access to a share through NFS.""" if access['access_type'] != 'ip': reason = _('Only ip access type allowed.') raise exception.InvalidShareAccess(reason) ip = access['access_to'] details = self.get_share(pool, project, share) sharenfs = details['sharenfs'] if sharenfs == 'on' or sharenfs == 'rw': LOG.debug('Share %s has read/write permission' 'open to all.', share) return if sharenfs == 'off': sharenfs = 'sec=sys' if ip in sharenfs: LOG.debug('Access to share %(share)s via NFS ' 'already granted to %(ip)s.', {'share': share, 'ip': ip}) return 
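        # Append a read-write host entry to the existing 'sharenfs' value;
        # bare IPs get a /32 suffix so the exact token can be found and
        # removed later by deny_access_nfs(). Illustrative result:
        # 'sec=sys,rw=@10.0.0.5/32' (the address is an example, not a
        # default).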
entry = (',rw=@%s' % ip) if '/' not in ip: entry = "%s/32" % entry arg = {'sharenfs': sharenfs + entry} self.modify_share(pool, project, share, arg) def deny_access_nfs(self, pool, project, share, access): """Denies access of an IP to a share through NFS. Since sharenfs property allows a combination of mutiple syntaxes: sharenfs="sec=sys,rw=@first_ip,rw=@second_ip" sharenfs="sec=sys,rw=@first_ip:@second_ip" sharenfs="sec=sys,rw=@first_ip:@second_ip,rw=@third_ip" The function checks what syntax is used and remove the IP accordingly. """ if access['access_type'] != 'ip': reason = _('Only ip access type allowed.') raise exception.InvalidShareAccess(reason) ip = access['access_to'] entry = ('@%s' % ip) if '/' not in ip: entry = "%s/32" % entry details = self.get_share(pool, project, share) if entry not in details['sharenfs']: LOG.debug('IP %(ip)s does not have access ' 'to Share %(share)s via NFS.', {'ip': ip, 'share': share}) return sharenfs = str(details['sharenfs']) argval = '' if sharenfs.find((',rw=%s:' % entry)) >= 0: argval = sharenfs.replace(('%s:' % entry), '') elif sharenfs.find((',rw=%s' % entry)) >= 0: argval = sharenfs.replace((',rw=%s' % entry), '') elif sharenfs.find((':%s' % entry)) >= 0: argval = sharenfs.replace((':%s' % entry), '') arg = {'sharenfs': argval} LOG.debug('deny_access: %s', argval) self.modify_share(pool, project, share, arg) manila-2.0.0/manila/share/drivers/hdfs/0000775000567000056710000000000012701407265021064 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/hdfs/__init__.py0000664000567000056710000000000012701407107023156 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/hdfs/hdfs_native.py0000664000567000056710000004102712701407112023723 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Intel, Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """HDFS native protocol (hdfs) driver for manila shares. Manila share is a directory in HDFS. And this share does not use service VM instance (share server). The instance directly talks to the the HDFS cluster. The initial version only supports single namenode and flat network. Configuration Requirements: To enable access control, HDFS file system must have ACLs enabled. 
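Example backend section in manila.conf (illustrative; the namenode address,
SSH user and key path below are placeholders, not defaults shipped with the
driver):

    [hdfs1]
    share_backend_name = HDFS-Native
    share_driver = manila.share.drivers.hdfs.hdfs_native.HDFSNativeShareDriver
    driver_handles_share_servers = False
    hdfs_namenode_ip = 172.16.20.11
    hdfs_namenode_port = 9000
    hdfs_ssh_name = hdfs
    hdfs_ssh_private_key = /home/hdfs/.ssh/id_rsa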
""" import math import os import pipes import socket from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log from oslo_utils import units import six from manila import exception from manila.i18n import _ from manila.share import driver from manila import utils LOG = log.getLogger(__name__) hdfs_native_share_opts = [ cfg.StrOpt('hdfs_namenode_ip', help='The IP of the HDFS namenode.'), cfg.PortOpt('hdfs_namenode_port', default=9000, help='The port of HDFS namenode service.'), cfg.PortOpt('hdfs_ssh_port', default=22, help='HDFS namenode SSH port.'), cfg.StrOpt('hdfs_ssh_name', help='HDFS namenode ssh login name.'), cfg.StrOpt('hdfs_ssh_pw', help='HDFS namenode SSH login password, ' 'This parameter is not necessary, if ' '\'hdfs_ssh_private_key\' is configured.'), cfg.StrOpt('hdfs_ssh_private_key', help='Path to HDFS namenode SSH private ' 'key for login.'), ] CONF = cfg.CONF CONF.register_opts(hdfs_native_share_opts) class HDFSNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver): """HDFS Share Driver. Executes commands relating to shares. API version history: 1.0 - Initial Version """ def __init__(self, *args, **kwargs): super(HDFSNativeShareDriver, self).__init__(False, *args, **kwargs) self.configuration.append_config_values(hdfs_native_share_opts) self.backend_name = self.configuration.safe_get( 'share_backend_name') or 'HDFS-Native' self.ssh_connections = {} self._hdfs_execute = None self._hdfs_bin = None self._hdfs_base_path = None def do_setup(self, context): """Do initialization while the share driver starts.""" super(HDFSNativeShareDriver, self).do_setup(context) host = self.configuration.hdfs_namenode_ip local_hosts = socket.gethostbyname_ex(socket.gethostname())[2] if host in local_hosts: self._hdfs_execute = self._hdfs_local_execute else: self._hdfs_execute = self._hdfs_remote_execute self._hdfs_bin = 'hdfs' self._hdfs_base_path = ( 'hdfs://' + self.configuration.hdfs_namenode_ip + ':' + six.text_type(self.configuration.hdfs_namenode_port)) def _hdfs_local_execute(self, *cmd, **kwargs): if 'run_as_root' not in kwargs: kwargs.update({'run_as_root': False}) return utils.execute(*cmd, **kwargs) def _hdfs_remote_execute(self, *cmd, **kwargs): host = self.configuration.hdfs_namenode_ip check_exit_code = kwargs.pop('check_exit_code', False) return self._run_ssh(host, cmd, check_exit_code) def _run_ssh(self, host, cmd_list, check_exit_code=False): command = ' '.join(pipes.quote(cmd_arg) for cmd_arg in cmd_list) connection = self.ssh_connections.get(host) if not connection: hdfs_ssh_name = self.configuration.hdfs_ssh_name password = self.configuration.hdfs_ssh_pw privatekey = self.configuration.hdfs_ssh_private_key hdfs_ssh_port = self.configuration.hdfs_ssh_port ssh_conn_timeout = self.configuration.ssh_conn_timeout min_size = self.configuration.ssh_min_pool_conn max_size = self.configuration.ssh_max_pool_conn ssh_pool = utils.SSHPool(host, hdfs_ssh_port, ssh_conn_timeout, hdfs_ssh_name, password=password, privatekey=privatekey, min_size=min_size, max_size=max_size) ssh = ssh_pool.create() self.ssh_connections[host] = (ssh_pool, ssh) else: ssh_pool, ssh = connection if not ssh.get_transport().is_active(): ssh_pool.remove(ssh) ssh = ssh_pool.create() self.ssh_connections[host] = (ssh_pool, ssh) try: return processutils.ssh_execute( ssh, command, check_exit_code=check_exit_code) except Exception as e: msg = (_('Error running SSH command: %(cmd)s. 
' 'Error: %(excmsg)s.') % {'cmd': command, 'excmsg': six.text_type(e)}) LOG.error(msg) raise exception.HDFSException(msg) def _set_share_size(self, share, size=None): share_dir = '/' + share['name'] if not size: sizestr = six.text_type(share['size']) + 'g' else: sizestr = six.text_type(size) + 'g' try: self._hdfs_execute(self._hdfs_bin, 'dfsadmin', '-setSpaceQuota', sizestr, share_dir) except exception.ProcessExecutionError as e: msg = (_('Failed to set space quota for the ' 'share %(sharename)s. Error: %(excmsg)s.') % {'sharename': share['name'], 'excmsg': six.text_type(e)}) LOG.error(msg) raise exception.HDFSException(msg) def _create_share(self, share): """Creates a share.""" if share['share_proto'].lower() != 'hdfs': msg = _('Only HDFS protocol supported!') LOG.error(msg) raise exception.HDFSException(msg) share_dir = '/' + share['name'] try: self._hdfs_execute(self._hdfs_bin, 'dfs', '-mkdir', share_dir) except exception.ProcessExecutionError as e: msg = (_('Failed to create directory in hdfs for the ' 'share %(sharename)s. Error: %(excmsg)s.') % {'sharename': share['name'], 'excmsg': six.text_type(e)}) LOG.error(msg) raise exception.HDFSException(msg) # set share size self._set_share_size(share) try: self._hdfs_execute(self._hdfs_bin, 'dfsadmin', '-allowSnapshot', share_dir) except exception.ProcessExecutionError as e: msg = (_('Failed to allow snapshot for the ' 'share %(sharename)s. Error: %(excmsg)s.') % {'sharename': share['name'], 'excmsg': six.text_type(e)}) LOG.error(msg) raise exception.HDFSException(msg) def _get_share_path(self, share): """Return share path on storage provider.""" return os.path.join(self._hdfs_base_path, share['name']) def _get_snapshot_path(self, snapshot): """Return snapshot path on storage provider.""" snapshot_dir = '.snapshot' return os.path.join('/', snapshot['share_name'], snapshot_dir, snapshot['name']) def get_network_allocations_number(self): return 0 def create_share(self, context, share, share_server=None): """Create a HDFS directory which acted as a share.""" self._create_share(share) return self._get_share_path(share) def create_share_from_snapshot(self, context, share, snapshot, share_server=None): """Creates a snapshot.""" self._create_share(share) share_path = '/' + share['name'] snapshot_path = self._get_snapshot_path(snapshot) try: # check if the directory is empty (out, __) = self._hdfs_execute( self._hdfs_bin, 'dfs', '-ls', snapshot_path) # only copy files when the snapshot directory is not empty if out: copy_path = snapshot_path + "/*" cmd = [self._hdfs_bin, 'dfs', '-cp', copy_path, share_path] self._hdfs_execute(*cmd) except exception.ProcessExecutionError as e: msg = (_('Failed to create share %(sharename)s from ' 'snapshot %(snapshotname)s. Error: %(excmsg)s.') % {'sharename': share['name'], 'snapshotname': snapshot['name'], 'excmsg': six.text_type(e)}) LOG.error(msg) raise exception.HDFSException(msg) return self._get_share_path(share) def create_snapshot(self, context, snapshot, share_server=None): """Creates a snapshot.""" share_dir = '/' + snapshot['share_name'] snapshot_name = snapshot['name'] cmd = [self._hdfs_bin, 'dfs', '-createSnapshot', share_dir, snapshot_name] try: self._hdfs_execute(*cmd) except exception.ProcessExecutionError as e: msg = (_('Failed to create snapshot %(snapshotname)s for ' 'the share %(sharename)s. 
Error: %(excmsg)s.') % {'snapshotname': snapshot_name, 'sharename': snapshot['share_name'], 'excmsg': six.text_type(e)}) LOG.error(msg) raise exception.HDFSException(msg) def delete_share(self, context, share, share_server=None): """Deletes share storage.""" share_dir = '/' + share['name'] cmd = [self._hdfs_bin, 'dfs', '-rm', '-r', share_dir] try: self._hdfs_execute(*cmd) except exception.ProcessExecutionError as e: msg = (_('Failed to delete share %(sharename)s. ' 'Error: %(excmsg)s.') % {'sharename': share['name'], 'excmsg': six.text_type(e)}) LOG.error(msg) raise exception.HDFSException(msg) def delete_snapshot(self, context, snapshot, share_server=None): """Deletes a snapshot.""" share_dir = '/' + snapshot['share_name'] cmd = [self._hdfs_bin, 'dfs', '-deleteSnapshot', share_dir, snapshot['name']] try: self._hdfs_execute(*cmd) except exception.ProcessExecutionError as e: msg = (_('Failed to delete snapshot %(snapshotname)s. ' 'Error: %(excmsg)s.') % {'snapshotname': snapshot['name'], 'excmsg': six.text_type(e)}) LOG.error(msg) raise exception.HDFSException(msg) def ensure_share(self, context, share, share_server=None): """Ensure the storage are exported.""" def allow_access(self, context, share, access, share_server=None): """Allows access to the share for a given user.""" if access['access_type'] != 'user': msg = _("Only 'user' access type allowed!") LOG.error(msg) raise exception.InvalidShareAccess(msg) # Note(jun): For directories in HDFS, the x permission is # required to access a child of the directory. if access['access_level'] == 'rw': access_level = 'rwx' elif access['access_level'] == 'ro': access_level = 'r-x' else: msg = (_('The access level %(accesslevel)s was unsupported.') % {'accesslevel': access['access_level']}) LOG.error(msg) raise exception.InvalidShareAccess(msg) share_dir = '/' + share['name'] user_access = ':'.join([access['access_type'], access['access_to'], access_level]) cmd = [self._hdfs_bin, 'dfs', '-setfacl', '-m', '-R', user_access, share_dir] try: (__, out) = self._hdfs_execute(*cmd, check_exit_code=True) except exception.ProcessExecutionError as e: msg = (_('Failed to set ACL of share %(sharename)s for ' 'user: %(username)s' 'Error: %(excmsg)s.') % {'sharename': share['name'], 'username': access['access_to'], 'excmsg': six.text_type(e)}) LOG.error(msg) raise exception.HDFSException(msg) def deny_access(self, context, share, access, share_server=None): """Denies the access to the share for a given user.""" share_dir = '/' + share['name'] access_name = ':'.join([access['access_type'], access['access_to']]) cmd = [self._hdfs_bin, 'dfs', '-setfacl', '-x', '-R', access_name, share_dir] try: (__, out) = self._hdfs_execute(*cmd, check_exit_code=True) except exception.ProcessExecutionError as e: msg = (_('Failed to deny ACL of share %(sharename)s for ' 'user: %(username)s' 'Error: %(excmsg)s.') % {'sharename': share['name'], 'username': access['access_to'], 'excmsg': six.text_type(e)}) LOG.error(msg) raise exception.HDFSException(msg) def extend_share(self, share, new_size, share_server=None): """Extend share storage.""" self._set_share_size(share, new_size) def _check_hdfs_state(self): try: (out, __) = self._hdfs_execute(self._hdfs_bin, 'fsck', '/') except exception.ProcessExecutionError as e: msg = (_('Failed to check hdfs state. 
Error: %(excmsg)s.') % {'excmsg': six.text_type(e)}) LOG.error(msg) raise exception.HDFSException(msg) if 'HEALTHY' in out: return True else: return False def check_for_setup_error(self): """Return an error if the prerequisites are met.""" if not self.configuration.hdfs_namenode_ip: msg = _('Not specify the hdfs cluster yet! ' 'Add the ip of hdfs namenode in the ' 'hdfs_namenode_ip configuration parameter.') LOG.error(msg) raise exception.HDFSException(msg) if not self._check_hdfs_state(): msg = _('HDFS is not in healthy state.') LOG.error(msg) raise exception.HDFSException(msg) def _get_available_capacity(self): """Calculate available space on path.""" try: (out, __) = self._hdfs_execute(self._hdfs_bin, 'dfsadmin', '-report') except exception.ProcessExecutionError as e: msg = (_('Failed to check available capacity for hdfs.' 'Error: %(excmsg)s.') % {'excmsg': six.text_type(e)}) LOG.error(msg) raise exception.HDFSException(msg) lines = out.splitlines() try: total = int(lines[1].split()[2]) free = int(lines[2].split()[2]) except (IndexError, ValueError) as e: msg = (_('Failed to get hdfs capacity info. ' 'Error: %(excmsg)s.') % {'excmsg': six.text_type(e)}) LOG.error(msg) raise exception.HDFSException(msg) return total, free def _update_share_stats(self): """Retrieves stats info of share directories group.""" data = dict(share_backend_name=self.backend_name, storage_protocol='HDFS', reserved_percentage=self.configuration. reserved_share_percentage) total, free = self._get_available_capacity() data['total_capacity_gb'] = math.ceil(total / units.Gi) data['free_capacity_gb'] = math.ceil(free / units.Gi) super(HDFSNativeShareDriver, self)._update_share_stats(data) manila-2.0.0/manila/share/drivers/service_instance.py0000664000567000056710000013726012701407107024042 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 NetApp, Inc. # Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Module for managing nova instances for share drivers.""" import abc import os import socket import time import netaddr from oslo_config import cfg from oslo_log import log from oslo_utils import importutils import six from manila.common import constants as const from manila import compute from manila import context from manila import exception from manila.i18n import _ from manila.i18n import _LW from manila.network.linux import ip_lib from manila.network.neutron import api as neutron from manila import utils LOG = log.getLogger(__name__) NEUTRON_NAME = "neutron" NOVA_NAME = "nova" share_servers_handling_mode_opts = [ cfg.StrOpt( "service_image_name", default="manila-service-image", help="Name of image in Glance, that will be used for service instance " "creation. Only used if driver_handles_share_servers=True."), cfg.StrOpt( "service_instance_name_template", default="manila_service_instance_%s", help="Name of service instance. 
" "Only used if driver_handles_share_servers=True."), cfg.StrOpt( "manila_service_keypair_name", default="manila-service", help="Keypair name that will be created and used for service " "instances. Only used if driver_handles_share_servers=True."), cfg.StrOpt( "path_to_public_key", default="~/.ssh/id_rsa.pub", help="Path to hosts public key. " "Only used if driver_handles_share_servers=True."), cfg.StrOpt( "service_instance_security_group", default="manila-service", help="Security group name, that will be used for " "service instance creation. " "Only used if driver_handles_share_servers=True."), cfg.IntOpt( "service_instance_flavor_id", default=100, help="ID of flavor, that will be used for service instance " "creation. Only used if driver_handles_share_servers=True."), cfg.StrOpt( "service_network_name", default="manila_service_network", help="Name of manila service network. Used only with Neutron. " "Only used if driver_handles_share_servers=True."), cfg.StrOpt( "service_network_cidr", default="10.254.0.0/16", help="CIDR of manila service network. Used only with Neutron and " "if driver_handles_share_servers=True."), cfg.IntOpt( "service_network_division_mask", default=28, help="This mask is used for dividing service network into " "subnets, IP capacity of subnet with this mask directly " "defines possible amount of created service VMs " "per tenant's subnet. Used only with Neutron " "and if driver_handles_share_servers=True."), cfg.StrOpt( "interface_driver", default="manila.network.linux.interface.OVSInterfaceDriver", help="Vif driver. Used only with Neutron and " "if driver_handles_share_servers=True."), cfg.BoolOpt( "connect_share_server_to_tenant_network", default=False, help="Attach share server directly to share network. " "Used only with Neutron and " "if driver_handles_share_servers=True."), cfg.StrOpt( "service_instance_network_helper_type", default=NEUTRON_NAME, help="Allowed values are %s. " % [NOVA_NAME, NEUTRON_NAME] + "Only used if driver_handles_share_servers=True."), cfg.StrOpt( "admin_network_id", help="ID of neutron network used to communicate with admin network," " to create additional admin export locations on."), cfg.StrOpt( "admin_subnet_id", help="ID of neutron subnet used to communicate with admin network," " to create additional admin export locations on. " "Related to 'admin_network_id'."), ] no_share_servers_handling_mode_opts = [ cfg.StrOpt( "service_instance_name_or_id", help="Name or ID of service instance in Nova to use for share " "exports. Used only when share servers handling is disabled."), cfg.StrOpt( "service_net_name_or_ip", help="Can be either name of network that is used by service " "instance within Nova to get IP address or IP address itself " "for managing shares there. " "Used only when share servers handling is disabled."), cfg.StrOpt( "tenant_net_name_or_ip", help="Can be either name of network that is used by service " "instance within Nova to get IP address or IP address itself " "for exporting shares. 
" "Used only when share servers handling is disabled."), ] common_opts = [ cfg.StrOpt( "service_instance_user", help="User in service instance that will be used for authentication."), cfg.StrOpt( "service_instance_password", secret=True, help="Password for service instance user."), cfg.StrOpt( "path_to_private_key", help="Path to host's private key."), cfg.IntOpt( "max_time_to_build_instance", default=300, help="Maximum time in seconds to wait for creating service instance."), ] CONF = cfg.CONF class ServiceInstanceManager(object): """Manages nova instances for various share drivers. This class provides following external methods: 1. set_up_service_instance: creates instance and sets up share infrastructure. 2. ensure_service_instance: ensure service instance is available. 3. delete_service_instance: removes service instance and network infrastructure. """ _INSTANCE_CONNECTION_PROTO = "SSH" def get_config_option(self, key): """Returns value of config option. :param key: key of config' option. :returns: str -- value of config's option. first priority is driver's config, second priority is global config. """ if self.driver_config: return self.driver_config.safe_get(key) return CONF.get(key) def _get_network_helper(self): network_helper_type = ( self.get_config_option( "service_instance_network_helper_type").lower()) if network_helper_type == NEUTRON_NAME: return NeutronNetworkHelper(self) elif network_helper_type == NOVA_NAME: return NovaNetworkHelper(self) else: raise exception.ManilaException( _("Wrong value '%(provided)s' for config opt " "'service_instance_network_helper_type'. " "Allowed values are %(allowed)s.") % dict( provided=network_helper_type, allowed=[NOVA_NAME, NEUTRON_NAME])) def __init__(self, driver_config=None): super(ServiceInstanceManager, self).__init__() self.driver_config = driver_config if self.driver_config: self.driver_config.append_config_values(common_opts) if self.get_config_option("driver_handles_share_servers"): self.driver_config.append_config_values( share_servers_handling_mode_opts) else: self.driver_config.append_config_values( no_share_servers_handling_mode_opts) else: CONF.register_opts(common_opts) if self.get_config_option("driver_handles_share_servers"): CONF.register_opts(share_servers_handling_mode_opts) else: CONF.register_opts(no_share_servers_handling_mode_opts) if not self.get_config_option("service_instance_user"): raise exception.ServiceInstanceException( _('Service instance user is not specified.')) self.admin_context = context.get_admin_context() self._execute = utils.execute self.compute_api = compute.API() self.path_to_private_key = self.get_config_option( "path_to_private_key") self.max_time_to_build_instance = self.get_config_option( "max_time_to_build_instance") if self.get_config_option("driver_handles_share_servers"): self.path_to_public_key = self.get_config_option( "path_to_public_key") self._network_helper = None @property @utils.synchronized("instantiate_network_helper") def network_helper(self): if not self._network_helper: self._network_helper = self._get_network_helper() self._network_helper.setup_connectivity_with_service_instances() return self._network_helper def get_common_server(self): data = { 'public_address': None, 'private_address': None, 'service_net_name_or_ip': self.get_config_option( 'service_net_name_or_ip'), 'tenant_net_name_or_ip': self.get_config_option( 'tenant_net_name_or_ip'), } data['instance'] = self.compute_api.server_get_by_name_or_id( self.admin_context, 
self.get_config_option('service_instance_name_or_id')) if netaddr.valid_ipv4(data['service_net_name_or_ip']): data['private_address'] = [data['service_net_name_or_ip']] else: data['private_address'] = self._get_addresses_by_network_name( data['service_net_name_or_ip'], data['instance']) if netaddr.valid_ipv4(data['tenant_net_name_or_ip']): data['public_address'] = [data['tenant_net_name_or_ip']] else: data['public_address'] = self._get_addresses_by_network_name( data['tenant_net_name_or_ip'], data['instance']) if not (data['public_address'] and data['private_address']): raise exception.ManilaException( "Can not find one of net addresses for service instance. " "Instance: %(instance)s, " "private_address: %(private_address)s, " "public_address: %(public_address)s." % data) share_server = { 'username': self.get_config_option('service_instance_user'), 'password': self.get_config_option('service_instance_password'), 'pk_path': self.path_to_private_key, 'instance_id': data['instance']['id'], } for key in ('private_address', 'public_address'): data[key + '_v4'] = None for address in data[key]: if netaddr.valid_ipv4(address): data[key + '_v4'] = address break share_server['ip'] = data['private_address_v4'] share_server['public_address'] = data['public_address_v4'] return {'backend_details': share_server} def _get_addresses_by_network_name(self, net_name, server): net_ips = [] if 'networks' in server and net_name in server['networks']: net_ips = server['networks'][net_name] elif 'addresses' in server and net_name in server['addresses']: net_ips = [addr['addr'] for addr in server['addresses'][net_name]] return net_ips def _get_service_instance_name(self, share_server_id): """Returns service vms name.""" if self.driver_config: # Make service instance name unique for multibackend installation name = "%s_%s" % (self.driver_config.config_group, share_server_id) else: name = share_server_id return self.get_config_option("service_instance_name_template") % name def _get_server_ip(self, server, net_name): """Returns service IP address of service instance.""" net_ips = self._get_addresses_by_network_name(net_name, server) if not net_ips: msg = _("Failed to get service instance IP address. " "Service network name is '%(net_name)s' " "and provided data are '%(data)s'.") msg = msg % {'net_name': net_name, 'data': six.text_type(server)} raise exception.ServiceInstanceException(msg) return net_ips[0] @utils.synchronized( "service_instance_get_or_create_security_group", external=True) def _get_or_create_security_group(self, context, name=None, description=None): """Get or create security group for service_instance. :param context: context, that should be used :param name: this is used for selection/creation of sec.group :param description: this is used on sec.group creation step only :returns: SecurityGroup -- security group instance from Nova :raises: exception.ServiceInstanceException. """ name = name or self.get_config_option( "service_instance_security_group") if not name: LOG.warning(_LW("Name for service instance security group is not " "provided. Skipping security group step.")) return None s_groups = [s for s in self.compute_api.security_group_list(context) if s.name == name] if not s_groups: # Creating security group if not description: description = "This security group is intended "\ "to be used by share service." 
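            # Create the group and add one ingress rule per
            # (protocol, (from_port, to_port)) pair defined in
            # const.SERVICE_INSTANCE_SECGROUP_DATA, opened to 0.0.0.0/0.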
LOG.debug("Creating security group with name '%s'.", name) sg = self.compute_api.security_group_create( context, name, description) for protocol, ports in const.SERVICE_INSTANCE_SECGROUP_DATA: self.compute_api.security_group_rule_create( context, parent_group_id=sg.id, ip_protocol=protocol, from_port=ports[0], to_port=ports[1], cidr="0.0.0.0/0", ) elif len(s_groups) > 1: msg = _("Ambiguous security_groups.") raise exception.ServiceInstanceException(msg) else: sg = s_groups[0] return sg def ensure_service_instance(self, context, server): """Ensures that server exists and active.""" if 'instance_id' not in server: LOG.warning(_LW("Unable to check server existence since " "'instance_id' key is not set in share server " "backend details.")) return False try: inst = self.compute_api.server_get(self.admin_context, server['instance_id']) except exception.InstanceNotFound: LOG.warning(_LW("Service instance %s does not exist."), server['instance_id']) return False if inst['status'] == 'ACTIVE': return self._check_server_availability(server) return False def _delete_server(self, context, server_id): """Deletes the server.""" try: self.compute_api.server_get(context, server_id) except exception.InstanceNotFound: LOG.debug("Service instance '%s' was not found. " "Nothing to delete, skipping.", server_id) return self.compute_api.server_delete(context, server_id) t = time.time() while time.time() - t < self.max_time_to_build_instance: try: self.compute_api.server_get(context, server_id) except exception.InstanceNotFound: LOG.debug("Service instance '%s' was deleted " "successfully.", server_id) break time.sleep(2) else: raise exception.ServiceInstanceException( _("Instance '%(id)s' has not been deleted in %(s)ss. " "Giving up.") % { 'id': server_id, 's': self.max_time_to_build_instance}) def set_up_service_instance(self, context, network_info): """Finds or creates and sets up service vm. :param context: defines context, that should be used :param network_info: network info for getting allocations :returns: dict with service instance details :raises: exception.ServiceInstanceException """ instance_name = network_info['server_id'] server = self._create_service_instance( context, instance_name, network_info) instance_details = self._get_new_instance_details(server) if not self._check_server_availability(instance_details): e = exception.ServiceInstanceException( _('%(conn_proto)s connection has not been ' 'established to %(server)s in %(time)ss. 
Giving up.') % { 'conn_proto': self._INSTANCE_CONNECTION_PROTO, 'server': server['ip'], 'time': self.max_time_to_build_instance}) e.detail_data = {'server_details': instance_details} raise e return instance_details def _get_new_instance_details(self, server): instance_details = { 'instance_id': server['id'], 'ip': server['ip'], 'pk_path': server.get('pk_path'), 'subnet_id': server.get('subnet_id'), 'password': self.get_config_option('service_instance_password'), 'username': self.get_config_option('service_instance_user'), 'public_address': server['public_address'], } if server.get('admin_ip'): instance_details['admin_ip'] = server['admin_ip'] if server.get('router_id'): instance_details['router_id'] = server['router_id'] if server.get('service_port_id'): instance_details['service_port_id'] = server['service_port_id'] if server.get('public_port_id'): instance_details['public_port_id'] = server['public_port_id'] if server.get('admin_port_id'): instance_details['admin_port_id'] = server['admin_port_id'] for key in ('password', 'pk_path', 'subnet_id'): if not instance_details[key]: instance_details.pop(key) return instance_details @utils.synchronized("service_instance_get_key", external=True) def _get_key(self, context): """Get ssh key. :param context: defines context, that should be used :returns: tuple with keypair name and path to private key. """ if not (self.path_to_public_key and self.path_to_private_key): return (None, None) path_to_public_key = os.path.expanduser(self.path_to_public_key) path_to_private_key = os.path.expanduser(self.path_to_private_key) if (not os.path.exists(path_to_public_key) or not os.path.exists(path_to_private_key)): return (None, None) keypair_name = self.get_config_option("manila_service_keypair_name") keypairs = [k for k in self.compute_api.keypair_list(context) if k.name == keypair_name] if len(keypairs) > 1: raise exception.ServiceInstanceException(_('Ambiguous keypairs.')) public_key, __ = self._execute('cat', path_to_public_key) if not keypairs: keypair = self.compute_api.keypair_import( context, keypair_name, public_key) else: keypair = keypairs[0] if keypair.public_key != public_key: LOG.debug('Public key differs from existing keypair. ' 'Creating new keypair.') self.compute_api.keypair_delete(context, keypair.id) keypair = self.compute_api.keypair_import( context, keypair_name, public_key) return keypair.name, path_to_private_key def _get_service_image(self, context): """Returns ID of service image for service vm creating.""" service_image_name = self.get_config_option("service_image_name") images = [image.id for image in self.compute_api.image_list(context) if image.name == service_image_name] if len(images) == 1: return images[0] elif not images: raise exception.ServiceInstanceException( _("Image with name '%s' not found.") % service_image_name) else: raise exception.ServiceInstanceException( _("Found more than one image by name '%s'.") % service_image_name) def _create_service_instance(self, context, instance_name, network_info): """Creates service vm and sets up networking for it.""" service_image_id = self._get_service_image(context) key_name, key_path = self._get_key(context) if not (self.get_config_option("service_instance_password") or key_name): raise exception.ServiceInstanceException( _('Neither service instance password nor key are available.')) if not key_path: LOG.warning(_LW( 'No key path is available. May be non-existent key path is ' 'provided. 
Check path_to_private_key (current value ' '%(private_path)s) and path_to_public_key (current value ' '%(public_path)s) in manila configuration file.'), dict( private_path=self.path_to_private_key, public_path=self.path_to_public_key)) network_data = self.network_helper.setup_network(network_info) fail_safe_data = dict( router_id=network_data.get('router_id'), subnet_id=network_data.get('subnet_id')) if network_data.get('service_port'): fail_safe_data['service_port_id'] = ( network_data['service_port']['id']) if network_data.get('public_port'): fail_safe_data['public_port_id'] = ( network_data['public_port']['id']) if network_data.get('admin_port'): fail_safe_data['admin_port_id'] = ( network_data['admin_port']['id']) try: create_kwargs = self._get_service_instance_create_kwargs() service_instance = self.compute_api.server_create( context, name=instance_name, image=service_image_id, flavor=self.get_config_option("service_instance_flavor_id"), key_name=key_name, nics=network_data['nics'], availability_zone=CONF.storage_availability_zone, **create_kwargs) fail_safe_data['instance_id'] = service_instance['id'] service_instance = self.wait_for_instance_to_be_active( service_instance['id'], self.max_time_to_build_instance) security_group = self._get_or_create_security_group(context) if security_group: if self.network_helper.NAME == NOVA_NAME: # NOTE(vponomaryov): Nova-network allows to assign # secgroups only by names. sg_id = security_group.name else: sg_id = security_group.id LOG.debug( "Adding security group '%(sg)s' to server '%(si)s'.", dict(sg=sg_id, si=service_instance["id"])) self.compute_api.add_security_group_to_server( context, service_instance["id"], sg_id) if self.network_helper.NAME == NEUTRON_NAME: ip = (network_data.get('service_port', network_data.get( 'admin_port'))['fixed_ips']) service_instance['ip'] = ip[0]['ip_address'] public_ip = (network_data.get( 'public_port', network_data.get( 'service_port'))['fixed_ips']) service_instance['public_address'] = public_ip[0]['ip_address'] else: net_name = self.network_helper.get_network_name(network_info) service_instance['ip'] = self._get_server_ip( service_instance, net_name) service_instance['public_address'] = service_instance['ip'] except Exception as e: e.detail_data = {'server_details': fail_safe_data} raise service_instance.update(fail_safe_data) service_instance['pk_path'] = key_path for pair in [('router', 'router_id'), ('service_subnet', 'subnet_id')]: if pair[0] in network_data and 'id' in network_data[pair[0]]: service_instance[pair[1]] = network_data[pair[0]]['id'] admin_port = network_data.get('admin_port') if admin_port: try: service_instance['admin_ip'] = ( admin_port['fixed_ips'][0]['ip_address']) except Exception: msg = _("Admin port is being used but Admin IP was not found.") LOG.exception(msg) raise exception.AdminIPNotFound(reason=msg) return service_instance def _get_service_instance_create_kwargs(self): """Specify extra arguments used when creating the service instance. Classes inheriting the service instance manager can use this to easily pass extra arguments such as user data or metadata. 
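        Example override (illustrative; the keyword must be one accepted by
        compute_api.server_create(), 'meta' is assumed here):

            def _get_service_instance_create_kwargs(self):
                return {'meta': {'purpose': 'manila-share-server'}}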
""" return {} def _check_server_availability(self, instance_details): t = time.time() while time.time() - t < self.max_time_to_build_instance: LOG.debug('Checking server availability.') if not self._test_server_connection(instance_details): time.sleep(5) else: return True return False def _test_server_connection(self, server): try: socket.socket().connect((server['ip'], 22)) LOG.debug('Server %s is available via SSH.', server['ip']) return True except socket.error as e: LOG.debug(e) LOG.debug("Server %s is not available via SSH. Waiting...", server['ip']) return False def delete_service_instance(self, context, server_details): """Removes share infrastructure. Deletes service vm and subnet, associated to share network. """ instance_id = server_details.get("instance_id") self._delete_server(context, instance_id) self.network_helper.teardown_network(server_details) def wait_for_instance_to_be_active(self, instance_id, timeout): t = time.time() while time.time() - t < timeout: try: service_instance = self.compute_api.server_get( self.admin_context, instance_id) except exception.InstanceNotFound as e: LOG.debug(e) time.sleep(1) continue instance_status = service_instance['status'] # NOTE(vponomaryov): emptiness of 'networks' field checked as # workaround for nova/neutron bug #1210483. if (instance_status == 'ACTIVE' and service_instance.get('networks', {})): return service_instance elif service_instance['status'] == 'ERROR': break LOG.debug("Waiting for instance %(instance_id)s to be active. " "Current status: %(instance_status)s." % dict(instance_id=instance_id, instance_status=instance_status)) time.sleep(1) raise exception.ServiceInstanceException( _("Instance %(instance_id)s failed to reach active state " "in %(timeout)s seconds. " "Current status: %(instance_status)s.") % dict(instance_id=instance_id, timeout=timeout, instance_status=instance_status)) def reboot_server(self, server, soft_reboot=False): self.compute_api.server_reboot(self.admin_context, server['instance_id'], soft_reboot) @six.add_metaclass(abc.ABCMeta) class BaseNetworkhelper(object): @abc.abstractproperty def NAME(self): """Returns code name of network helper.""" @abc.abstractmethod def __init__(self, service_instance_manager): """Instantiates class and its attrs.""" @abc.abstractmethod def get_network_name(self, network_info): """Returns name of network for service instance.""" @abc.abstractmethod def setup_connectivity_with_service_instances(self): """Sets up connectivity between Manila host and service instances.""" @abc.abstractmethod def setup_network(self, network_info): """Sets up network for service instance.""" @abc.abstractmethod def teardown_network(self, server_details): """Teardowns network resources provided for service instance.""" class NeutronNetworkHelper(BaseNetworkhelper): def __init__(self, service_instance_manager): self.get_config_option = service_instance_manager.get_config_option self.vif_driver = importutils.import_class( self.get_config_option("interface_driver"))() if service_instance_manager.driver_config: self._network_config_group = ( service_instance_manager.driver_config.network_config_group or service_instance_manager.driver_config.config_group) else: self._network_config_group = None self.use_admin_port = False self.use_service_network = True self._neutron_api = None self._service_network_id = None self.connect_share_server_to_tenant_network = ( self.get_config_option('connect_share_server_to_tenant_network')) self.admin_network_id = self.get_config_option('admin_network_id') 
self.admin_subnet_id = self.get_config_option('admin_subnet_id') if self.admin_network_id and self.admin_subnet_id: self.use_admin_port = True if self.use_admin_port and self.connect_share_server_to_tenant_network: self.use_service_network = False @property def NAME(self): return NEUTRON_NAME @property def admin_project_id(self): return self.neutron_api.admin_project_id @property @utils.synchronized("instantiate_neutron_api_neutron_net_helper") def neutron_api(self): if not self._neutron_api: self._neutron_api = neutron.API( config_group_name=self._network_config_group) return self._neutron_api @property @utils.synchronized("service_network_id_neutron_net_helper") def service_network_id(self): if not self._service_network_id: self._service_network_id = self._get_service_network_id() return self._service_network_id def get_network_name(self, network_info): """Returns name of network for service instance.""" net = self.neutron_api.get_network(network_info['neutron_net_id']) return net['name'] @utils.synchronized("service_instance_get_service_network", external=True) def _get_service_network_id(self): """Finds existing or creates new service network.""" service_network_name = self.get_config_option("service_network_name") networks = [] for network in self.neutron_api.get_all_admin_project_networks(): if network['name'] == service_network_name: networks.append(network) if len(networks) > 1: raise exception.ServiceInstanceException( _('Ambiguous service networks.')) elif not networks: return self.neutron_api.network_create( self.admin_project_id, service_network_name)['id'] else: return networks[0]['id'] @utils.synchronized( "service_instance_setup_and_teardown_network_for_instance", external=True) def teardown_network(self, server_details): subnet_id = server_details.get("subnet_id") router_id = server_details.get("router_id") service_port_id = server_details.get("service_port_id") public_port_id = server_details.get("public_port_id") admin_port_id = server_details.get("admin_port_id") for port_id in (service_port_id, public_port_id, admin_port_id): if port_id: try: self.neutron_api.delete_port(port_id) except exception.NetworkException as e: if e.kwargs.get('code') != 404: raise LOG.debug("Failed to delete port %(port_id)s with error: " "\n %(exc)s", {"port_id": port_id, "exc": e}) if router_id and subnet_id: ports = self.neutron_api.list_ports( fields=['fixed_ips', 'device_id', 'device_owner']) # NOTE(vponomaryov): iterate ports to get to know whether current # subnet is used or not. We will not remove it from router if it # is used. for port in ports: # NOTE(vponomaryov): if device_id is present, then we know that # this port is used. Also, if device owner is 'compute:*', then # we know that it is VM. We continue only if both are 'True'. if (port['device_id'] and port['device_owner'].startswith('compute:')): for fixed_ip in port['fixed_ips']: if fixed_ip['subnet_id'] == subnet_id: # NOTE(vponomaryov): There are other share servers # exist that use this subnet. So, do not remove it # from router. return try: # NOTE(vponomaryov): there is no other share servers or # some VMs that use this subnet. So, remove it from router. 
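                # After detaching, the subnet's name is blanked via
                # update_subnet(subnet_id, '') so _get_service_subnet() can
                # recycle it for a future share server instead of carving a
                # new CIDR out of service_network_cidr.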
self.neutron_api.router_remove_interface( router_id, subnet_id) except exception.NetworkException as e: if e.kwargs['code'] != 404: raise LOG.debug('Subnet %(subnet_id)s is not attached to the ' 'router %(router_id)s.', {'subnet_id': subnet_id, 'router_id': router_id}) self.neutron_api.update_subnet(subnet_id, '') @utils.synchronized( "service_instance_setup_and_teardown_network_for_instance", external=True) def setup_network(self, network_info): neutron_net_id = network_info['neutron_net_id'] neutron_subnet_id = network_info['neutron_subnet_id'] network_data = dict() subnet_name = ('service_subnet_for_handling_of_share_server_for_' 'tenant_subnet_%s' % neutron_subnet_id) if self.use_service_network: network_data['service_subnet'] = self._get_service_subnet( subnet_name) if not network_data['service_subnet']: network_data['service_subnet'] = ( self.neutron_api.subnet_create( self.admin_project_id, self.service_network_id, subnet_name, self._get_cidr_for_subnet())) network_data['ports'] = [] if not self.connect_share_server_to_tenant_network: network_data['router'] = self._get_private_router( neutron_net_id, neutron_subnet_id) try: self.neutron_api.router_add_interface( network_data['router']['id'], network_data['service_subnet']['id']) except exception.NetworkException as e: if e.kwargs['code'] != 400: raise LOG.debug('Subnet %(subnet_id)s is already attached to the ' 'router %(router_id)s.', {'subnet_id': network_data['service_subnet']['id'], 'router_id': network_data['router']['id']}) else: network_data['public_port'] = self.neutron_api.create_port( self.admin_project_id, neutron_net_id, subnet_id=neutron_subnet_id, device_owner='manila') network_data['ports'].append(network_data['public_port']) if self.use_service_network: network_data['service_port'] = self.neutron_api.create_port( self.admin_project_id, self.service_network_id, subnet_id=network_data['service_subnet']['id'], device_owner='manila') network_data['ports'].append(network_data['service_port']) if self.use_admin_port: network_data['admin_port'] = self.neutron_api.create_port( self.admin_project_id, self.admin_network_id, subnet_id=self.admin_subnet_id, device_owner='manila') network_data['ports'].append(network_data['admin_port']) try: self.setup_connectivity_with_service_instances() except Exception: for port in network_data['ports']: self.neutron_api.delete_port(port['id']) raise network_data['nics'] = [ {'port-id': port['id']} for port in network_data['ports']] public_ip = network_data.get( 'public_port', network_data.get('service_port')) network_data['ip_address'] = public_ip['fixed_ips'][0]['ip_address'] return network_data def _get_cidr_for_subnet(self): """Returns not used cidr for service subnet creating.""" subnets = self._get_all_service_subnets() used_cidrs = set(subnet['cidr'] for subnet in subnets) serv_cidr = netaddr.IPNetwork( self.get_config_option("service_network_cidr")) division_mask = self.get_config_option("service_network_division_mask") for subnet in serv_cidr.subnet(division_mask): cidr = six.text_type(subnet.cidr) if cidr not in used_cidrs: return cidr else: raise exception.ServiceInstanceException(_('No available cidrs.')) def setup_connectivity_with_service_instances(self): """Sets up connectivity with service instances. Creates host port in service network and/or admin network, creating and setting up required network devices. 
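        For each port, the configured interface driver (vif_driver) plugs a
        host-side device, assigns the port's fixed-IP CIDRs to it, pulls the
        device's route up in priority and cleans up devices left over from
        previously removed service ports.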
""" if self.use_service_network: port = self._get_service_port( self.service_network_id, None, 'manila-share') port = self._add_fixed_ips_to_service_port(port) interface_name = self.vif_driver.get_device_name(port) device = ip_lib.IPDevice(interface_name) self._plug_interface_in_host(interface_name, device, port) if self.use_admin_port: port = self._get_service_port( self.admin_network_id, self.admin_subnet_id, 'manila-admin-share') interface_name = self.vif_driver.get_device_name(port) device = ip_lib.IPDevice(interface_name) for fixed_ip in port['fixed_ips']: subnet = self.neutron_api.get_subnet(fixed_ip['subnet_id']) device.route.clear_outdated_routes(subnet['cidr']) self._plug_interface_in_host(interface_name, device, port) def _plug_interface_in_host(self, interface_name, device, port): self.vif_driver.plug(interface_name, port['id'], port['mac_address']) ip_cidrs = [] for fixed_ip in port['fixed_ips']: subnet = self.neutron_api.get_subnet(fixed_ip['subnet_id']) net = netaddr.IPNetwork(subnet['cidr']) ip_cidr = '%s/%s' % (fixed_ip['ip_address'], net.prefixlen) ip_cidrs.append(ip_cidr) self.vif_driver.init_l3(interface_name, ip_cidrs) # ensure that interface is first in the list device.route.pullup_route(interface_name) # here we are checking for garbage devices from removed service port self._remove_outdated_interfaces(device) @utils.synchronized( "service_instance_remove_outdated_interfaces", external=True) def _remove_outdated_interfaces(self, device): """Finds and removes unused network device.""" device_cidr_set = self._get_set_of_device_cidrs(device) for dev in ip_lib.IPWrapper().get_devices(): if dev.name != device.name and dev.name[:3] == device.name[:3]: cidr_set = self._get_set_of_device_cidrs(dev) if device_cidr_set & cidr_set: self.vif_driver.unplug(dev.name) def _get_set_of_device_cidrs(self, device): cidrs = set() addr_list = [] try: # NOTE(ganso): I could call ip_lib.device_exists here, but since # this is a concurrency problem, it would not fix the problem. addr_list = device.addr.list() except Exception as e: if 'does not exist' in six.text_type(e): LOG.warning(_LW( "Device %s does not exist anymore.") % device.name) else: raise for addr in addr_list: if addr['ip_version'] == 4: cidrs.add(six.text_type(netaddr.IPNetwork(addr['cidr']).cidr)) return cidrs @utils.synchronized("service_instance_get_service_port", external=True) def _get_service_port(self, network_id, subnet_id, device_id): """Find or creates service neutron port. This port will be used for connectivity with service instances. """ host = socket.gethostname() search_opts = {'device_id': device_id, 'binding:host_id': host} ports = [port for port in self.neutron_api. list_ports(**search_opts)] if len(ports) > 1: raise exception.ServiceInstanceException( _('Error. 
Ambiguous service ports.')) elif not ports: port = self.neutron_api.create_port( self.admin_project_id, network_id, subnet_id=subnet_id, device_id=device_id, device_owner='manila:share', host_id=host) else: port = ports[0] return port @utils.synchronized( "service_instance_add_fixed_ips_to_service_port", external=True) def _add_fixed_ips_to_service_port(self, port): network = self.neutron_api.get_network(self.service_network_id) subnets = set(network['subnets']) port_fixed_ips = [] for fixed_ip in port['fixed_ips']: port_fixed_ips.append({'subnet_id': fixed_ip['subnet_id'], 'ip_address': fixed_ip['ip_address']}) if fixed_ip['subnet_id'] in subnets: subnets.remove(fixed_ip['subnet_id']) # If there are subnets here that means that # we need to add those to the port and call update. if subnets: port_fixed_ips.extend([dict(subnet_id=s) for s in subnets]) port = self.neutron_api.update_port_fixed_ips( port['id'], {'fixed_ips': port_fixed_ips}) return port @utils.synchronized("service_instance_get_private_router", external=True) def _get_private_router(self, neutron_net_id, neutron_subnet_id): """Returns router attached to private subnet gateway.""" private_subnet = self.neutron_api.get_subnet(neutron_subnet_id) if not private_subnet['gateway_ip']: raise exception.ServiceInstanceException( _('Subnet must have gateway.')) private_network_ports = [p for p in self.neutron_api.list_ports( network_id=neutron_net_id)] for p in private_network_ports: fixed_ip = p['fixed_ips'][0] if (fixed_ip['subnet_id'] == private_subnet['id'] and fixed_ip['ip_address'] == private_subnet['gateway_ip']): private_subnet_gateway_port = p break else: raise exception.ServiceInstanceException( _('Subnet gateway is not attached to the router.')) private_subnet_router = self.neutron_api.show_router( private_subnet_gateway_port['device_id']) return private_subnet_router @utils.synchronized("service_instance_get_service_subnet", external=True) def _get_service_subnet(self, subnet_name): all_service_subnets = self._get_all_service_subnets() service_subnets = [subnet for subnet in all_service_subnets if subnet['name'] == subnet_name] if len(service_subnets) == 1: return service_subnets[0] elif not service_subnets: unused_service_subnets = [subnet for subnet in all_service_subnets if subnet['name'] == ''] if unused_service_subnets: service_subnet = unused_service_subnets[0] self.neutron_api.update_subnet( service_subnet['id'], subnet_name) return service_subnet return None else: raise exception.ServiceInstanceException( _('Ambiguous service subnets.')) @utils.synchronized( "service_instance_get_all_service_subnets", external=True) def _get_all_service_subnets(self): service_network = self.neutron_api.get_network(self.service_network_id) subnets = [] for subnet_id in service_network['subnets']: subnets.append(self.neutron_api.get_subnet(subnet_id)) return subnets class NovaNetworkHelper(BaseNetworkhelper): """Nova network helper for Manila service instances. All security-group rules are applied to all interfaces of Nova VM using Nova-network. In that case there is no need to create additional service network. Only one thing should be satisfied - Manila host should have access to all tenant networks. This network helper does not create resources. 
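Because of that, setup_network() below only resolves the configured Nova
network and returns it as a NIC specification (the network id shown is a
placeholder)::

    network_info['nics'] = [{'net-id': '<nova-net-id>'}]

while teardown_network() and setup_connectivity_with_service_instances() are
intentionally left as placeholders.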
""" def __init__(self, service_instance_manager): self.compute_api = service_instance_manager.compute_api self.admin_context = service_instance_manager.admin_context @property def NAME(self): return NOVA_NAME def setup_network(self, network_info): net = self._get_nova_network(network_info['nova_net_id']) network_info['nics'] = [{'net-id': net['id']}] return network_info def get_network_name(self, network_info): """Returns name of network for service instance.""" return self._get_nova_network(network_info['nova_net_id'])['label'] def teardown_network(self, server_details): """Nothing to do. Placeholder.""" def setup_connectivity_with_service_instances(self): """Nothing to do. Placeholder.""" def _get_nova_network(self, nova_network_id): """Returns network to be used for service instance. :param nova_network_id: string with id of network. :returns: dict -- network data as dict :raises: exception.ManilaException """ if not nova_network_id: raise exception.ManilaException( _('Nova network for service instance is not provided.')) net = self.compute_api.network_get(self.admin_context, nova_network_id) return net manila-2.0.0/manila/share/drivers/hitachi/0000775000567000056710000000000012701407265021551 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/hitachi/ssh.py0000664000567000056710000005377412701407107022733 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_concurrency import processutils from oslo_log import log from oslo_utils import strutils from oslo_utils import units import paramiko import six import time from manila import exception from manila.i18n import _ from manila.i18n import _LE from manila.i18n import _LW from manila import utils as mutils LOG = log.getLogger(__name__) class HNASSSHBackend(object): def __init__(self, hnas_ip, hnas_username, hnas_password, ssh_private_key, cluster_admin_ip0, evs_id, evs_ip, fs_name, job_timeout): self.ip = hnas_ip self.port = 22 self.user = hnas_username self.password = hnas_password self.priv_key = ssh_private_key self.admin_ip0 = cluster_admin_ip0 self.evs_id = six.text_type(evs_id) self.fs_name = fs_name self.evs_ip = evs_ip self.sshpool = None self.job_timeout = job_timeout LOG.debug("Hitachi HNAS Driver using SSH backend.") def get_stats(self): """Get the stats from file-system. :returns: fs_capacity.size = Total size from filesystem. available_space = Free space currently on filesystem. 
""" command = ['df', '-a', '-f', self.fs_name] output, err = self._execute(command) line = output.split('\n') fs_capacity = Capacity(line[3]) available_space = fs_capacity.size - fs_capacity.used return fs_capacity.size, available_space def nfs_export_add(self, share_id): path = '/shares/' + share_id command = ['nfs-export', 'add', '-S', 'disable', '-c', '127.0.0.1', path, self.fs_name, path] self._execute(command) def nfs_export_del(self, share_id): path = '/shares/' + share_id command = ['nfs-export', 'del', path] try: self._execute(command) except processutils.ProcessExecutionError as e: if 'does not exist' in e.stderr: LOG.warning(_LW("Export %s does not exist on " "backend anymore."), path) else: msg = six.text_type(e) LOG.exception(msg) raise exception.HNASBackendException(msg=msg) def get_host_list(self, share_id): export = self._get_share_export(share_id) return export[0].export_configuration def update_access_rule(self, share_id, host_list): command = ['nfs-export', 'mod', '-c'] if len(host_list) == 0: command.append('127.0.0.1') else: string_command = '"' + six.text_type(host_list[0]) for i in range(1, len(host_list)): string_command += ',' + (six.text_type(host_list[i])) string_command += '"' command.append(string_command) path = '/shares/' + share_id command.append(path) self._execute(command) def tree_clone(self, src_path, dest_path): command = ['tree-clone-job-submit', '-e', '-f', self.fs_name, src_path, dest_path] try: output, err = self._execute(command) except processutils.ProcessExecutionError as e: if ('Cannot find any clonable files in the source directory' in e.stderr): msg = _("Source path %s is empty") % src_path raise exception.HNASNothingToCloneException(msg) else: msg = six.text_type(e) LOG.exception(msg) raise exception.HNASBackendException(msg=msg) job_submit = JobSubmit(output) if job_submit.request_status == 'Request submitted successfully': job_id = job_submit.job_id job_status = None progress = '' job_rechecks = 0 starttime = time.time() deadline = starttime + self.job_timeout while (not job_status or job_status.job_state != "Job was completed"): command = ['tree-clone-job-status', job_id] output, err = self._execute(command) job_status = JobStatus(output) if job_status.job_state == 'Job failed': break old_progress = progress progress = job_status.data_bytes_processed if old_progress == progress: job_rechecks += 1 now = time.time() if now > deadline: command = ['tree-clone-job-abort', job_id] self._execute(command) LOG.error(_LE("Timeout in snapshot creation from " "source path %s.") % src_path) msg = (_("Share snapshot of source path %s " "was not created.") % src_path) raise exception.HNASBackendException(msg=msg) else: time.sleep(job_rechecks ** 2) else: job_rechecks = 0 if (job_status.job_state, job_status.job_status, job_status.directories_missing, job_status.files_missing) == ("Job was completed", "Success", '0', '0'): LOG.debug("Snapshot of source path %(src)s to destination" "path %(dest)s created successfully.", {'src': src_path, 'dest': dest_path}) else: LOG.error(_LE('Error creating snapshot of source path %s.'), src_path) msg = (_('Snapshot of source path %s was not created.') % src_path) raise exception.HNASBackendException(msg=msg) def tree_delete(self, path): command = ['tree-delete-job-submit', '--confirm', '-f', self.fs_name, path] try: self._execute(command) except processutils.ProcessExecutionError as e: if 'Source path: Cannot access' in e.stderr: LOG.warning(_LW("Attempted to delete path %s " "but it does not exist."), path) else: msg = 
six.text_type(e) LOG.exception(msg) raise e def create_directory(self, dest_path): self._locked_selectfs('create', dest_path) def delete_directory(self, path): self._locked_selectfs('delete', path) def check_fs_mounted(self): fs_list = self._get_filesystem_list() for i in range(0, len(fs_list)): if fs_list[i].name == self.fs_name: if fs_list[i].state == 'Mount': return True else: return False msg = (_("Filesystem %s does not exist or it is not available " "in the current EVS context.") % self.fs_name) raise exception.HNASItemNotFoundException(msg=msg) def mount(self): command = ['mount', self.fs_name] try: self._execute(command) except processutils.ProcessExecutionError as e: if 'file system is already mounted' not in e.stderr: msg = six.text_type(e) LOG.exception(msg) raise e def vvol_create(self, vvol_name): # create a virtual-volume inside directory path = '/shares/' + vvol_name command = ['virtual-volume', 'add', '--ensure', self.fs_name, vvol_name, path] self._execute(command) def vvol_delete(self, vvol_name): path = '/shares/' + vvol_name # Virtual-volume and quota are deleted together command = ['tree-delete-job-submit', '--confirm', '-f', self.fs_name, path] try: self._execute(command) except processutils.ProcessExecutionError as e: if 'Source path: Cannot access' in e.stderr: LOG.debug("Share %(shr)s does not exist.", {'shr': vvol_name}) else: msg = six.text_type(e) LOG.exception(msg) raise e def quota_add(self, vvol_name, vvol_quota): str_quota = six.text_type(vvol_quota) + 'G' command = ['quota', 'add', '--usage-limit', str_quota, '--usage-hard-limit', 'yes', self.fs_name, vvol_name] self._execute(command) def modify_quota(self, vvol_name, new_size): str_quota = six.text_type(new_size) + 'G' command = ['quota', 'mod', '--usage-limit', str_quota, self.fs_name, vvol_name] self._execute(command) def check_vvol(self, vvol_name): command = ['virtual-volume', 'list', '--verbose', self.fs_name, vvol_name] try: self._execute(command) except processutils.ProcessExecutionError as e: msg = six.text_type(e) LOG.exception(msg) msg = (_("Virtual volume %s does not exist.") % vvol_name) raise exception.HNASItemNotFoundException(msg=msg) def check_quota(self, vvol_name): command = ['quota', 'list', '--verbose', self.fs_name, vvol_name] output, err = self._execute(command) if 'No quotas matching specified filter criteria' in output: msg = (_("Virtual volume %s does not have any quota.") % vvol_name) raise exception.HNASItemNotFoundException(msg=msg) def check_export(self, vvol_name): export = self._get_share_export(vvol_name) if (vvol_name in export[0].export_name and self.fs_name in export[0].file_system_label): return else: msg = _("Export %s does not exist.") % export[0].export_name raise exception.HNASItemNotFoundException(msg=msg) def get_share_quota(self, share_id): command = ['quota', 'list', self.fs_name, share_id] output, err = self._execute(command) quota = Quota(output) if quota.limit is None: return None if quota.limit_unit == 'TB': return quota.limit * units.Ki elif quota.limit_unit == 'GB': return quota.limit else: msg = (_("Share %s does not support quota values " "below 1G.") % share_id) raise exception.HNASBackendException(msg=msg) def get_share_usage(self, share_id): command = ['quota', 'list', self.fs_name, share_id] output, err = self._execute(command) quota = Quota(output) if quota.usage is None: msg = (_("Virtual volume %s does not have any quota.") % share_id) raise exception.HNASItemNotFoundException(msg=msg) else: bytes_usage = 
strutils.string_to_bytes(six.text_type(quota.usage) + quota.usage_unit) return bytes_usage / units.Gi def _get_share_export(self, share_id): share_id = '/shares/' + share_id command = ['nfs-export', 'list ', share_id] export_list = [] try: output, err = self._execute(command) except processutils.ProcessExecutionError as e: if 'does not exist' in e.stderr: msg = _("Export %(share)s was not found in EVS " "%(evs_id)s") % {'share': share_id, 'evs_id': self.evs_id} raise exception.HNASItemNotFoundException(msg=msg) else: raise items = output.split('Export name') if items[0][0] == '\n': items.pop(0) for i in range(0, len(items)): export_list.append(Export(items[i])) return export_list def _get_filesystem_list(self): command = ['filesystem-list', self.fs_name] output, err = self._execute(command) items = output.split('\n') filesystem_list = [] fs_name = None if len(items) > 2: j = 0 for i in range(2, len(items) - 1): if "Filesystem " in items[i] and len(items[i].split()) == 2: description, fs_name = items[i].split() fs_name = fs_name[:len(fs_name) - 1] elif "NoEVS" not in items[i]: # Not considering FS without EVS filesystem_list.append(FileSystem(items[i])) if fs_name is not None: filesystem_list[j].name = fs_name fs_name = None j += 1 else: LOG.debug("Ignoring filesystems without EVS.") return filesystem_list @mutils.retry(exception=exception.HNASConnException, wait_random=True) def _execute(self, commands): command = ['ssc', '127.0.0.1'] if self.admin_ip0 is not None: command = ['ssc', '--smuauth', self.admin_ip0] command = command + ['console-context', '--evs', self.evs_id] commands = command + commands mutils.check_ssh_injection(commands) commands = ' '.join(commands) if not self.sshpool: self.sshpool = mutils.SSHPool(ip=self.ip, port=self.port, conn_timeout=None, login=self.user, password=self.password, privatekey=self.priv_key) with self.sshpool.item() as ssh: ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) try: out, err = processutils.ssh_execute(ssh, commands, check_exit_code=True) LOG.debug("Command %(cmd)s result: out = %(out)s - err = " "%(err)s.", {'cmd': commands, 'out': out, 'err': err}) return out, err except processutils.ProcessExecutionError as e: if 'Failed to establish SSC connection' in e.stderr: LOG.debug("SSC connection error!") msg = _("Failed to establish SSC connection.") raise exception.HNASConnException(msg=msg) else: LOG.debug("Command %(cmd)s result: out = %(out)s - err = " "%(err)s - exit = %(exit)s.", {'cmd': e.cmd, 'out': e.stdout, 'err': e.stderr, 'exit': e.exit_code}) LOG.error(_LE("Error running SSH command.")) raise @mutils.synchronized("hds_hnas_select_fs", external=True) def _locked_selectfs(self, op, path): if op == 'create': command = ['selectfs', self.fs_name, '\n', 'ssc', '127.0.0.1', 'console-context', '--evs', self.evs_id, 'mkdir', '-p', path] self._execute(command) if op == 'delete': command = ['selectfs', self.fs_name, '\n', 'ssc', '127.0.0.1', 'console-context', '--evs', self.evs_id, 'rmdir', path] try: self._execute(command) except processutils.ProcessExecutionError as e: if 'DirectoryNotEmpty' in e.stderr: LOG.debug("Share %(path)s has more snapshots.", {'path': path}) elif 'NotFound' in e.stderr: LOG.warning(_LW("Attempted to delete path %s but it does " "not exist."), path) else: msg = six.text_type(e) LOG.exception(msg) raise e class FileSystem(object): def __init__(self, data): if data: items = data.split() if len(items) >= 7: self.name = items[0] self.dev = items[1] self.on_span = items[2] self.state = items[3] self.evs = 
int(items[4]) self.capacity = int(items[5]) self.confined = int(items[6]) if len(items) == 8: self.flag = items[7] else: self.flag = '' class Export(object): def __init__(self, data): if data: split_data = data.split('Export configuration:\n') items = split_data[0].split('\n') self.export_name = items[0].split(':')[1].strip() self.export_path = items[1].split(':')[1].strip() if '*** not available ***' in items[2]: self.file_system_info = items[2].split(':')[1].strip() index = 0 else: self.file_system_label = items[2].split(':')[1].strip() self.file_system_size = items[3].split(':')[1].strip() self.file_system_free_space = items[4].split(':')[1].strip() self.file_system_state = items[5].split(':')[1] self.formatted = items[6].split('=')[1].strip() self.mounted = items[7].split('=')[1].strip() self.failed = items[8].split('=')[1].strip() self.thin_provisioned = items[9].split('=')[1].strip() index = 7 self.access_snapshots = items[3 + index].split(':')[1].strip() self.display_snapshots = items[4 + index].split(':')[1].strip() self.read_caching = items[5 + index].split(':')[1].strip() self.disaster_recovery_setting = items[6 + index].split(':')[1] self.recovered = items[7 + index].split('=')[1].strip() self.transfer_setting = items[8 + index].split('=')[1].strip() self.export_configuration = [] export_config = split_data[1].split('\n') for i in range(0, len(export_config)): if any(j.isdigit() or j.isalpha() for j in export_config[i]): self.export_configuration.append(export_config[i]) class JobStatus(object): def __init__(self, data): if data: lines = data.split("\n") self.job_id = lines[0].split()[3] self.physical_node = lines[2].split()[3] self.evs = lines[3].split()[2] self.volume_number = lines[4].split()[3] self.fs_id = lines[5].split()[4] self.fs_name = lines[6].split()[4] self.source_path = lines[7].split()[3] self.creation_time = " ".join(lines[8].split()[3:5]) self.destination_path = lines[9].split()[3] self.ensure_path_exists = lines[10].split()[5] self.job_state = " ".join(lines[12].split()[3:]) self.job_started = " ".join(lines[14].split()[2:4]) self.job_ended = " ".join(lines[15].split()[2:4]) self.job_status = lines[16].split()[2] error_details_line = lines[17].split() if len(error_details_line) > 3: self.error_details = " ".join(error_details_line[3:]) else: self.error_details = None self.directories_processed = lines[18].split()[3] self.files_processed = lines[19].split()[3] self.data_bytes_processed = lines[20].split()[4] self.directories_missing = lines[21].split()[4] self.files_missing = lines[22].split()[4] self.files_skipped = lines[23].split()[4] skipping_details_line = lines[24].split() if len(skipping_details_line) > 3: self.skipping_details = " ".join(skipping_details_line[3:]) else: self.skipping_details = None class JobSubmit(object): def __init__(self, data): if data: split_data = data.replace(".", "").split() self.request_status = " ".join(split_data[1:4]) self.job_id = split_data[8] class Capacity(object): def __init__(self, data): if data: items = data.split() self.id = items[0] self.label = items[1] self.evs = items[2] self.size = float(items[3]) self.size_measure = items[4] if self.size_measure == 'TB': self.size = self.size * units.Ki self.used = float(items[5]) self.used_measure = items[6] if self.used_measure == 'TB': self.used = self.used * units.Ki class Quota(object): def __init__(self, data): if data: if 'No quotas matching' in data: self.type = None self.target = None self.usage = None self.usage_unit = None self.limit = None self.limit_unit = None 
else: items = data.split() self.type = items[2] self.target = items[6] self.usage = items[9] self.usage_unit = items[10] if items[13] == 'Unset': self.limit = None else: self.limit = float(items[13]) self.limit_unit = items[14] manila-2.0.0/manila/share/drivers/hitachi/__init__.py0000664000567000056710000000000012701407107023643 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/hitachi/hds_hnas.py0000664000567000056710000006125412701407107023715 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log from oslo_utils import excutils from oslo_utils import importutils import six from manila.common import constants from manila import exception from manila.i18n import _ from manila.i18n import _LI from manila.i18n import _LW from manila.share import driver LOG = log.getLogger(__name__) hds_hnas_opts = [ cfg.StrOpt('hds_hnas_ip', help="HNAS management interface IP for communication " "between Manila controller and HNAS."), cfg.StrOpt('hds_hnas_user', help="HNAS username Base64 String in order to perform tasks " "such as create file-systems and network interfaces."), cfg.StrOpt('hds_hnas_password', secret=True, help="HNAS user password. Required only if private key is not " "provided."), cfg.IntOpt('hds_hnas_evs_id', help="Specify which EVS this backend is assigned to."), cfg.StrOpt('hds_hnas_evs_ip', help="Specify IP for mounting shares."), cfg.StrOpt('hds_hnas_file_system_name', help="Specify file-system name for creating shares."), cfg.StrOpt('hds_hnas_ssh_private_key', secret=True, help="RSA/DSA private key value used to connect into HNAS. " "Required only if password is not provided."), cfg.StrOpt('hds_hnas_cluster_admin_ip0', help="The IP of the clusters admin node. Only set in HNAS " "multinode clusters."), cfg.IntOpt('hds_hnas_stalled_job_timeout', default=30, help="The time (in seconds) to wait for stalled HNAS jobs " "before aborting."), cfg.StrOpt('hds_hnas_driver_helper', default='manila.share.drivers.hitachi.ssh.HNASSSHBackend', help="Python class to be used for driver helper."), ] CONF = cfg.CONF CONF.register_opts(hds_hnas_opts) class HDSHNASDriver(driver.ShareDriver): """Manila HNAS Driver implementation. 1.0.0 - Initial Version. 2.0.0 - Refactoring, bugfixes, implemented Share Shrink and Update Access. 
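A minimal backend section in manila.conf could look as follows; the option
names come from hds_hnas_opts above, while the section name and all values are
purely illustrative (either hds_hnas_password or hds_hnas_ssh_private_key must
be supplied)::

    [hnas1]
    share_backend_name = HNAS1
    share_driver = manila.share.drivers.hitachi.hds_hnas.HDSHNASDriver
    driver_handles_share_servers = False
    hds_hnas_ip = 172.24.44.15
    hds_hnas_user = supervisor
    hds_hnas_password = supervisor
    hds_hnas_evs_id = 1
    hds_hnas_evs_ip = 172.24.44.10
    hds_hnas_file_system_name = FS-Manila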
""" def __init__(self, *args, **kwargs): """Do initialization.""" LOG.debug("Invoking base constructor for Manila HDS HNAS Driver.") super(HDSHNASDriver, self).__init__(False, *args, **kwargs) LOG.debug("Setting up attributes for Manila HDS HNAS Driver.") self.configuration.append_config_values(hds_hnas_opts) LOG.debug("Reading config parameters for Manila HDS HNAS Driver.") self.backend_name = self.configuration.safe_get('share_backend_name') hnas_helper = self.configuration.safe_get('hds_hnas_driver_helper') hnas_ip = self.configuration.safe_get('hds_hnas_ip') hnas_username = self.configuration.safe_get('hds_hnas_user') hnas_password = self.configuration.safe_get('hds_hnas_password') hnas_evs_id = self.configuration.safe_get('hds_hnas_evs_id') self.hnas_evs_ip = self.configuration.safe_get('hds_hnas_evs_ip') self.fs_name = self.configuration.safe_get('hds_hnas_file_system_name') ssh_private_key = self.configuration.safe_get( 'hds_hnas_ssh_private_key') cluster_admin_ip0 = self.configuration.safe_get( 'hds_hnas_cluster_admin_ip0') self.private_storage = kwargs.get('private_storage') job_timeout = self.configuration.safe_get( 'hds_hnas_stalled_job_timeout') if hnas_helper is None: msg = _("The config parameter hds_hnas_driver_helper is not set.") raise exception.InvalidParameterValue(err=msg) if hnas_evs_id is None: msg = _("The config parameter hds_hnas_evs_id is not set.") raise exception.InvalidParameterValue(err=msg) if self.hnas_evs_ip is None: msg = _("The config parameter hds_hnas_evs_ip is not set.") raise exception.InvalidParameterValue(err=msg) if hnas_ip is None: msg = _("The config parameter hds_hnas_ip is not set.") raise exception.InvalidParameterValue(err=msg) if hnas_username is None: msg = _("The config parameter hds_hnas_user is not set.") raise exception.InvalidParameterValue(err=msg) if hnas_password is None and ssh_private_key is None: msg = _("Credentials configuration parameters missing: " "you need to set hds_hnas_password or " "hds_hnas_ssh_private_key.") raise exception.InvalidParameterValue(err=msg) LOG.debug("Initializing HNAS Layer.") helper = importutils.import_class(hnas_helper) self.hnas = helper(hnas_ip, hnas_username, hnas_password, ssh_private_key, cluster_admin_ip0, hnas_evs_id, self.hnas_evs_ip, self.fs_name, job_timeout) def update_access(self, context, share, access_rules, add_rules, delete_rules, share_server=None): """Update access rules for given share. :param context: The `context.RequestContext` object for the request :param share: Share that will have its access rules updated. :param access_rules: All access rules for given share. This list is enough to update the access rules for given share. :param add_rules: Empty List or List of access rules which should be added. access_rules already contains these rules. Not used by this driver. :param delete_rules: Empty List or List of access rules which should be removed. access_rules doesn't contain these rules. Not used by this driver. :param share_server: Data structure with share server information. Not used by this driver. 
""" try: self._ensure_share(share['id']) except exception.HNASItemNotFoundException: raise exception.ShareResourceNotFound(share_id=share['id']) host_list = [] share_id = self._get_hnas_share_id(share['id']) for rule in access_rules: if rule['access_type'].lower() != 'ip': msg = _("Only IP access type currently supported.") raise exception.InvalidShareAccess(reason=msg) if rule['access_level'] == constants.ACCESS_LEVEL_RW: host_list.append(rule['access_to'] + '(' + rule['access_level'] + ',norootsquash)') else: host_list.append(rule['access_to'] + '(' + rule['access_level'] + ')') self.hnas.update_access_rule(share_id, host_list) if host_list: LOG.debug("Share %(share)s has the rules: %(rules)s", {'share': share_id, 'rules': ', '.join(host_list)}) else: LOG.debug("Share %(share)s has no rules.", {'share': share_id}) def create_share(self, context, share, share_server=None): """Creates share. :param context: The `context.RequestContext` object for the request :param share: Share that will be created. :param share_server: Data structure with share server information. Not used by this driver. :returns: Returns a path of EVS IP concatenate with the path of share in the filesystem (e.g. ['172.24.44.10:/shares/id']). """ LOG.debug("Creating share in HNAS: %(shr)s.", {'shr': share['id']}) if share['share_proto'].lower() != 'nfs': msg = _("Only NFS protocol is currently supported.") raise exception.ShareBackendException(msg=msg) path = self._create_share(share['id'], share['size']) uri = self.hnas_evs_ip + ":" + path LOG.debug("Share created successfully on path: %(uri)s.", {'uri': uri}) return uri def delete_share(self, context, share, share_server=None): """Deletes share. :param context: The `context.RequestContext` object for the request :param share: Share that will be deleted. :param share_server: Data structure with share server information. Not used by this driver. """ share_id = self._get_hnas_share_id(share['id']) LOG.debug("Deleting share in HNAS: %(shr)s.", {'shr': share['id']}) self._delete_share(share_id) def create_snapshot(self, context, snapshot, share_server=None): """Creates snapshot. :param context: The `context.RequestContext` object for the request :param snapshot: Snapshot that will be created. :param share_server: Data structure with share server information. Not used by this driver. """ share_id = self._get_hnas_share_id(snapshot['share_id']) LOG.debug("The snapshot of share %(ss_sid)s will be created with " "id %(ss_id)s.", {'ss_sid': snapshot['share_id'], 'ss_id': snapshot['id']}) self._create_snapshot(share_id, snapshot['id']) LOG.info(_LI("Snapshot %(id)s successfully created."), {'id': snapshot['id']}) def delete_snapshot(self, context, snapshot, share_server=None): """Deletes snapshot. :param context: The `context.RequestContext` object for the request :param snapshot: Snapshot that will be deleted. :param share_server:Data structure with share server information. Not used by this driver. """ share_id = self._get_hnas_share_id(snapshot['share_id']) LOG.debug("The snapshot %(ss_sid)s will be deleted. The related " "share ID is %(ss_id)s.", {'ss_sid': snapshot['share_id'], 'ss_id': snapshot['id']}) self._delete_snapshot(share_id, snapshot['id']) LOG.info(_LI("Snapshot %(id)s successfully deleted."), {'id': snapshot['id']}) def create_share_from_snapshot(self, context, share, snapshot, share_server=None): """Creates a new share from snapshot. :param context: The `context.RequestContext` object for the request :param share: Information about the new share. 
:param snapshot: Information about the snapshot that will be copied to new share. :param share_server: Data structure with share server information. Not used by this driver. :returns: Returns a path of EVS IP concatenate with the path of new share in the filesystem (e.g. ['172.24.44.10:/shares/id']). """ LOG.debug("Creating a new share from snapshot: %(ss_id)s.", {'ss_id': snapshot['id']}) path = self._create_share_from_snapshot(share, snapshot) uri = self.hnas_evs_ip + ":" + path LOG.debug("Share created successfully on path: %(uri)s.", {'uri': uri}) return uri def ensure_share(self, context, share, share_server=None): """Ensure that share is exported. :param context: The `context.RequestContext` object for the request :param share: Share that will be checked. :param share_server: Data structure with share server information. Not used by this driver. :returns: Returns a list of EVS IP concatenated with the path of share in the filesystem (e.g. ['172.24.44.10:/shares/id']). """ LOG.debug("Ensuring share in HNAS: %(shr)s.", {'shr': share['id']}) share_id = self._get_hnas_share_id(share['id']) path = self._ensure_share(share_id) export = self.hnas_evs_ip + ":" + path export_list = [export] LOG.debug("Share ensured in HNAS: %(shr)s.", {'shr': share['id']}) return export_list def extend_share(self, share, new_size, share_server=None): """Extends a share to new size. :param share: Share that will be extended. :param new_size: New size of share. :param share_server: Data structure with share server information. Not used by this driver. """ share_id = self._get_hnas_share_id(share['id']) LOG.debug("Expanding share in HNAS: %(shr_id)s.", {'shr_id': share['id']}) self._extend_share(share_id, share['size'], new_size) LOG.info(_LI("Share %(shr_id)s successfully extended to " "%(shr_size)s."), {'shr_id': share['id'], 'shr_size': six.text_type(new_size)}) # TODO(alyson): Implement in DHSS = true mode def get_network_allocations_number(self): """Track allocations_number in DHSS = true. When using the setting driver_handles_share_server = false does not require to track allocations_number because we do not handle network stuff. """ return 0 def _update_share_stats(self, data=None): """Updates the Capability of Backend.""" LOG.debug("Updating Backend Capability Information - HDS HNAS.") self._check_fs_mounted() total_space, free_space = self.hnas.get_stats() reserved = self.configuration.safe_get('reserved_share_percentage') data = { 'share_backend_name': self.backend_name, 'driver_handles_share_servers': self.driver_handles_share_servers, 'vendor_name': 'HDS', 'driver_version': '2.0.0', 'storage_protocol': 'NFS', 'total_capacity_gb': total_space, 'free_capacity_gb': free_space, 'reserved_percentage': reserved, 'qos': False, 'thin_provisioning': True, } LOG.info(_LI("HNAS Capabilities: %(data)s."), {'data': six.text_type(data)}) super(HDSHNASDriver, self)._update_share_stats(data) def manage_existing(self, share, driver_options): """Manages a share that exists on backend. :param share: Share that will be managed. :param driver_options: Empty dict or dict with 'volume_id' option. :returns: Returns a dict with size of share managed and its location (your path in file-system). 
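Illustrative return value (the EVS IP and share id are hypothetical; the size
is the quota read from the backend, in GB)::

    {'size': 10,
     'export_locations': ['172.24.44.10:/shares/<share_id>']}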
""" share_id = self._get_hnas_share_id(share['id']) if share_id != share['id']: msg = _("Share ID %s already exists, cannot manage.") % share_id raise exception.HNASBackendException(msg=msg) LOG.info(_LI("Share %(shr_path)s will be managed with ID %(shr_id)s."), {'shr_path': share['export_locations'][0]['path'], 'shr_id': share['id']}) old_path_info = share['export_locations'][0]['path'].split(':') old_path = old_path_info[1].split('/') if len(old_path) == 3: evs_ip = old_path_info[0] share_id = old_path[2] else: msg = _("Incorrect path. It should have the following format: " "IP:/shares/share_id.") raise exception.ShareBackendException(msg=msg) if evs_ip != self.hnas_evs_ip: msg = _("The EVS IP %(evs)s is not " "configured.") % {'evs': evs_ip} raise exception.ShareBackendException(msg=msg) if self.backend_name not in share['host']: msg = _("The backend passed in the host parameter (%(shr)s) is " "not configured.") % {'shr': share['host']} raise exception.ShareBackendException(msg=msg) output = self._manage_existing(share_id) self.private_storage.update( share['id'], {'hnas_id': share_id}) return output def unmanage(self, share): """Unmanages a share. :param share: Share that will be unmanaged. """ self.private_storage.delete(share['id']) if len(share['export_locations']) == 0: LOG.info(_LI("The share with ID %(shr_id)s is no longer being " "managed."), {'shr_id': share['id']}) else: LOG.info(_LI("The share with current path %(shr_path)s and ID " "%(shr_id)s is no longer being managed."), {'shr_path': share['export_locations'][0]['path'], 'shr_id': share['id']}) def shrink_share(self, share, new_size, share_server=None): """Shrinks a share to new size. :param share: Share that will be shrunk. :param new_size: New size of share. :param share_server: Data structure with share server information. Not used by this driver. """ share_id = self._get_hnas_share_id(share['id']) LOG.debug("Shrinking share in HNAS: %(shr_id)s.", {'shr_id': share['id']}) self._shrink_share(share_id, share['size'], new_size) LOG.info(_LI("Share %(shr_id)s successfully shrunk to " "%(shr_size)sG."), {'shr_id': share['id'], 'shr_size': six.text_type(new_size)}) def _get_hnas_share_id(self, share_id): hnas_id = self.private_storage.get(share_id, 'hnas_id') if hnas_id is None: hnas_id = share_id return hnas_id def _create_share(self, share_id, share_size): """Creates share. Creates a virtual-volume, adds a quota limit and exports it. :param share_id: ID of share that will be created. :param share_size: Size limit of share. :returns: Returns a path of /shares/share_id if the export was created successfully. """ path = '/shares/' + share_id self._check_fs_mounted() self.hnas.vvol_create(share_id) self.hnas.quota_add(share_id, share_size) LOG.debug("Share created with id %(shr)s, size %(size)sG.", {'shr': share_id, 'size': share_size}) try: # Create NFS export self.hnas.nfs_export_add(share_id) LOG.debug("NFS Export created to %(shr)s.", {'shr': share_id}) return path except exception.HNASBackendException as e: with excutils.save_and_reraise_exception(): self.hnas.vvol_delete(share_id) msg = six.text_type(e) LOG.exception(msg) def _check_fs_mounted(self): if not self.hnas.check_fs_mounted(): LOG.debug("Filesystem %(fs)s is unmounted. Mounting...", {'fs': self.fs_name}) self.hnas.mount() def _ensure_share(self, share_id): """Ensure that share is exported. :param share_id: ID of share that will be checked. :returns: Returns a path of /shares/share_id if the export is ok. 
""" path = '/shares/' + share_id self._check_fs_mounted() self.hnas.check_vvol(share_id) self.hnas.check_quota(share_id) self.hnas.check_export(share_id) return path def _shrink_share(self, share_id, old_size, new_size): """Shrinks a share to new size. :param share_id: ID of share that will be shrunk. :param old_size: Current size of share that will be shrunk. :param new_size: New size of share after shrink operation. """ self._ensure_share(share_id) usage = self.hnas.get_share_usage(share_id) LOG.debug("Usage space in share %(share)s: %(usage)sG", {'share': share_id, 'usage': usage}) if new_size > usage: self.hnas.modify_quota(share_id, new_size) else: raise exception.ShareShrinkingPossibleDataLoss(share_id=share_id) def _extend_share(self, share_id, old_size, new_size): """Extends a share to new size. :param share_id: ID of share that will be extended. :param old_size: Current size of share that will be extended. :param new_size: New size of share after extend operation. """ self._ensure_share(share_id) total, available_space = self.hnas.get_stats() LOG.debug("Available space in filesystem: %(space)sG.", {'space': available_space}) if (new_size - old_size) < available_space: self.hnas.modify_quota(share_id, new_size) else: msg = (_("Share %s cannot be extended due to insufficient space.") % share_id) raise exception.HNASBackendException(msg=msg) def _delete_share(self, share_id): """Deletes share. It uses tree-delete-job-submit to format and delete virtual-volumes. Quota is deleted with virtual-volume. :param share_id: ID of share that will be deleted. """ self._check_fs_mounted() self.hnas.nfs_export_del(share_id) self.hnas.vvol_delete(share_id) LOG.debug("Export and share successfully deleted: %(shr)s on Manila.", {'shr': share_id}) def _manage_existing(self, share_id): """Manages a share that exists on backend. :param share_id: ID of share that will be managed. :returns: Returns a dict with size of share managed and its location (your path in file-system). """ self._ensure_share(share_id) share_size = self.hnas.get_share_quota(share_id) if share_size is None: msg = (_("The share %s trying to be managed does not have a " "quota limit, please set it before manage.") % share_id) raise exception.ManageInvalidShare(msg) path = self.hnas_evs_ip + ':/shares/' + share_id return {'size': share_size, 'export_locations': [path]} def _create_snapshot(self, share_id, snapshot_id): """Creates a snapshot of share. It copies the directory and all files to a new directory inside /snapshots/share_id/. :param share_id: ID of share for snapshot. :param snapshot_id: ID of new snapshot. """ self._ensure_share(share_id) saved_list = self.hnas.get_host_list(share_id) new_list = [] for access in saved_list: new_list.append(access.replace('(rw)', '(ro)')) self.hnas.update_access_rule(share_id, new_list) src_path = '/shares/' + share_id dest_path = '/snapshots/' + share_id + '/' + snapshot_id try: self.hnas.tree_clone(src_path, dest_path) except exception.HNASNothingToCloneException: LOG.warning(_LW("Source directory is empty, creating an empty " "directory.")) self.hnas.create_directory(dest_path) finally: self.hnas.update_access_rule(share_id, saved_list) def _delete_snapshot(self, share_id, snapshot_id): """Deletes snapshot. It receives the share_id only to mount the path for snapshot. :param share_id: ID of share that snapshot was created. :param snapshot_id: ID of snapshot. 
""" path = '/snapshots/' + share_id + '/' + snapshot_id self.hnas.tree_delete(path) path = '/snapshots/' + share_id self.hnas.delete_directory(path) def _create_share_from_snapshot(self, share, snapshot): """Creates a new share from snapshot. It copies everything from snapshot directory to a new vvol, set a quota limit for it and export. :param share: a dict from new share. :param snapshot: a dict from snapshot that will be copied to new share. :returns: Returns the path for new share. """ dest_path = '/shares/' + share['id'] src_path = '/snapshots/' + snapshot['share_id'] + '/' + snapshot['id'] # Before copying everything to new vvol, we need to create it, # because we only can transform an empty directory into a vvol. self._check_fs_mounted() self.hnas.vvol_create(share['id']) self.hnas.quota_add(share['id'], share['size']) try: self.hnas.tree_clone(src_path, dest_path) except exception.HNASNothingToCloneException: LOG.warning(_LW("Source directory is empty, exporting " "directory.")) self.hnas.nfs_export_add(share['id']) return dest_path manila-2.0.0/manila/share/drivers/quobyte/0000775000567000056710000000000012701407265021630 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/quobyte/__init__.py0000664000567000056710000000000012701407107023722 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/quobyte/quobyte.py0000664000567000056710000003630112701407107023670 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Quobyte Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Quobyte driver. Manila shares are directly mapped to Quobyte volumes. The access to the shares is provided by the Quobyte NFS proxy (a Ganesha NFS server). """ import math from oslo_config import cfg from oslo_log import log from oslo_utils import units import six from manila.common import constants from manila import exception from manila.i18n import _ from manila.i18n import _LE from manila.i18n import _LI from manila.i18n import _LW from manila.share import driver from manila.share.drivers.quobyte import jsonrpc LOG = log.getLogger(__name__) quobyte_manila_share_opts = [ cfg.StrOpt('quobyte_api_url', help='URL of the Quobyte API server (http or https)'), cfg.StrOpt('quobyte_api_ca', help='The X.509 CA file to verify the server cert.'), cfg.BoolOpt('quobyte_delete_shares', default=False, help='Actually deletes shares (vs. 
unexport)'), cfg.StrOpt('quobyte_api_username', default='admin', help='Username for Quobyte API server.'), cfg.StrOpt('quobyte_api_password', default='quobyte', secret=True, help='Password for Quobyte API server'), cfg.StrOpt('quobyte_volume_configuration', default='BASE', help='Name of volume configuration used for new shares.'), cfg.StrOpt('quobyte_default_volume_user', default='root', help='Default owning user for new volumes.'), cfg.StrOpt('quobyte_default_volume_group', default='root', help='Default owning group for new volumes.'), ] CONF = cfg.CONF CONF.register_opts(quobyte_manila_share_opts) class QuobyteShareDriver(driver.ExecuteMixin, driver.ShareDriver,): """Map share commands to Quobyte volumes. Version history: 1.0 - Initial driver. 1.0.1 - Adds ensure_share() implementation. 1.1 - Adds extend_share() and shrink_share() implementation. 1.2 - Adds update_access() implementation and related methods """ DRIVER_VERSION = '1.2' def __init__(self, *args, **kwargs): super(QuobyteShareDriver, self).__init__(False, *args, **kwargs) self.configuration.append_config_values(quobyte_manila_share_opts) self.backend_name = (self.configuration.safe_get('share_backend_name') or CONF.share_backend_name or 'Quobyte') def _fetch_existing_access(self, context, share): volume_uuid = self._resolve_volume_name( share['name'], self._get_project_name(context, share['project_id'])) result = self.rpc.call('getConfiguration', {}) if result is None: raise exception.QBException( "Could not retrieve Quobyte configuration data!") tenant_configs = result['tenant_configuration'] qb_access_list = [] for tc in tenant_configs: for va in tc['volume_access']: if va['volume_uuid'] == volume_uuid: a_level = constants.ACCESS_LEVEL_RW if va['read_only']: a_level = constants.ACCESS_LEVEL_RO qb_access_list.append({ 'access_to': va['restrict_to_network'], 'access_level': a_level, 'access_type': 'ip' }) return qb_access_list def do_setup(self, context): """Prepares the backend.""" self.rpc = jsonrpc.JsonRpc( url=self.configuration.quobyte_api_url, ca_file=self.configuration.quobyte_api_ca, user_credentials=( self.configuration.quobyte_api_username, self.configuration.quobyte_api_password)) try: self.rpc.call('getInformation', {}) except Exception as exc: LOG.error(_LE("Could not connect to API: %s"), exc) raise exception.QBException( _('Could not connect to API: %s') % exc) def _update_share_stats(self): total_gb, free_gb = self._get_capacities() data = dict( storage_protocol='NFS', vendor_name='Quobyte', share_backend_name=self.backend_name, driver_version=self.DRIVER_VERSION, total_capacity_gb=total_gb, free_capacity_gb=free_gb, reserved_percentage=self.configuration.reserved_share_percentage) super(QuobyteShareDriver, self)._update_share_stats(data) def _get_capacities(self): result = self.rpc.call('getSystemStatistics', {}) total = float(result['total_logical_capacity']) used = float(result['total_logical_usage']) LOG.info(_LI('Read capacity of %(cap)s bytes and ' 'usage of %(use)s bytes from backend. '), {'cap': total, 'use': used}) free = total - used # floor numbers to nine digits (bytes) total = math.floor((total / units.Gi) * units.G) / units.G free = math.floor((free / units.Gi) * units.G) / units.G return total, free def check_for_setup_error(self): pass def get_network_allocations_number(self): return 0 def _get_project_name(self, context, project_id): """Retrieve the project name. TODO (kaisers): retrieve the project name in order to store and use in the backend for better usability. 
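Until that TODO is implemented the method returns project_id unchanged; the
value is used as the tenant_domain argument of resolveVolumeName calls.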
""" return project_id def _resize_share(self, share, new_size): # TODO(kaisers): check and update existing quota if already present self.rpc.call('setQuota', {"consumer": {"type": 3, "identifier": share["name"]}, "limits": {"type": 5, "value": new_size}}) def _resolve_volume_name(self, volume_name, tenant_domain): """Resolve a volume name to the global volume uuid.""" result = self.rpc.call('resolveVolumeName', dict( volume_name=volume_name, tenant_domain=tenant_domain)) if result: return result['volume_uuid'] return None # not found def _subtract_access_lists(self, list_a, list_b): """Returns a list of elements in list_a that are not in list_b :param list_a: Base list of access rules :param list_b: List of access rules not to be returned :return: List of elements of list_a not present in list_b """ sub_tuples_list = [{"to": s.get('access_to'), "type": s.get('access_type'), "level": s.get('access_level')} for s in list_b] return [r for r in list_a if ( {"to": r.get("access_to"), "type": r.get("access_type"), "level": r.get("access_level")} not in sub_tuples_list)] def create_share(self, context, share, share_server=None): """Create or export a volume that is usable as a Manila share.""" if share['share_proto'] != 'NFS': raise exception.QBException( _('Quobyte driver only supports NFS shares')) volume_uuid = self._resolve_volume_name( share['name'], self._get_project_name(context, share['project_id'])) if not volume_uuid: result = self.rpc.call('createVolume', dict( name=share['name'], tenant_domain=share['project_id'], root_user_id=self.configuration.quobyte_default_volume_user, root_group_id=self.configuration.quobyte_default_volume_group, configuration_name=(self.configuration. quobyte_volume_configuration))) volume_uuid = result['volume_uuid'] result = self.rpc.call('exportVolume', dict( volume_uuid=volume_uuid, protocol='NFS')) return '%(nfs_server_ip)s:%(nfs_export_path)s' % result def delete_share(self, context, share, share_server=None): """Delete the corresponding Quobyte volume.""" volume_uuid = self._resolve_volume_name( share['name'], self._get_project_name(context, share['project_id'])) if not volume_uuid: LOG.warning(_LW("No volume found for " "share %(project_id)s/%(name)s") % {"project_id": share['project_id'], "name": share['name']}) return if self.configuration.quobyte_delete_shares: self.rpc.call('deleteVolume', {'volume_uuid': volume_uuid}) self.rpc.call('exportVolume', dict( volume_uuid=volume_uuid, remove_export=True)) def ensure_share(self, context, share, share_server=None): """Invoked to ensure that share is exported. :param context: The `context.RequestContext` object for the request :param share: Share instance that will be checked. :param share_server: Data structure with share server information. Not used by this driver. 
:returns: IP: of share :raises: :ShareResourceNotFound: If the share instance cannot be found in the backend """ volume_uuid = self._resolve_volume_name( share['name'], self._get_project_name(context, share['project_id'])) LOG.debug("Ensuring Quobyte share %s" % share['name']) if not volume_uuid: raise (exception.ShareResourceNotFound( share_id=share['id'])) result = self.rpc.call('exportVolume', dict( volume_uuid=volume_uuid, protocol='NFS')) return '%(nfs_server_ip)s:%(nfs_export_path)s' % result def _allow_access(self, context, share, access, share_server=None): """Allow access to a share.""" if access['access_type'] != 'ip': raise exception.InvalidShareAccess( _('Quobyte driver only supports ip access control')) volume_uuid = self._resolve_volume_name( share['name'], self._get_project_name(context, share['project_id'])) ro = access['access_level'] == (constants.ACCESS_LEVEL_RO) call_params = { "volume_uuid": volume_uuid, "read_only": ro, "add_allow_ip": access['access_to']} self.rpc.call('exportVolume', call_params) def _deny_access(self, context, share, access, share_server=None): """Remove white-list ip from a share.""" if access['access_type'] != 'ip': LOG.debug('Quobyte driver only supports ip access control. ' 'Ignoring deny access call for %s , %s', share['name'], self._get_project_name(context, share['project_id'])) return volume_uuid = self._resolve_volume_name( share['name'], self._get_project_name(context, share['project_id'])) call_params = { "volume_uuid": volume_uuid, "remove_allow_ip": access['access_to']} self.rpc.call('exportVolume', call_params) def extend_share(self, ext_share, ext_size, share_server=None): """Uses resize_share to extend a share. :param ext_share: Share model. :param ext_size: New size of share (new_size > share['size']). :param share_server: Currently not used. """ self._resize_share(share=ext_share, new_size=ext_size) def shrink_share(self, shrink_share, shrink_size, share_server=None): """Uses resize_share to shrink a share. Quobyte uses soft quotas. If a shares current size is bigger than the new shrunken size no data is lost. Data can be continuously read from the share but new writes receive out of disk space replies. :param shrink_share: Share model. :param shrink_size: New size of share (new_size < share['size']). :param share_server: Currently not used. """ self._resize_share(share=shrink_share, new_size=shrink_size) def update_access(self, context, share, access_rules, add_rules, delete_rules, share_server=None): """Update access rules for given share. Two different cases are supported in here: 1. Recovery after error - 'access_rules' contains all access_rules, 'add_rules' and 'delete_rules' are empty. Driver should apply all access rules for given share. 2. Adding/Deleting of several access rules - 'access_rules' contains all access_rules, 'add_rules' and 'delete_rules' contain rules which should be added/deleted. Driver can ignore rules in 'access_rules' and apply only rules from 'add_rules' and 'delete_rules'. :param context: Current context :param share: Share model with share data. :param access_rules: All access rules for given share :param add_rules: Empty List or List of access rules which should be added. access_rules already contains these rules. :param delete_rules: Empty List or List of access rules which should be removed. access_rules doesn't contain these rules. 
:param share_server: None or Share server model :raises If all of the *_rules params are None the method raises an InvalidShareAccess exception """ if (add_rules or delete_rules): # Handling access rule update for d_rule in delete_rules: self._deny_access(context, share, d_rule) for a_rule in add_rules: self._allow_access(context, share, a_rule) else: if not access_rules: LOG.warning(_LW("No access rules provided in update_access.")) else: # Handling access rule recovery existing_rules = self._fetch_existing_access(context, share) missing_rules = self._subtract_access_lists(access_rules, existing_rules) for a_rule in missing_rules: LOG.debug("Adding rule %s in recovery.", six.text_type(a_rule)) self._allow_access(context, share, a_rule) superfluous_rules = self._subtract_access_lists(existing_rules, access_rules) for d_rule in superfluous_rules: LOG.debug("Removing rule %s in recovery.", six.text_type(d_rule)) self._deny_access(context, share, d_rule) manila-2.0.0/manila/share/drivers/quobyte/jsonrpc.py0000664000567000056710000002076412701407107023664 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Quobyte Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Quobyte driver helper. Control Quobyte over its JSON RPC API. """ import base64 import socket import ssl from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import excutils import six from six.moves import http_client import six.moves.urllib.parse as urlparse from manila import exception from manila.i18n import _, _LW LOG = log.getLogger(__name__) ERROR_ENOENT = 2 CONNECTION_RETRIES = 3 class BasicAuthCredentials(object): def __init__(self, username, password): self._username = username self._password = password @property def username(self): return self._username def get_authorization_header(self): header = '%s:%s' % (self._username, self._password) auth = base64.standard_b64encode(six.b(header)) return 'BASIC %s' % auth.decode() class HTTPSConnectionWithCaVerification(http_client.HTTPConnection): """Verify server cert against a given CA certificate.""" default_port = http_client.HTTPS_PORT def __init__(self, host, port=None, key_file=None, cert_file=None, ca_file=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): http_client.HTTPConnection.__init__(self, host, port, timeout=timeout) self.key_file = key_file self.cert_file = cert_file self.ca_file = ca_file def connect(self): """Connect to a host on a given (SSL) port.""" sock = socket.create_connection((self.host, self.port), self.timeout) if self._tunnel_host: self.sock = sock self._tunnel() self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ca_certs=self.ca_file, cert_reqs=ssl.CERT_REQUIRED) http_client.__all__.append("HTTPSConnectionWithCaVerification") class JsonRpc(object): def __init__(self, url, user_credentials, ca_file=None): parsedurl = urlparse.urlparse(url) self._url = parsedurl.geturl() self._netloc = parsedurl.netloc self._ca_file = ca_file if parsedurl.scheme == 'https': if self._ca_file: 
self._connection = HTTPSConnectionWithCaVerification( self._netloc, ca_file=self._ca_file.name) else: self._connection = http_client.HTTPSConnection(self._netloc) LOG.warning(_LW( "Will not verify the server certificate of the API service" " because the CA certificate is not available.")) else: self._connection = http_client.HTTPConnection(self._netloc) self._id = 0 self._fail_fast = True self._credentials = BasicAuthCredentials( user_credentials[0], user_credentials[1]) self._require_cert_verify = self._ca_file is not None self._disabled_cert_verification = False def call(self, method_name, user_parameters): parameters = {'retry': 'INFINITELY'} # Backend specific setting if user_parameters: parameters.update(user_parameters) call_body = {'jsonrpc': '2.0', 'method': method_name, 'params': parameters, 'id': six.text_type(self._id)} self.call_counter = 0 self._connection.connect() # prevents http_client timing issue while self.call_counter < CONNECTION_RETRIES: self.call_counter += 1 try: self._id += 1 call_body['id'] = six.text_type(self._id) LOG.debug("Posting to Quobyte backend: %s", jsonutils.dumps(call_body)) self._connection.request( "POST", self._url + '/', jsonutils.dumps(call_body), dict(Authorization=(self._credentials. get_authorization_header()))) response = self._connection.getresponse() self._throw_on_http_error(response) result = jsonutils.loads(response.read()) LOG.debug("Retrieved data from Quobyte backend: %s", result) return self._checked_for_application_error(result) except ssl.SSLError as e: # Generic catch because OpenSSL does not return # meaningful errors. if (not self._disabled_cert_verification and not self._require_cert_verify): LOG.warning(_LW( "Could not verify server certificate of " "API service against CA.")) self._connection.close() # Core HTTPSConnection does no certificate verification. self._connection = http_client.HTTPSConnection( self._netloc) self._disabled_cert_verification = True else: raise exception.QBException(_( "Client SSL subsystem returned error: %s") % e) except http_client.BadStatusLine as e: raise exception.QBException(_( "If SSL is enabled for the API service, the URL must" " start with 'https://' for the URL. Failed to parse" " status code from server response. Error was %s") % e) except socket.error as se: error_code = se.errno error_msg = se.strerror composite_msg = _("Socket error No. 
%(code)s (%(msg)s) " "connecting to API with") % { 'code': (six.text_type(error_code)), 'msg': error_msg} if self._fail_fast: raise exception.QBException(composite_msg) else: LOG.warning(composite_msg) except http_client.HTTPException as e: with excutils.save_and_reraise_exception() as ctxt: if self._fail_fast: ctxt.reraise = True else: LOG.warning(_LW("Encountered error, retrying: %s"), six.text_type(e)) ctxt.reraise = False raise exception.QBException("Unable to connect to backend after " "%s retries" % six.text_type(CONNECTION_RETRIES)) def _throw_on_http_error(self, response): if response.status == 401: raise exception.QBException( _("JSON RPC failed: unauthorized user %(status)s %(reason)s" " Please check the Quobyte API service log for " "more details.") % {'status': six.text_type(response.status), 'reason': response.reason}) elif response.status >= 300: raise exception.QBException( _("JSON RPC failed: %(status)s %(reason)s" " Please check the Quobyte API service log for " "more details.") % {'status': six.text_type(response.status), 'reason': response.reason}) def _checked_for_application_error(self, result): if 'error' in result and result['error']: if 'message' in result['error'] and 'code' in result['error']: if result["error"]["code"] == ERROR_ENOENT: return None # No Entry else: raise exception.QBRpcException( result=result["error"]["message"], qbcode=result["error"]["code"]) else: raise exception.QBException(six.text_type(result["error"])) return result["result"] manila-2.0.0/manila/share/drivers/generic.py0000664000567000056710000015034512701407112022125 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 NetApp, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
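# NOTE (editorial sketch): the share_opts registered below are read from a
# backend section of manila.conf. An illustrative, non-authoritative backend
# configuration for this driver might look like (values are examples only):
#
#   [generic_backend]
#   share_driver = manila.share.drivers.generic.GenericShareDriver
#   driver_handles_share_servers = True
#   share_mount_path = /shares
#   share_volume_fstype = ext4
#   cinder_volume_type = manila-volumes
#
# Options other than those defined in this module (share_driver,
# driver_handles_share_servers) come from the common driver option groups.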
"""Generic Driver for shares.""" import os import time from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log from oslo_utils import importutils from oslo_utils import units import retrying import six from manila.common import constants as const from manila import compute from manila import context from manila import exception from manila.i18n import _ from manila.i18n import _LE from manila.i18n import _LI from manila.i18n import _LW from manila.share import driver from manila.share.drivers import service_instance from manila import utils from manila import volume LOG = log.getLogger(__name__) share_opts = [ cfg.StrOpt('smb_template_config_path', default='$state_path/smb.conf', help="Path to smb config."), cfg.StrOpt('volume_name_template', default='manila-share-%s', help="Volume name template."), cfg.StrOpt('volume_snapshot_name_template', default='manila-snapshot-%s', help="Volume snapshot name template."), cfg.StrOpt('share_mount_path', default='/shares', help="Parent path in service instance where shares " "will be mounted."), cfg.IntOpt('max_time_to_create_volume', default=180, help="Maximum time to wait for creating cinder volume."), cfg.IntOpt('max_time_to_extend_volume', default=180, help="Maximum time to wait for extending cinder volume."), cfg.IntOpt('max_time_to_attach', default=120, help="Maximum time to wait for attaching cinder volume."), cfg.StrOpt('service_instance_smb_config_path', default='$share_mount_path/smb.conf', help="Path to SMB config in service instance."), cfg.ListOpt('share_helpers', default=[ 'CIFS=manila.share.drivers.helpers.CIFSHelperIPAccess', 'NFS=manila.share.drivers.helpers.NFSHelper', ], help='Specify list of share export helpers.'), cfg.StrOpt('share_volume_fstype', default='ext4', choices=['ext4', 'ext3'], help='Filesystem type of the share volume.'), cfg.StrOpt('cinder_volume_type', help='Name or id of cinder volume type which will be used ' 'for all volumes created by driver.'), ] CONF = cfg.CONF CONF.register_opts(share_opts) # NOTE(u_glide): These constants refer to the column number in the "df" output BLOCK_DEVICE_SIZE_INDEX = 1 USED_SPACE_INDEX = 2 def ensure_server(f): def wrap(self, context, *args, **kwargs): server = kwargs.get('share_server') if not self.driver_handles_share_servers: if not server: server = self.service_instance_manager.get_common_server() kwargs['share_server'] = server else: raise exception.ManilaException( _("Share server handling is not available. " "But 'share_server' was provided. '%s'. " "Share network should not be used.") % server.get('id')) elif not server: raise exception.ManilaException( _("Share server handling is enabled. But 'share_server' " "is not provided. 
Make sure you used 'share_network'.")) if not server.get('backend_details'): raise exception.ManilaException( _("Share server '%s' does not have backend details.") % server['id']) if not self.service_instance_manager.ensure_service_instance( context, server['backend_details']): raise exception.ServiceInstanceUnavailable() return f(self, context, *args, **kwargs) return wrap class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver): """Executes commands relating to Shares.""" def __init__(self, *args, **kwargs): """Do initialization.""" super(GenericShareDriver, self).__init__( [False, True], *args, **kwargs) self.admin_context = context.get_admin_context() self.configuration.append_config_values(share_opts) self._helpers = {} self.backend_name = self.configuration.safe_get( 'share_backend_name') or "Cinder_Volumes" self.ssh_connections = {} self._setup_service_instance_manager() self.private_storage = kwargs.get('private_storage') def _setup_service_instance_manager(self): self.service_instance_manager = ( service_instance.ServiceInstanceManager( driver_config=self.configuration)) def _ssh_exec(self, server, command, check_exit_code=True): connection = self.ssh_connections.get(server['instance_id']) ssh_conn_timeout = self.configuration.ssh_conn_timeout if not connection: ssh_pool = utils.SSHPool(server['ip'], 22, ssh_conn_timeout, server['username'], server.get('password'), server.get('pk_path'), max_size=1) ssh = ssh_pool.create() self.ssh_connections[server['instance_id']] = (ssh_pool, ssh) else: ssh_pool, ssh = connection if not ssh.get_transport().is_active(): ssh_pool.remove(ssh) ssh = ssh_pool.create() self.ssh_connections[server['instance_id']] = (ssh_pool, ssh) return processutils.ssh_execute(ssh, ' '.join(command), check_exit_code=check_exit_code) def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" def do_setup(self, context): """Any initialization the generic driver does while starting.""" super(GenericShareDriver, self).do_setup(context) self.compute_api = compute.API() self.volume_api = volume.API() self._setup_helpers() common_sv_available = False share_server = None sv_fetch_retry_interval = 5 while not (common_sv_available or self.driver_handles_share_servers): try: # Verify availability of common server share_server = ( self.service_instance_manager.get_common_server()) common_sv_available = self._is_share_server_active( context, share_server) except Exception as ex: LOG.error(ex) if not common_sv_available: time.sleep(sv_fetch_retry_interval) LOG.warning(_LW("Waiting for the common service VM to become " "available. " "Driver is currently uninitialized. " "Share server: %(share_server)s " "Retry interval: %(retry_interval)s"), dict(share_server=share_server, retry_interval=sv_fetch_retry_interval)) def _setup_helpers(self): """Initializes protocol-specific NAS drivers.""" helpers = self.configuration.share_helpers if helpers: for helper_str in helpers: share_proto, __, import_str = helper_str.partition('=') helper = importutils.import_class(import_str) self._helpers[share_proto.upper()] = helper( self._execute, self._ssh_exec, self.configuration) else: raise exception.ManilaException( "No protocol helpers selected for Generic Driver. 
" "Please specify using config option 'share_helpers'.") @ensure_server def create_share(self, context, share, share_server=None): """Creates share.""" helper = self._get_helper(share) server_details = share_server['backend_details'] volume = self._allocate_container(self.admin_context, share) volume = self._attach_volume( self.admin_context, share, server_details['instance_id'], volume) self._format_device(server_details, volume) self._mount_device(share, server_details, volume) location = helper.create_export( server_details, share['name']) export_list = [{ "path": location, "is_admin_only": False, "metadata": { # TODO(vponomaryov): remove this fake metadata when # proper appears. "export_location_metadata_example": "example", }, }] if server_details.get('admin_ip'): admin_location = location.replace( server_details['public_address'], server_details['admin_ip']) export_list.append({ "path": admin_location, "is_admin_only": True, "metadata": { # TODO(vponomaryov): remove this fake metadata when # proper appears. "export_location_metadata_example": "example", }, }) return export_list @utils.retry(exception.ProcessExecutionError, backoff_rate=1) def _is_device_file_available(self, server_details, volume): """Checks whether the device file is available""" command = ['sudo', 'test', '-b', volume['mountpoint']] self._ssh_exec(server_details, command) def _format_device(self, server_details, volume): """Formats device attached to the service vm.""" self._is_device_file_available(server_details, volume) command = ['sudo', 'mkfs.%s' % self.configuration.share_volume_fstype, volume['mountpoint']] self._ssh_exec(server_details, command) def _is_device_mounted(self, mount_path, server_details, volume=None): """Checks whether volume already mounted or not.""" log_data = { 'mount_path': mount_path, 'server_id': server_details['instance_id'], } if volume and volume.get('mountpoint', ''): log_data['volume_id'] = volume['id'] log_data['dev_mount_path'] = volume['mountpoint'] msg = ("Checking whether volume '%(volume_id)s' with mountpoint " "'%(dev_mount_path)s' is mounted on mount path '%(mount_p" "ath)s' on server '%(server_id)s' or not." % log_data) else: msg = ("Checking whether mount path '%(mount_path)s' exists on " "server '%(server_id)s' or not." 
% log_data) LOG.debug(msg) mounts_list_cmd = ['sudo', 'mount'] output, __ = self._ssh_exec(server_details, mounts_list_cmd) mounts = output.split('\n') for mount in mounts: mount_elements = mount.split(' ') if (len(mount_elements) > 2 and mount_path == mount_elements[2]): if volume: # Mount goes with device path and mount path if (volume.get('mountpoint', '') == mount_elements[0]): return True else: # Unmount goes only by mount path return True return False def _sync_mount_temp_and_perm_files(self, server_details): """Sync temporary and permanent files for mounted filesystems.""" try: self._ssh_exec( server_details, ['sudo', 'cp', const.MOUNT_FILE_TEMP, const.MOUNT_FILE], ) except exception.ProcessExecutionError as e: LOG.error(_LE("Failed to sync mount files on server '%s'."), server_details['instance_id']) raise exception.ShareBackendException(msg=six.text_type(e)) try: # Remount it to avoid postponed point of failure self._ssh_exec(server_details, ['sudo', 'mount', '-a']) except exception.ProcessExecutionError as e: LOG.error(_LE("Failed to mount all shares on server '%s'."), server_details['instance_id']) raise exception.ShareBackendException(msg=six.text_type(e)) def _mount_device(self, share, server_details, volume): """Mounts block device to the directory on service vm. Mounts attached and formatted block device to the directory if not mounted yet. """ @utils.synchronized('generic_driver_mounts_' '%s' % server_details['instance_id']) def _mount_device_with_lock(): mount_path = self._get_mount_path(share) log_data = { 'dev': volume['mountpoint'], 'path': mount_path, 'server': server_details['instance_id'], } try: if not self._is_device_mounted(mount_path, server_details, volume): LOG.debug("Mounting '%(dev)s' to path '%(path)s' on " "server '%(server)s'.", log_data) mount_cmd = ['sudo mkdir -p', mount_path, '&&'] mount_cmd.extend(['sudo mount', volume['mountpoint'], mount_path]) mount_cmd.extend(['&& sudo chmod 777', mount_path]) self._ssh_exec(server_details, mount_cmd) # Add mount permanently self._sync_mount_temp_and_perm_files(server_details) else: LOG.warning(_LW("Mount point '%(path)s' already exists on " "server '%(server)s'."), log_data) except exception.ProcessExecutionError as e: raise exception.ShareBackendException(msg=six.text_type(e)) return _mount_device_with_lock() @utils.retry(exception.ProcessExecutionError) def _unmount_device(self, share, server_details): """Unmounts block device from directory on service vm.""" @utils.synchronized('generic_driver_mounts_' '%s' % server_details['instance_id']) def _unmount_device_with_lock(): mount_path = self._get_mount_path(share) log_data = { 'path': mount_path, 'server': server_details['instance_id'], } if self._is_device_mounted(mount_path, server_details): LOG.debug("Unmounting path '%(path)s' on server " "'%(server)s'.", log_data) unmount_cmd = ['sudo umount', mount_path, '&& sudo rmdir', mount_path] self._ssh_exec(server_details, unmount_cmd) # Remove mount permanently self._sync_mount_temp_and_perm_files(server_details) else: LOG.warning(_LW("Mount point '%(path)s' does not exist on " "server '%(server)s'."), log_data) return _unmount_device_with_lock() def _get_mount_path(self, share): """Returns the path to use for mount device in service vm.""" return os.path.join(self.configuration.share_mount_path, share['name']) def _attach_volume(self, context, share, instance_id, volume): """Attaches cinder volume to service vm.""" @utils.synchronized( "generic_driver_attach_detach_%s" % instance_id, external=True) def 
do_attach(volume): if volume['status'] == 'in-use': attached_volumes = [vol.id for vol in self.compute_api.instance_volumes_list( self.admin_context, instance_id)] if volume['id'] in attached_volumes: return volume else: raise exception.ManilaException( _('Volume %s is already attached to another instance') % volume['id']) @retrying.retry(stop_max_attempt_number=3, wait_fixed=2000, retry_on_exception=lambda exc: True) def attach_volume(): self.compute_api.instance_volume_attach( self.admin_context, instance_id, volume['id']) attach_volume() t = time.time() while time.time() - t < self.configuration.max_time_to_attach: volume = self.volume_api.get(context, volume['id']) if volume['status'] == 'in-use': return volume elif volume['status'] != 'attaching': raise exception.ManilaException( _('Failed to attach volume %s') % volume['id']) time.sleep(1) else: err_msg = { 'volume_id': volume['id'], 'max_time': self.configuration.max_time_to_attach } raise exception.ManilaException( _('Volume %(volume_id)s has not been attached in ' '%(max_time)ss. Giving up.') % err_msg) return do_attach(volume) def _get_volume_name(self, share_id): return self.configuration.volume_name_template % share_id def _get_volume(self, context, share_id): """Finds volume, associated to the specific share.""" volume_id = self.private_storage.get(share_id, 'volume_id') if volume_id is not None: return self.volume_api.get(context, volume_id) else: # Fallback to legacy method return self._get_volume_legacy(context, share_id) def _get_volume_legacy(self, context, share_id): # NOTE(u_glide): this method is deprecated and will be removed in # future versions volume_name = self._get_volume_name(share_id) search_opts = {'name': volume_name} if context.is_admin: search_opts['all_tenants'] = True volumes_list = self.volume_api.get_all(context, search_opts) if len(volumes_list) == 1: return volumes_list[0] elif len(volumes_list) > 1: LOG.error( _LE("Expected only one volume in volume list with name " "'%(name)s', but got more than one in a result - " "'%(result)s'."), { 'name': volume_name, 'result': volumes_list}) raise exception.ManilaException( _("Error. Ambiguous volumes for name '%s'") % volume_name) return None def _get_volume_snapshot(self, context, snapshot_id): """Find volume snapshot associated to the specific share snapshot.""" volume_snapshot_id = self.private_storage.get( snapshot_id, 'volume_snapshot_id') if volume_snapshot_id is not None: return self.volume_api.get_snapshot(context, volume_snapshot_id) else: # Fallback to legacy method return self._get_volume_snapshot_legacy(context, snapshot_id) def _get_volume_snapshot_legacy(self, context, snapshot_id): # NOTE(u_glide): this method is deprecated and will be removed in # future versions volume_snapshot_name = ( self.configuration.volume_snapshot_name_template % snapshot_id) volume_snapshot_list = self.volume_api.get_all_snapshots( context, {'name': volume_snapshot_name}) volume_snapshot = None if len(volume_snapshot_list) == 1: volume_snapshot = volume_snapshot_list[0] elif len(volume_snapshot_list) > 1: LOG.error( _LE("Expected only one volume snapshot in list with name " "'%(name)s', but got more than one in a result - " "'%(result)s'."), { 'name': volume_snapshot_name, 'result': volume_snapshot_list}) raise exception.ManilaException( _('Error. 
Ambiguous volume snaphots')) return volume_snapshot def _detach_volume(self, context, share, server_details): """Detaches cinder volume from service vm.""" instance_id = server_details['instance_id'] @utils.synchronized( "generic_driver_attach_detach_%s" % instance_id, external=True) def do_detach(): attached_volumes = [vol.id for vol in self.compute_api.instance_volumes_list( self.admin_context, instance_id)] volume = self._get_volume(context, share['id']) if volume and volume['id'] in attached_volumes: self.compute_api.instance_volume_detach( self.admin_context, instance_id, volume['id'] ) t = time.time() while time.time() - t < self.configuration.max_time_to_attach: volume = self.volume_api.get(context, volume['id']) if volume['status'] in (const.STATUS_AVAILABLE, const.STATUS_ERROR): break time.sleep(1) else: err_msg = { 'volume_id': volume['id'], 'max_time': self.configuration.max_time_to_attach } raise exception.ManilaException( _('Volume %(volume_id)s has not been detached in ' '%(max_time)ss. Giving up.') % err_msg) do_detach() def _allocate_container(self, context, share, snapshot=None): """Creates cinder volume, associated to share by name.""" volume_snapshot = None if snapshot: volume_snapshot = self._get_volume_snapshot(context, snapshot['id']) volume = self.volume_api.create( context, share['size'], self.configuration.volume_name_template % share['id'], '', snapshot=volume_snapshot, volume_type=self.configuration.cinder_volume_type, availability_zone=share['availability_zone']) self.private_storage.update( share['id'], {'volume_id': volume['id']}) msg_error = _('Failed to create volume') msg_timeout = ( _('Volume has not been created in %ss. Giving up') % self.configuration.max_time_to_create_volume ) return self._wait_for_available_volume( volume, self.configuration.max_time_to_create_volume, msg_error=msg_error, msg_timeout=msg_timeout ) def _wait_for_available_volume(self, volume, timeout, msg_error, msg_timeout, expected_size=None): t = time.time() while time.time() - t < timeout: if volume['status'] == const.STATUS_AVAILABLE: if expected_size and volume['size'] != expected_size: LOG.debug("The volume %(vol_id)s is available but the " "volume size does not match the expected size. " "A volume resize operation may be pending. " "Expected size: %(expected_size)s, " "Actual size: %(volume_size)s.", dict(vol_id=volume['id'], expected_size=expected_size, volume_size=volume['size'])) else: break elif 'error' in volume['status'].lower(): raise exception.ManilaException(msg_error) time.sleep(1) volume = self.volume_api.get(self.admin_context, volume['id']) else: raise exception.ManilaException(msg_timeout) return volume def _deallocate_container(self, context, share): """Deletes cinder volume.""" try: volume = self._get_volume(context, share['id']) except exception.VolumeNotFound: LOG.info(_LI("Volume not found. Already deleted?")) volume = None if volume: if volume['status'] == 'in-use': raise exception.ManilaException( _('Volume is still in use and ' 'cannot be deleted now.')) self.volume_api.delete(context, volume['id']) t = time.time() while (time.time() - t < self.configuration.max_time_to_create_volume): try: volume = self.volume_api.get(context, volume['id']) except exception.VolumeNotFound: LOG.debug('Volume was deleted successfully') break time.sleep(1) else: raise exception.ManilaException( _('Volume have not been ' 'deleted in %ss. 
Giving up') % self.configuration.max_time_to_create_volume) def _update_share_stats(self): """Retrieve stats info from share volume group.""" data = dict( share_backend_name=self.backend_name, storage_protocol='NFS_CIFS', reserved_percentage=self.configuration.reserved_share_percentage, consistency_group_support='pool', ) super(GenericShareDriver, self)._update_share_stats(data) @ensure_server def create_share_from_snapshot(self, context, share, snapshot, share_server=None): """Is called to create share from snapshot.""" helper = self._get_helper(share) server_details = share_server['backend_details'] volume = self._allocate_container(self.admin_context, share, snapshot) volume = self._attach_volume( self.admin_context, share, share_server['backend_details']['instance_id'], volume) self._mount_device(share, share_server['backend_details'], volume) location = helper.create_export(share_server['backend_details'], share['name']) export_list = [{ "path": location, "is_admin_only": False, "metadata": { # TODO(vponomaryov): remove this fake metadata when # proper appears. "export_location_metadata_example": "example", }, }] if server_details.get('admin_ip'): admin_location = location.replace( server_details['public_address'], server_details['admin_ip']) export_list.append({ "path": admin_location, "is_admin_only": True, "metadata": { # TODO(vponomaryov): remove this fake metadata when # proper appears. "export_location_metadata_example": "example", }, }) return export_list @ensure_server def extend_share(self, share, new_size, share_server=None): server_details = share_server['backend_details'] helper = self._get_helper(share) helper.disable_access_for_maintenance(server_details, share['name']) self._unmount_device(share, server_details) self._detach_volume(self.admin_context, share, server_details) volume = self._get_volume(self.admin_context, share['id']) volume = self._extend_volume(self.admin_context, volume, new_size) volume = self._attach_volume( self.admin_context, share, server_details['instance_id'], volume) self._resize_filesystem(server_details, volume) self._mount_device(share, server_details, volume) helper.restore_access_after_maintenance(server_details, share['name']) def _extend_volume(self, context, volume, new_size): self.volume_api.extend(context, volume['id'], new_size) msg_error = _('Failed to extend volume %s') % volume['id'] msg_timeout = ( _('Volume has not been extended in %ss. 
Giving up') % self.configuration.max_time_to_extend_volume ) return self._wait_for_available_volume( volume, self.configuration.max_time_to_extend_volume, msg_error=msg_error, msg_timeout=msg_timeout, expected_size=new_size ) @ensure_server def shrink_share(self, share, new_size, share_server=None): server_details = share_server['backend_details'] helper = self._get_helper(share) export_location = share['export_locations'][0]['path'] mount_path = helper.get_share_path_by_export_location( server_details, export_location) consumed_space = self._get_consumed_space(mount_path, server_details) LOG.debug("Consumed space on share: %s", consumed_space) if consumed_space >= new_size: raise exception.ShareShrinkingPossibleDataLoss( share_id=share['id']) volume = self._get_volume(self.admin_context, share['id']) helper.disable_access_for_maintenance(server_details, share['name']) self._unmount_device(share, server_details) try: self._resize_filesystem(server_details, volume, new_size=new_size) except exception.Invalid: raise exception.ShareShrinkingPossibleDataLoss( share_id=share['id']) except Exception as e: msg = _("Cannot shrink share: %s") % six.text_type(e) raise exception.Invalid(msg) finally: self._mount_device(share, server_details, volume) helper.restore_access_after_maintenance(server_details, share['name']) def _resize_filesystem(self, server_details, volume, new_size=None): """Resize filesystem of provided volume.""" check_command = ['sudo', 'fsck', '-pf', volume['mountpoint']] self._ssh_exec(server_details, check_command) command = ['sudo', 'resize2fs', volume['mountpoint']] if new_size: command.append("%sG" % six.text_type(new_size)) try: self._ssh_exec(server_details, command) except processutils.ProcessExecutionError as e: if e.stderr.find('New size smaller than minimum') != -1: msg = (_("Invalid 'new_size' provided: %s") % six.text_type(new_size)) raise exception.Invalid(msg) else: msg = _("Cannot resize file-system: %s") % six.text_type(e) raise exception.ManilaException(msg) def _is_share_server_active(self, context, share_server): """Check if the share server is active.""" has_active_share_server = ( share_server and share_server.get('backend_details') and self.service_instance_manager.ensure_service_instance( context, share_server['backend_details'])) return has_active_share_server def delete_share(self, context, share, share_server=None): """Deletes share.""" helper = self._get_helper(share) if not self.driver_handles_share_servers: share_server = self.service_instance_manager.get_common_server() if self._is_share_server_active(context, share_server): helper.remove_export( share_server['backend_details'], share['name']) self._unmount_device(share, share_server['backend_details']) self._detach_volume(self.admin_context, share, share_server['backend_details']) # Note(jun): It is an intended breakage to deal with the cases # with any reason that caused absence of Nova instances. self._deallocate_container(self.admin_context, share) self.private_storage.delete(share['id']) def create_snapshot(self, context, snapshot, share_server=None): """Creates a snapshot.""" model_update = {} volume = self._get_volume(self.admin_context, snapshot['share_id']) volume_snapshot_name = (self.configuration. 
volume_snapshot_name_template % snapshot['id']) volume_snapshot = self.volume_api.create_snapshot_force( self.admin_context, volume['id'], volume_snapshot_name, '') t = time.time() while time.time() - t < self.configuration.max_time_to_create_volume: if volume_snapshot['status'] == const.STATUS_AVAILABLE: break if volume_snapshot['status'] == const.STATUS_ERROR: raise exception.ManilaException(_('Failed to create volume ' 'snapshot')) time.sleep(1) volume_snapshot = self.volume_api.get_snapshot( self.admin_context, volume_snapshot['id']) # NOTE(xyang): We should look at whether we still need to save # volume_snapshot_id in private_storage later, now that is saved # in provider_location. self.private_storage.update( snapshot['id'], {'volume_snapshot_id': volume_snapshot['id']}) # NOTE(xyang): Need to update provider_location in the db so # that it can be used in manage/unmanage snapshot tempest tests. model_update['provider_location'] = volume_snapshot['id'] else: raise exception.ManilaException( _('Volume snapshot have not been ' 'created in %ss. Giving up') % self.configuration.max_time_to_create_volume) return model_update def delete_snapshot(self, context, snapshot, share_server=None): """Deletes a snapshot.""" volume_snapshot = self._get_volume_snapshot(self.admin_context, snapshot['id']) if volume_snapshot is None: return self.volume_api.delete_snapshot(self.admin_context, volume_snapshot['id']) t = time.time() while time.time() - t < self.configuration.max_time_to_create_volume: try: snapshot = self.volume_api.get_snapshot(self.admin_context, volume_snapshot['id']) except exception.VolumeSnapshotNotFound: LOG.debug('Volume snapshot was deleted successfully') self.private_storage.delete(snapshot['id']) break time.sleep(1) else: raise exception.ManilaException( _('Volume snapshot have not been ' 'deleted in %ss. Giving up') % self.configuration.max_time_to_create_volume) @ensure_server def ensure_share(self, context, share, share_server=None): """Ensure that storage are mounted and exported.""" helper = self._get_helper(share) volume = self._get_volume(context, share['id']) # NOTE(vponomaryov): volume can be None for managed shares if volume: volume = self._attach_volume( context, share, share_server['backend_details']['instance_id'], volume) self._mount_device(share, share_server['backend_details'], volume) helper.create_export( share_server['backend_details'], share['name'], recreate=True) @ensure_server def update_access(self, context, share, access_rules, add_rules, delete_rules, share_server=None): """Update access rules for given share. This driver has two different behaviors according to parameters: 1. Recovery after error - 'access_rules' contains all access_rules, 'add_rules' and 'delete_rules' shall be empty. Previously existing access rules are cleared and then added back according to 'access_rules'. 2. Adding/Deleting of several access rules - 'access_rules' contains all access_rules, 'add_rules' and 'delete_rules' contain rules which should be added/deleted. Rules in 'access_rules' are ignored and only rules from 'add_rules' and 'delete_rules' are applied. :param context: Current context :param share: Share model with share data. :param access_rules: All access rules for given share :param add_rules: Empty List or List of access rules which should be added. access_rules already contains these rules. :param delete_rules: Empty List or List of access rules which should be removed. access_rules doesn't contain these rules. 
:param share_server: None or Share server model """ self._get_helper(share).update_access(share_server['backend_details'], share['name'], access_rules, add_rules=add_rules, delete_rules=delete_rules) def _get_helper(self, share): helper = self._helpers.get(share['share_proto']) if helper: return helper else: raise exception.InvalidShare( reason="Wrong, unsupported or disabled protocol") def get_network_allocations_number(self): """Get number of network interfaces to be created.""" # NOTE(vponomaryov): Generic driver does not need allocations, because # Nova will handle it. It is valid for all multitenant drivers, that # use service instance provided by Nova. return 0 def _setup_server(self, network_info, metadata=None): msg = "Creating share server '%s'." LOG.debug(msg % network_info['server_id']) server = self.service_instance_manager.set_up_service_instance( self.admin_context, network_info) for helper in self._helpers.values(): helper.init_helper(server) return server def _teardown_server(self, server_details, security_services=None): instance_id = server_details.get("instance_id") LOG.debug("Removing share infrastructure for service instance '%s'.", instance_id) self.service_instance_manager.delete_service_instance( self.admin_context, server_details) def manage_existing(self, share, driver_options): """Manage existing share to manila. Generic driver accepts only one driver_option 'volume_id'. If an administrator provides this option, then appropriate Cinder volume will be managed by Manila as well. :param share: share data :param driver_options: Empty dict or dict with 'volume_id' option. :return: dict with share size, example: {'size': 1} """ helper = self._get_helper(share) share_server = self.service_instance_manager.get_common_server() server_details = share_server['backend_details'] old_export_location = share['export_locations'][0]['path'] mount_path = helper.get_share_path_by_export_location( share_server['backend_details'], old_export_location) LOG.debug("Manage: mount path = %s", mount_path) mounted = self._is_device_mounted(mount_path, server_details) LOG.debug("Manage: is share mounted = %s", mounted) if not mounted: msg = _("Provided share %s is not mounted.") % share['id'] raise exception.ManageInvalidShare(reason=msg) def get_volume(): if 'volume_id' in driver_options: try: return self.volume_api.get( self.admin_context, driver_options['volume_id']) except exception.VolumeNotFound as e: raise exception.ManageInvalidShare(reason=six.text_type(e)) # NOTE(vponomaryov): Manila can only combine volume name by itself, # nowhere to get volume ID from. Return None since Cinder volume # names are not unique or fixed, hence, they can not be used for # sure. 
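            # NOTE (editorial): in other words, to have the backing Cinder
            # volume adopted together with the share, the caller must pass it
            # explicitly, e.g. driver_options={'volume_id': '<cinder volume UUID>'}.
            # In that case the volume is renamed to match volume_name_template
            # and recorded in private_storage; without the option only the
            # mounted size is detected and no Cinder volume gets associated
            # with the managed share.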
return None share_volume = get_volume() if share_volume: instance_volumes = self.compute_api.instance_volumes_list( self.admin_context, server_details['instance_id']) attached_volumes = [vol.id for vol in instance_volumes] LOG.debug('Manage: attached volumes = %s', six.text_type(attached_volumes)) if share_volume['id'] not in attached_volumes: msg = _("Provided volume %s is not attached " "to service instance.") % share_volume['id'] raise exception.ManageInvalidShare(reason=msg) linked_volume_name = self._get_volume_name(share['id']) if share_volume['name'] != linked_volume_name: LOG.debug('Manage: volume_id = %s' % share_volume['id']) self.volume_api.update(self.admin_context, share_volume['id'], {'name': linked_volume_name}) self.private_storage.update( share['id'], {'volume_id': share_volume['id']}) share_size = share_volume['size'] else: share_size = self._get_mounted_share_size( mount_path, share_server['backend_details']) export_locations = helper.get_exports_for_share( server_details, old_export_location) return {'size': share_size, 'export_locations': export_locations} def manage_existing_snapshot(self, snapshot, driver_options): """Manage existing share snapshot with manila. :param snapshot: Snapshot data :param driver_options: Not used by the Generic driver currently :return: dict with share snapshot size, example: {'size': 1} """ model_update = {} volume_snapshot = None snapshot_size = snapshot.get('share_size', 0) provider_location = snapshot.get('provider_location') try: volume_snapshot = self.volume_api.get_snapshot( self.admin_context, provider_location) except exception.VolumeSnapshotNotFound as e: raise exception.ManageInvalidShareSnapshot( reason=six.text_type(e)) if volume_snapshot: snapshot_size = volume_snapshot['size'] # NOTE(xyang): volume_snapshot_id is saved in private_storage # in create_snapshot, so saving it here too for consistency. # We should look at whether we still need to save it in # private_storage later. self.private_storage.update( snapshot['id'], {'volume_snapshot_id': volume_snapshot['id']}) # NOTE(xyang): provider_location is used to map a Manila snapshot # to its name on the storage backend and prevent managing of the # same snapshot twice. model_update['provider_location'] = volume_snapshot['id'] model_update['size'] = snapshot_size return model_update def unmanage_snapshot(self, snapshot): """Unmanage share snapshot with manila.""" self.private_storage.delete(snapshot['id']) def _get_mount_stats_by_index(self, mount_path, server_details, index, block_size='G'): """Get mount stats using df shell command. 
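        Illustrative example (exact df header and column widths vary by
        platform), assuming block_size='G' and a 10G device with 1G used:

            $ df -PBG /shares/share-x
            Filesystem 1G-blocks Used Available Capacity Mounted on
            /dev/vdb         10G   1G        9G      10% /shares/share-x

        Here lines[1].split()[BLOCK_DEVICE_SIZE_INDEX] is '10G' and
        lines[1].split()[USED_SPACE_INDEX] is '1G'; the trailing unit letter
        is stripped ([:-1]) before the value is converted to int.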
:param mount_path: Share path on share server :param server_details: Share server connection details :param index: Data index in df command output: BLOCK_DEVICE_SIZE_INDEX - Size of block device USED_SPACE_INDEX - Used space :param block_size: size of block (example: G, M, Mib, etc) :returns: value of provided index """ share_size_cmd = ['df', '-PB%s' % block_size, mount_path] output, __ = self._ssh_exec(server_details, share_size_cmd) lines = output.split('\n') return int(lines[1].split()[index][:-1]) def _get_mounted_share_size(self, mount_path, server_details): try: size = self._get_mount_stats_by_index( mount_path, server_details, BLOCK_DEVICE_SIZE_INDEX) except Exception as e: msg = _("Cannot calculate size of share %(path)s : %(error)s") % { 'path': mount_path, 'error': six.text_type(e) } raise exception.ManageInvalidShare(reason=msg) return size def _get_consumed_space(self, mount_path, server_details): try: size = self._get_mount_stats_by_index( mount_path, server_details, USED_SPACE_INDEX, block_size='M') size /= float(units.Ki) except Exception as e: msg = _("Cannot calculate consumed space on share " "%(path)s : %(error)s") % { 'path': mount_path, 'error': six.text_type(e) } raise exception.InvalidShare(reason=msg) return size @ensure_server def create_consistency_group(self, context, cg_dict, share_server=None): """Creates a consistency group. Since we are faking the CG object, apart from verifying if the share_server is valid, we do nothing else here. """ LOG.debug('Created a Consistency Group with ID: %s.', cg_dict['id']) msg = _LW('The Generic driver has no means to guarantee consistency ' 'group snapshots are actually consistent. This ' 'implementation is for reference and testing purposes only.') LOG.warning(msg) def delete_consistency_group(self, context, cg_dict, share_server=None): """Deletes a consistency group. Since we are faking the CG object we do nothing here. """ LOG.debug('Deleted the consistency group with ID %s.', cg_dict['id']) def _cleanup_cg_share_snapshot(self, context, share_snapshot, share_server): """Deletes the snapshot of a share belonging to a consistency group.""" try: self.delete_snapshot(context, share_snapshot, share_server) except exception.ManilaException: msg = _LE('Could not delete CG Snapshot %(snap)s ' 'for share %(share)s.') LOG.error(msg % { 'snap': share_snapshot['id'], 'share': share_snapshot['share_id'], }) raise @ensure_server def create_cgsnapshot(self, context, snap_dict, share_server=None): """Creates a consistency group snapshot one or more shares.""" LOG.debug('Attempting to create a CG snapshot %s.' % snap_dict['id']) msg = _LW('The Consistency Group Snapshot being created is ' 'not expected to be consistent. This implementation is ' 'for reference and testing purposes only.') LOG.warning(msg) cg_members = snap_dict.get('cgsnapshot_members', []) if not cg_members: LOG.warning(_LW('No shares in Consistency Group to Create CG ' 'snapshot.')) else: share_snapshots = [] for member in cg_members: share_snapshot = { 'share_id': member['share_id'], 'id': member['id'], } try: self.create_snapshot(context, share_snapshot, share_server) share_snapshots.append(share_snapshot) except exception.ManilaException as e: msg = _LE('Could not create CG Snapshot. 
Failed ' 'to create share snapshot %(snap)s for ' 'share %(share)s.') LOG.exception(msg % { 'snap': share_snapshot['id'], 'share': share_snapshot['share_id'] }) # clean up any share snapshots previously created LOG.debug('Attempting to clean up snapshots due to ' 'failure...') for share_snapshot in share_snapshots: self._cleanup_cg_share_snapshot(context, share_snapshot, share_server) raise e LOG.debug('Successfully created CG snapshot %s.' % snap_dict['id']) return None, None @ensure_server def delete_cgsnapshot(self, context, snap_dict, share_server=None): """Deletes a consistency group snapshot.""" cg_members = snap_dict.get('cgsnapshot_members', []) LOG.debug('Deleting CG snapshot %s.' % snap_dict['id']) for member in cg_members: share_snapshot = { 'share_id': member['share_id'], 'id': member['id'], } self._cleanup_cg_share_snapshot(context, share_snapshot, share_server) LOG.debug('Deleted CG snapshot %s.' % snap_dict['id']) return None, None @ensure_server def create_consistency_group_from_cgsnapshot(self, context, cg_dict, cgsnapshot_dict, share_server=None): """Creates a consistency group from an existing CG snapshot.""" # Ensure that the consistency group snapshot has members if not cgsnapshot_dict['cgsnapshot_members']: return None, None clone_list = self._collate_cg_snapshot_info(cg_dict, cgsnapshot_dict) share_update_list = list() LOG.debug('Creating consistency group from CG snapshot %s.', cgsnapshot_dict['id']) for clone in clone_list: kwargs = {} if self.driver_handles_share_servers: kwargs['share_server'] = share_server export_location = ( self.create_share_from_snapshot( context, clone['share'], clone['snapshot'], **kwargs)) share_update_list.append({ 'id': clone['share']['id'], 'export_locations': export_location, }) return None, share_update_list def _collate_cg_snapshot_info(self, cg_dict, cgsnapshot_dict): """Collate the data for a clone of the CG snapshot. Given two data structures, a CG snapshot (cgsnapshot_dict) and a new CG to be cloned from the snapshot (cg_dict), match up both structures into a list of dicts (share & snapshot) suitable for use by existing method that clones individual share snapshots. """ clone_list = list() for share in cg_dict['shares']: clone_info = {'share': share} for cgsnapshot_member in cgsnapshot_dict['cgsnapshot_members']: if (share['source_cgsnapshot_member_id'] == cgsnapshot_member['id']): clone_info['snapshot'] = { 'id': cgsnapshot_member['id'], } break if len(clone_info) != 2: msg = _("Invalid data supplied for creating consistency " "group from CG snapshot %s.") % cgsnapshot_dict['id'] raise exception.InvalidConsistencyGroup(reason=msg) clone_list.append(clone_info) return clone_list manila-2.0.0/manila/share/drivers/__init__.py0000664000567000056710000000151012701407107022241 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`manila.share.driver` -- Manila Share Drivers ===================================================== .. 
automodule:: manila.share.driver :platform: Unix :synopsis: Module containing all the Manila Share drivers. """ manila-2.0.0/manila/share/drivers/hpe/0000775000567000056710000000000012701407265020714 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/hpe/__init__.py0000664000567000056710000000000012701407107023006 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/hpe/hpe_3par_driver.py0000664000567000056710000004433412701407107024345 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """HPE 3PAR Driver for OpenStack Manila.""" import datetime import hashlib import inspect import logging import os import re from oslo_config import cfg from oslo_log import log import six from manila.common import config from manila import exception from manila.i18n import _ from manila.i18n import _LI from manila.share import driver from manila.share.drivers.hpe import hpe_3par_mediator from manila.share import share_types from manila import utils HPE3PAR_OPTS = [ cfg.StrOpt('hpe3par_api_url', default='', help="3PAR WSAPI Server Url like " "https://<3par ip>:8080/api/v1", deprecated_name='hp3par_api_url'), cfg.StrOpt('hpe3par_username', default='', help="3PAR username with the 'edit' role", deprecated_name='hp3par_username'), cfg.StrOpt('hpe3par_password', default='', help="3PAR password for the user specified in hpe3par_username", secret=True, deprecated_name='hp3par_password'), cfg.StrOpt('hpe3par_san_ip', default='', help="IP address of SAN controller", deprecated_name='hp3par_san_ip'), cfg.StrOpt('hpe3par_san_login', default='', help="Username for SAN controller", deprecated_name='hp3par_san_login'), cfg.StrOpt('hpe3par_san_password', default='', help="Password for SAN controller", secret=True, deprecated_name='hp3par_san_password'), cfg.PortOpt('hpe3par_san_ssh_port', default=22, help='SSH port to use with SAN', deprecated_name='hp3par_san_ssh_port'), cfg.StrOpt('hpe3par_fpg', default="OpenStack", help="The File Provisioning Group (FPG) to use", deprecated_name='hp3par_fpg'), cfg.StrOpt('hpe3par_share_ip_address', default='', help="The IP address for shares not using a share server", deprecated_name='hp3par_share_ip_address'), cfg.BoolOpt('hpe3par_fstore_per_share', default=False, help="Use one filestore per share", deprecated_name='hp3par_fstore_per_share'), cfg.BoolOpt('hpe3par_require_cifs_ip', default=False, help="Require IP access rules for CIFS (in addition to user)"), cfg.BoolOpt('hpe3par_debug', default=False, help="Enable HTTP debugging to 3PAR", deprecated_name='hp3par_debug'), cfg.StrOpt('hpe3par_cifs_admin_access_username', default='', help="File system admin user name for CIFS.", deprecated_name='hp3par_cifs_admin_access_username'), cfg.StrOpt('hpe3par_cifs_admin_access_password', default='', help="File system admin password for CIFS.", secret=True, deprecated_name='hp3par_cifs_admin_access_password'), cfg.StrOpt('hpe3par_cifs_admin_access_domain', default='LOCAL_CLUSTER', 
help="File system domain for the CIFS admin user.", deprecated_name='hp3par_cifs_admin_access_domain'), cfg.StrOpt('hpe3par_share_mount_path', default='/mnt/', help="The path where shares will be mounted when deleting " "nested file trees.", deprecated_name='hpe3par_share_mount_path'), ] CONF = cfg.CONF CONF.register_opts(HPE3PAR_OPTS) LOG = log.getLogger(__name__) class HPE3ParShareDriver(driver.ShareDriver): """HPE 3PAR driver for Manila. Supports NFS and CIFS protocols on arrays with File Persona. Version history: 1.0.0 - Begin Liberty development (post-Kilo) 1.0.1 - Report thin/dedup/hp_flash_cache capabilities 1.0.2 - Add share server/share network support 2.0.0 - Rebranded HP to HPE 2.0.1 - Add access_level (e.g. read-only support) 2.0.2 - Add extend/shrink 2.0.3 - Remove file tree on delete when using nested shares #1538800 """ VERSION = "2.0.3" def __init__(self, *args, **kwargs): super(HPE3ParShareDriver, self).__init__((True, False), *args, **kwargs) self.configuration = kwargs.get('configuration', None) self.configuration.append_config_values(HPE3PAR_OPTS) self.configuration.append_config_values(driver.ssh_opts) self.configuration.append_config_values(config.global_opts) self.fpg = None self.vfs = None self.share_ip_address = None self._hpe3par = None # mediator between driver and client def do_setup(self, context): """Any initialization the share driver does while starting.""" LOG.info(_LI("Starting share driver %(driver_name)s (%(version)s)"), {'driver_name': self.__class__.__name__, 'version': self.VERSION}) if not self.driver_handles_share_servers: self.share_ip_address = self.configuration.hpe3par_share_ip_address if not self.share_ip_address: raise exception.HPE3ParInvalid( _("Unsupported configuration. " "hpe3par_share_ip_address must be set when " "driver_handles_share_servers is False.")) mediator = hpe_3par_mediator.HPE3ParMediator( hpe3par_username=self.configuration.hpe3par_username, hpe3par_password=self.configuration.hpe3par_password, hpe3par_api_url=self.configuration.hpe3par_api_url, hpe3par_debug=self.configuration.hpe3par_debug, hpe3par_san_ip=self.configuration.hpe3par_san_ip, hpe3par_san_login=self.configuration.hpe3par_san_login, hpe3par_san_password=self.configuration.hpe3par_san_password, hpe3par_san_ssh_port=self.configuration.hpe3par_san_ssh_port, hpe3par_fstore_per_share=(self.configuration .hpe3par_fstore_per_share), hpe3par_require_cifs_ip=self.configuration.hpe3par_require_cifs_ip, hpe3par_share_ip_address=( self.configuration.hpe3par_share_ip_address), hpe3par_cifs_admin_access_username=( self.configuration.hpe3par_cifs_admin_access_username), hpe3par_cifs_admin_access_password=( self.configuration.hpe3par_cifs_admin_access_password), hpe3par_cifs_admin_access_domain=( self.configuration.hpe3par_cifs_admin_access_domain), hpe3par_share_mount_path=( self.configuration.hpe3par_share_mount_path), my_ip=self.configuration.my_ip, ssh_conn_timeout=self.configuration.ssh_conn_timeout, ) mediator.do_setup() # FPG must be configured and must exist. self.fpg = self.configuration.safe_get('hpe3par_fpg') # Validate the FPG and discover the VFS # This also validates the client, connection, firmware, WSAPI, FPG... self.vfs = mediator.get_vfs_name(self.fpg) # Don't set _hpe3par until it is ready. Otherwise _update_stats fails. self._hpe3par = mediator def check_for_setup_error(self): try: # Log the source SHA for support. Only do this with DEBUG. 
if LOG.isEnabledFor(logging.DEBUG): LOG.debug('HPE3ParShareDriver SHA1: %s', self.sha1_hash(HPE3ParShareDriver)) LOG.debug('HPE3ParMediator SHA1: %s', self.sha1_hash(hpe_3par_mediator.HPE3ParMediator)) except Exception as e: # Don't let any exceptions during the SHA1 logging interfere # with startup. This is just debug info to identify the source # code. If it doesn't work, just log a debug message. LOG.debug('Source code SHA1 not logged due to: %s', six.text_type(e)) @staticmethod def sha1_hash(clazz): """Get the SHA1 hash for the source of a class.""" source_file = inspect.getsourcefile(clazz) file_size = os.path.getsize(source_file) sha1 = hashlib.sha1() sha1.update(("blob %u\0" % file_size).encode('utf-8')) with open(source_file, 'rb') as f: sha1.update(f.read()) return sha1.hexdigest() def get_network_allocations_number(self): return 1 @staticmethod def _validate_network_type(network_type): if network_type not in ('flat', 'vlan', None): reason = _('Invalid network type. %s is not supported by the ' '3PAR driver.') raise exception.NetworkBadConfigurationException( reason=reason % network_type) def _setup_server(self, network_info, metadata=None): LOG.debug("begin _setup_server with %s", network_info) self._validate_network_type(network_info['network_type']) ip = network_info['network_allocations'][0]['ip_address'] subnet = utils.cidr_to_netmask(network_info['cidr']) vlantag = network_info['segmentation_id'] self._hpe3par.create_fsip(ip, subnet, vlantag, self.fpg, self.vfs) return { 'share_server_name': network_info['server_id'], 'share_server_id': network_info['server_id'], 'ip': ip, 'subnet': subnet, 'vlantag': vlantag if vlantag else 0, 'fpg': self.fpg, 'vfs': self.vfs, } def _teardown_server(self, server_details, security_services=None): LOG.debug("begin _teardown_server with %s", server_details) self._hpe3par.remove_fsip(server_details.get('ip'), server_details.get('fpg'), server_details.get('vfs')) def _get_share_ip(self, share_server): return share_server['backend_details'].get('ip') if share_server else ( self.share_ip_address) @staticmethod def _build_export_location(protocol, ip, path): if not ip: message = _('Failed to build export location due to missing IP.') raise exception.InvalidInput(message) if not path: message = _('Failed to build export location due to missing path.') raise exception.InvalidInput(message) if protocol == 'NFS': location = ':'.join((ip, path)) elif protocol == 'CIFS': location = '\\\\%s\%s' % (ip, path) else: message = _('Invalid protocol. Expected NFS or CIFS. 
' 'Got %s.') % protocol raise exception.InvalidInput(message) return location @staticmethod def build_share_comment(share): """Create an informational only comment to help admins and testers.""" info = { 'name': share['display_name'], 'host': share['host'], 'now': datetime.datetime.now().strftime('%H%M%S'), } acceptable = re.compile('[^a-zA-Z0-9_=:@# \-]+', re.UNICODE) comment = ("OpenStack Manila - host=%(host)s orig_name=%(name)s " "created=%(now)s" % info) return acceptable.sub('_', comment)[:254] # clean and truncate def create_share(self, context, share, share_server=None): """Is called to create share.""" ip = self._get_share_ip(share_server) protocol = share['share_proto'] extra_specs = share_types.get_extra_specs_from_share(share) path = self._hpe3par.create_share( share['project_id'], share['id'], protocol, extra_specs, self.fpg, self.vfs, size=share['size'], comment=self.build_share_comment(share) ) return self._build_export_location(protocol, ip, path) def create_share_from_snapshot(self, context, share, snapshot, share_server=None): """Is called to create share from snapshot.""" ip = self._get_share_ip(share_server) protocol = share['share_proto'] extra_specs = share_types.get_extra_specs_from_share(share) path = self._hpe3par.create_share_from_snapshot( share['id'], protocol, extra_specs, share['project_id'], snapshot['share_id'], snapshot['id'], self.fpg, self.vfs, comment=self.build_share_comment(share) ) return self._build_export_location(protocol, ip, path) def delete_share(self, context, share, share_server=None): """Deletes share and its fstore.""" self._hpe3par.delete_share(share['project_id'], share['id'], share['share_proto'], self.fpg, self.vfs) def create_snapshot(self, context, snapshot, share_server=None): """Creates a snapshot of a share.""" self._hpe3par.create_snapshot(snapshot['share']['project_id'], snapshot['share']['id'], snapshot['share']['share_proto'], snapshot['id'], self.fpg, self.vfs) def delete_snapshot(self, context, snapshot, share_server=None): """Deletes a snapshot of a share.""" self._hpe3par.delete_snapshot(snapshot['share']['project_id'], snapshot['share']['id'], snapshot['share']['share_proto'], snapshot['id'], self.fpg, self.vfs) def ensure_share(self, context, share, share_server=None): pass def allow_access(self, context, share, access, share_server=None): """Allow access to the share.""" extra_specs = None if 'NFS' == share['share_proto']: # Avoiding DB call otherwise extra_specs = share_types.get_extra_specs_from_share(share) self._hpe3par.allow_access(share['project_id'], share['id'], share['share_proto'], extra_specs, access['access_type'], access['access_to'], access['access_level'], self.fpg, self.vfs) def deny_access(self, context, share, access, share_server=None): """Deny access to the share.""" self._hpe3par.deny_access(share['project_id'], share['id'], share['share_proto'], access['access_type'], access['access_to'], access['access_level'], self.fpg, self.vfs) def extend_share(self, share, new_size, share_server=None): """Extends size of existing share.""" self._hpe3par.resize_share(share['project_id'], share['id'], share['share_proto'], new_size, share['size'], self.fpg, self.vfs) def shrink_share(self, share, new_size, share_server=None): """Shrinks size of existing share.""" self._hpe3par.resize_share(share['project_id'], share['id'], share['share_proto'], new_size, share['size'], self.fpg, self.vfs) def _update_share_stats(self): """Retrieve stats info from share group.""" backend_name = self.configuration.safe_get( 
'share_backend_name') or "HPE_3PAR" max_over_subscription_ratio = self.configuration.safe_get( 'max_over_subscription_ratio') reserved_share_percentage = self.configuration.safe_get( 'reserved_share_percentage') if reserved_share_percentage is None: reserved_share_percentage = 0 stats = { 'share_backend_name': backend_name, 'driver_handles_share_servers': self.driver_handles_share_servers, 'vendor_name': 'HPE', 'driver_version': self.VERSION, 'storage_protocol': 'NFS_CIFS', 'total_capacity_gb': 0, 'free_capacity_gb': 0, 'provisioned_capacity_gb': 0, 'reserved_percentage': reserved_share_percentage, 'max_over_subscription_ratio': max_over_subscription_ratio, 'qos': False, 'thin_provisioning': True, # 3PAR default is thin } if not self._hpe3par: LOG.info( _LI("Skipping capacity and capabilities update. Setup has not " "completed.")) else: fpg_status = self._hpe3par.get_fpg_status(self.fpg) LOG.debug("FPG status = %s.", fpg_status) stats.update(fpg_status) super(HPE3ParShareDriver, self)._update_share_stats(stats) manila-2.0.0/manila/share/drivers/hpe/hpe_3par_mediator.py0000664000567000056710000015727212701407107024664 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """HPE 3PAR Mediator for OpenStack Manila. This 'mediator' de-couples the 3PAR focused client from the OpenStack focused driver. """ from oslo_log import log from oslo_utils import importutils from oslo_utils import units import six from manila import exception from manila import utils from manila.i18n import _, _LI, _LW hpe3parclient = importutils.try_import("hpe3parclient") if hpe3parclient: from hpe3parclient import file_client LOG = log.getLogger(__name__) MIN_CLIENT_VERSION = (4, 0, 0) DENY = '-' ALLOW = '+' OPEN_STACK_MANILA = 'OpenStack Manila' FULL = 1 THIN = 2 DEDUPE = 6 ENABLED = 1 DISABLED = 2 CACHE = 'cache' CONTINUOUS_AVAIL = 'continuous_avail' ACCESS_BASED_ENUM = 'access_based_enum' SMB_EXTRA_SPECS_MAP = { CACHE: CACHE, CONTINUOUS_AVAIL: 'ca', ACCESS_BASED_ENUM: 'abe', } IP_ALREADY_EXISTS = 'IP address %s already exists' USER_ALREADY_EXISTS = '"allow" permission already exists for "%s"' DOES_NOT_EXIST = 'does not exist, cannot' LOCAL_IP = '127.0.0.1' LOCAL_IP_RO = '127.0.0.2' SUPER_SHARE = 'OPENSTACK_SUPER_SHARE' class HPE3ParMediator(object): """3PAR client-facing code for the 3PAR driver. Version history: 1.0.0 - Begin Liberty development (post-Kilo) 1.0.1 - Report thin/dedup/hp_flash_cache capabilities 1.0.2 - Add share server/share network support 1.0.3 - Use hp3par prefix for share types and capabilities 2.0.0 - Rebranded HP to HPE 2.0.1 - Add access_level (e.g. 
read-only support) 2.0.2 - Add extend/shrink 2.0.3 - Fix SMB read-only access (added in 2.0.1) 2.0.4 - Remove file tree on delete when using nested shares #1538800 """ VERSION = "2.0.4" def __init__(self, **kwargs): self.hpe3par_username = kwargs.get('hpe3par_username') self.hpe3par_password = kwargs.get('hpe3par_password') self.hpe3par_api_url = kwargs.get('hpe3par_api_url') self.hpe3par_debug = kwargs.get('hpe3par_debug') self.hpe3par_san_ip = kwargs.get('hpe3par_san_ip') self.hpe3par_san_login = kwargs.get('hpe3par_san_login') self.hpe3par_san_password = kwargs.get('hpe3par_san_password') self.hpe3par_san_ssh_port = kwargs.get('hpe3par_san_ssh_port') self.hpe3par_san_private_key = kwargs.get('hpe3par_san_private_key') self.hpe3par_fstore_per_share = kwargs.get('hpe3par_fstore_per_share') self.hpe3par_require_cifs_ip = kwargs.get('hpe3par_require_cifs_ip') self.hpe3par_share_ip_address = kwargs.get('hpe3par_share_ip_address') self.hpe3par_cifs_admin_access_username = ( kwargs.get('hpe3par_cifs_admin_access_username')) self.hpe3par_cifs_admin_access_password = ( kwargs.get('hpe3par_cifs_admin_access_password')) self.hpe3par_cifs_admin_access_domain = ( kwargs.get('hpe3par_cifs_admin_access_domain')) self.hpe3par_share_mount_path = kwargs.get('hpe3par_share_mount_path') self.my_ip = kwargs.get('my_ip') self.ssh_conn_timeout = kwargs.get('ssh_conn_timeout') self._client = None self.client_version = None @staticmethod def no_client(): return hpe3parclient is None def do_setup(self): if self.no_client(): msg = _('You must install hpe3parclient before using the 3PAR ' 'driver. Run "pip install --upgrade python-3parclient" ' 'to upgrade the hpe3parclient.') LOG.error(msg) raise exception.HPE3ParInvalidClient(message=msg) self.client_version = hpe3parclient.version_tuple if self.client_version < MIN_CLIENT_VERSION: msg = (_('Invalid hpe3parclient version found (%(found)s). ' 'Version %(minimum)s or greater required. 
Run "pip' ' install --upgrade python-3parclient" to upgrade' ' the hpe3parclient.') % {'found': '.'.join(map(six.text_type, self.client_version)), 'minimum': '.'.join(map(six.text_type, MIN_CLIENT_VERSION))}) LOG.error(msg) raise exception.HPE3ParInvalidClient(message=msg) try: self._client = file_client.HPE3ParFilePersonaClient( self.hpe3par_api_url) except Exception as e: msg = (_('Failed to connect to HPE 3PAR File Persona Client: %s') % six.text_type(e)) LOG.exception(msg) raise exception.ShareBackendException(message=msg) try: ssh_kwargs = {} if self.hpe3par_san_ssh_port: ssh_kwargs['port'] = self.hpe3par_san_ssh_port if self.ssh_conn_timeout: ssh_kwargs['conn_timeout'] = self.ssh_conn_timeout if self.hpe3par_san_private_key: ssh_kwargs['privatekey'] = self.hpe3par_san_private_key self._client.setSSHOptions( self.hpe3par_san_ip, self.hpe3par_san_login, self.hpe3par_san_password, **ssh_kwargs ) except Exception as e: msg = (_('Failed to set SSH options for HPE 3PAR File Persona ' 'Client: %s') % six.text_type(e)) LOG.exception(msg) raise exception.ShareBackendException(message=msg) LOG.info(_LI("HPE3ParMediator %(version)s, " "hpe3parclient %(client_version)s"), {"version": self.VERSION, "client_version": hpe3parclient.get_version_string()}) try: wsapi_version = self._client.getWsApiVersion()['build'] LOG.info(_LI("3PAR WSAPI %s"), wsapi_version) except Exception as e: msg = (_('Failed to get 3PAR WSAPI version: %s') % six.text_type(e)) LOG.exception(msg) raise exception.ShareBackendException(message=msg) if self.hpe3par_debug: self._client.debug_rest(True) # Includes SSH debug (setSSH above) def _wsapi_login(self): try: self._client.login(self.hpe3par_username, self.hpe3par_password) except Exception as e: msg = (_("Failed to Login to 3PAR (%(url)s) as %(user)s " "because: %(err)s") % {'url': self.hpe3par_api_url, 'user': self.hpe3par_username, 'err': six.text_type(e)}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) def _wsapi_logout(self): try: self._client.http.unauthenticate() except Exception as e: msg = _LW("Failed to Logout from 3PAR (%(url)s) because %(err)s") LOG.warning(msg, {'url': self.hpe3par_api_url, 'err': six.text_type(e)}) # don't raise exception on logout() def get_provisioned_gb(self, fpg): total_mb = 0 try: result = self._client.getfsquota(fpg=fpg) except Exception as e: result = {'message': six.text_type(e)} error_msg = result.get('message') if error_msg: message = (_('Error while getting fsquotas for FPG ' '%(fpg)s: %(msg)s') % {'fpg': fpg, 'msg': error_msg}) LOG.error(message) raise exception.ShareBackendException(msg=message) for fsquota in result['members']: total_mb += float(fsquota['hardBlock']) return total_mb / units.Ki def get_fpg_status(self, fpg): """Get capacity and capabilities for FPG.""" try: result = self._client.getfpg(fpg) except Exception as e: msg = (_('Failed to get capacity for fpg %(fpg)s: %(e)s') % {'fpg': fpg, 'e': six.text_type(e)}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) if result['total'] != 1: msg = (_('Failed to get capacity for fpg %s.') % fpg) LOG.error(msg) raise exception.ShareBackendException(msg=msg) member = result['members'][0] total_capacity_gb = float(member['capacityKiB']) / units.Mi free_capacity_gb = float(member['availCapacityKiB']) / units.Mi volumes = member['vvs'] if isinstance(volumes, list): volume = volumes[0] # Use first name from list else: volume = volumes # There is just a name self._wsapi_login() try: volume_info = self._client.getVolume(volume) volume_set = 
self._client.getVolumeSet(fpg) finally: self._wsapi_logout() provisioning_type = volume_info['provisioningType'] if provisioning_type not in (THIN, FULL, DEDUPE): msg = (_('Unexpected provisioning type for FPG %(fpg)s: ' '%(ptype)s.') % {'fpg': fpg, 'ptype': provisioning_type}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) dedupe = provisioning_type == DEDUPE thin_provisioning = provisioning_type in (THIN, DEDUPE) flash_cache_policy = volume_set.get('flashCachePolicy', DISABLED) hpe3par_flash_cache = flash_cache_policy == ENABLED status = { 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb, 'thin_provisioning': thin_provisioning, 'dedupe': dedupe, 'hpe3par_flash_cache': hpe3par_flash_cache, 'hp3par_flash_cache': hpe3par_flash_cache, } if thin_provisioning: status['provisioned_capacity_gb'] = self.get_provisioned_gb(fpg) return status @staticmethod def ensure_supported_protocol(share_proto): protocol = share_proto.lower() if protocol == 'cifs': protocol = 'smb' if protocol not in ['smb', 'nfs']: message = (_('Invalid protocol. Expected nfs or smb. Got %s.') % protocol) LOG.error(message) raise exception.InvalidInput(message) return protocol @staticmethod def other_protocol(share_proto): """Given 'nfs' or 'smb' (or equivalent) return the other one.""" protocol = HPE3ParMediator.ensure_supported_protocol(share_proto) return 'nfs' if protocol == 'smb' else 'smb' @staticmethod def ensure_prefix(uid, protocol=None, readonly=False): if uid.startswith('osf-'): return uid if protocol: proto = '-%s' % HPE3ParMediator.ensure_supported_protocol(protocol) else: proto = '' if readonly: ro = '-ro' else: ro = '' # Format is osf[-ro]-{nfs|smb}-uid return 'osf%s%s-%s' % (proto, ro, uid) @staticmethod def _get_nfs_options(extra_specs, readonly): """Validate the NFS extra_specs and return the options to use.""" nfs_options = extra_specs.get('hpe3par:nfs_options') if nfs_options is None: nfs_options = extra_specs.get('hp3par:nfs_options') if nfs_options: msg = _LW("hp3par:nfs_options is deprecated. Use " "hpe3par:nfs_options instead.") LOG.warning(msg) if nfs_options: options = nfs_options.split(',') else: options = [] # rw, ro, and (no)root_squash (in)secure options are not allowed in # extra_specs because they will be forcibly set below. # no_subtree_check and fsid are not allowed per 3PAR support. # Other strings will be allowed to be sent to the 3PAR which will do # further validation. options_not_allowed = ['ro', 'rw', 'no_root_squash', 'root_squash', 'secure', 'insecure', 'no_subtree_check', 'fsid'] invalid_options = [ option for option in options if option in options_not_allowed ] if invalid_options: raise exception.InvalidInput(_('Invalid hp3par:nfs_options or ' 'hpe3par:nfs_options in ' 'extra-specs. The following ' 'options are not allowed: %s') % invalid_options) options.append('ro' if readonly else 'rw') options.append('no_root_squash') options.append('insecure') return ','.join(options) def _build_createfshare_kwargs(self, protocol, fpg, fstore, readonly, sharedir, extra_specs, comment): createfshare_kwargs = dict(fpg=fpg, fstore=fstore, sharedir=sharedir, comment=comment) if 'hp3par_flash_cache' in extra_specs: msg = _LW("hp3par_flash_cache is deprecated. Use " "hpe3par_flash_cache instead.") LOG.warning(msg) if protocol == 'nfs': # New NFS shares needs seed IP to prevent "all" access. # Readonly and readwrite NFS shares client IPs cannot overlap. 
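# Illustrative, standalone sketch (not part of the driver): it mirrors the option
# assembly performed by HPE3ParMediator._get_nfs_options() above so the precedence
# rules are easier to see. The extra-spec values are made-up examples;
# 'hpe3par:nfs_options' wins over the deprecated 'hp3par:nfs_options', disallowed
# options raise, and ro/rw, no_root_squash and insecure are always appended.

def sketch_nfs_options(extra_specs, readonly):
    nfs_options = extra_specs.get('hpe3par:nfs_options')
    if nfs_options is None:
        nfs_options = extra_specs.get('hp3par:nfs_options')  # deprecated key
    options = nfs_options.split(',') if nfs_options else []
    not_allowed = {'ro', 'rw', 'no_root_squash', 'root_squash',
                   'secure', 'insecure', 'no_subtree_check', 'fsid'}
    invalid = [opt for opt in options if opt in not_allowed]
    if invalid:
        raise ValueError('options not allowed in extra-specs: %s' % invalid)
    options.append('ro' if readonly else 'rw')
    options.append('no_root_squash')
    options.append('insecure')
    return ','.join(options)

# Example (hypothetical extra specs):
#   sketch_nfs_options({'hpe3par:nfs_options': 'sync,no_wdelay'}, readonly=False)
#   -> 'sync,no_wdelay,rw,no_root_squash,insecure'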
if readonly: createfshare_kwargs['clientip'] = LOCAL_IP_RO else: createfshare_kwargs['clientip'] = LOCAL_IP options = self._get_nfs_options(extra_specs, readonly) createfshare_kwargs['options'] = options else: # To keep the original (Kilo, Liberty) behavior where CIFS IP # access rules were required in addition to user rules enable # this to use a local seed IP instead of the default (all allowed). if self.hpe3par_require_cifs_ip: createfshare_kwargs['allowip'] = LOCAL_IP smb_opts = (ACCESS_BASED_ENUM, CONTINUOUS_AVAIL, CACHE) for smb_opt in smb_opts: opt_value = extra_specs.get('hpe3par:smb_%s' % smb_opt) if opt_value is None: opt_value = extra_specs.get('hp3par:smb_%s' % smb_opt) if opt_value: msg = _LW("hp3par:smb_* is deprecated. Use " "hpe3par:smb_* instead.") LOG.warning(msg) if opt_value: opt_key = SMB_EXTRA_SPECS_MAP[smb_opt] createfshare_kwargs[opt_key] = opt_value return createfshare_kwargs def _update_capacity_quotas(self, fstore, new_size, old_size, fpg, vfs): @utils.synchronized('hpe3par-update-quota-' + fstore) def _sync_update_capacity_quotas(fstore, new_size, old_size, fpg, vfs): """Update 3PAR quotas and return setfsquota output.""" if self.hpe3par_fstore_per_share: hcapacity = six.text_type(new_size * units.Ki) scapacity = hcapacity else: hard_size_mb = (new_size - old_size) * units.Ki soft_size_mb = hard_size_mb result = self._client.getfsquota( fpg=fpg, vfs=vfs, fstore=fstore) LOG.debug("getfsquota result=%s", result) quotas = result['members'] if len(quotas) == 1: hard_size_mb += int(quotas[0].get('hardBlock', '0')) soft_size_mb += int(quotas[0].get('softBlock', '0')) hcapacity = six.text_type(hard_size_mb) scapacity = six.text_type(soft_size_mb) return self._client.setfsquota(vfs, fpg=fpg, fstore=fstore, scapacity=scapacity, hcapacity=hcapacity) try: result = _sync_update_capacity_quotas( fstore, new_size, old_size, fpg, vfs) LOG.debug("setfsquota result=%s", result) except Exception as e: msg = (_('Failed to update capacity quota ' '%(size)s on %(fstore)s with exception: %(e)s') % {'size': new_size - old_size, 'fstore': fstore, 'e': six.text_type(e)}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) # Non-empty result is an error message returned from the 3PAR if result: msg = (_('Failed to update capacity quota ' '%(size)s on %(fstore)s with error: %(error)s') % {'size': new_size - old_size, 'fstore': fstore, 'error': result}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) def _create_share(self, project_id, share_id, protocol, extra_specs, fpg, vfs, fstore, sharedir, readonly, size, comment): share_name = self.ensure_prefix(share_id, readonly=readonly) if not (sharedir or self.hpe3par_fstore_per_share): sharedir = share_name if fstore: use_existing_fstore = True else: use_existing_fstore = False if self.hpe3par_fstore_per_share: # Do not use -ro in the fstore name. 
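# Illustrative, standalone sketch (not part of the driver): the quota math used by
# HPE3ParMediator._update_capacity_quotas() above. Sizes arrive in GiB and the 3PAR
# setfsquota call takes MiB, so units.Ki (1024) is the conversion factor. The
# existing_hard_mb/existing_soft_mb arguments stand in for the 'hardBlock'/
# 'softBlock' fields returned by getfsquota.

KI = 1024  # oslo_utils.units.Ki

def sketch_quota_mb(new_size_gb, old_size_gb, fstore_per_share,
                    existing_hard_mb=0, existing_soft_mb=0):
    if fstore_per_share:
        # One fstore per share: the quota is simply the new share size.
        hard_mb = new_size_gb * KI
        soft_mb = hard_mb
    else:
        # Shared fstore: only the size delta is added to the current quota.
        delta_mb = (new_size_gb - old_size_gb) * KI
        hard_mb = existing_hard_mb + delta_mb
        soft_mb = existing_soft_mb + delta_mb
    return hard_mb, soft_mb

# Example: growing a share from 1 GiB to 3 GiB in a shared fstore that already
# has a 5 GiB quota -> (5 + 2) * 1024 MiB hard and soft.
assert sketch_quota_mb(3, 1, False, 5 * KI, 5 * KI) == (7 * KI, 7 * KI)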
fstore = self.ensure_prefix(share_id, readonly=False) else: fstore = self.ensure_prefix(project_id, protocol) createfshare_kwargs = self._build_createfshare_kwargs(protocol, fpg, fstore, readonly, sharedir, extra_specs, comment) if not use_existing_fstore: try: result = self._client.createfstore( vfs, fstore, fpg=fpg, comment=comment) LOG.debug("createfstore result=%s", result) except Exception as e: msg = (_('Failed to create fstore %(fstore)s: %(e)s') % {'fstore': fstore, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg) if size: self._update_capacity_quotas(fstore, size, 0, fpg, vfs) try: if readonly and protocol == 'nfs': # For NFS, RO is a 2nd 3PAR share pointing to same sharedir share_name = self.ensure_prefix(share_id, readonly=readonly) result = self._client.createfshare(protocol, vfs, share_name, **createfshare_kwargs) LOG.debug("createfshare result=%s", result) except Exception as e: msg = (_('Failed to create share %(share_name)s: %(e)s') % {'share_name': share_name, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg) try: result = self._client.getfshare( protocol, share_name, fpg=fpg, vfs=vfs, fstore=fstore) LOG.debug("getfshare result=%s", result) except Exception as e: msg = (_('Failed to get fshare %(share_name)s after creating it: ' '%(e)s') % {'share_name': share_name, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg) if result['total'] != 1: msg = (_('Failed to get fshare %(share_name)s after creating it. ' 'Expected to get 1 fshare. Got %(total)s.') % {'share_name': share_name, 'total': result['total']}) LOG.error(msg) raise exception.ShareBackendException(msg) return result['members'][0] def create_share(self, project_id, share_id, share_proto, extra_specs, fpg, vfs, fstore=None, sharedir=None, readonly=False, size=None, comment=OPEN_STACK_MANILA): """Create the share and return its path. This method can create a share when called by the driver or when called locally from create_share_from_snapshot(). The optional parameters allow re-use. :param project_id: The tenant ID. :param share_id: The share-id with or without osf- prefix. :param share_proto: The protocol (to map to smb or nfs) :param extra_specs: The share type extra-specs :param fpg: The file provisioning group :param vfs: The virtual file system :param fstore: (optional) The file store. When provided, an existing file store is used. Otherwise one is created. :param sharedir: (optional) Share directory. :param readonly: (optional) Create share as read-only. :param size: (optional) Size limit for file store if creating one. :return: share path string """ protocol = self.ensure_supported_protocol(share_proto) share = self._create_share(project_id, share_id, protocol, extra_specs, fpg, vfs, fstore, sharedir, readonly, size, comment) if protocol == 'nfs': return share['sharePath'] else: return share['shareName'] def create_share_from_snapshot(self, share_id, share_proto, extra_specs, orig_project_id, orig_share_id, snapshot_id, fpg, vfs, comment=OPEN_STACK_MANILA): protocol = self.ensure_supported_protocol(share_proto) snapshot_tag = self.ensure_prefix(snapshot_id) orig_share_name = self.ensure_prefix(orig_share_id) snapshot = self._find_fsnap(orig_project_id, orig_share_name, protocol, snapshot_tag, fpg, vfs) if not snapshot: msg = (_('Failed to create share from snapshot for ' 'FPG/VFS/tag %(fpg)s/%(vfs)s/%(tag)s. 
' 'Snapshot not found.') % { 'fpg': fpg, 'vfs': vfs, 'tag': snapshot_tag}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) fstore = snapshot['fstoreName'] if fstore == orig_share_name: # No subdir for original share created with fstore_per_share sharedir = '.snapshot/%s' % snapshot['snapName'] else: sharedir = '.snapshot/%s/%s' % (snapshot['snapName'], orig_share_name) return self.create_share( orig_project_id, share_id, protocol, extra_specs, fpg, vfs, fstore=fstore, sharedir=sharedir, readonly=True, comment=comment, ) def _delete_share(self, share_name, protocol, fpg, vfs, fstore): try: self._client.removefshare( protocol, vfs, share_name, fpg=fpg, fstore=fstore) except Exception as e: msg = (_('Failed to remove share %(share_name)s: %(e)s') % {'share_name': share_name, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) def delete_share(self, project_id, share_id, share_proto, fpg, vfs): protocol = self.ensure_supported_protocol(share_proto) share_name = self.ensure_prefix(share_id) fstore = self._find_fstore(project_id, share_name, protocol, fpg, vfs, allow_cross_protocol=True) if fstore: self._delete_share(share_name, protocol, fpg, vfs, fstore) share_name_ro = self.ensure_prefix(share_id, readonly=True) if not fstore: fstore = self._find_fstore(project_id, share_name_ro, protocol, fpg, vfs, allow_cross_protocol=True) if fstore: self._delete_share(share_name_ro, protocol, fpg, vfs, fstore) if not self.hpe3par_fstore_per_share: # Attempt to remove file tree on delete when using nested shares. # If the file tree cannot be removed for whatever reason, we will # not treat this as an error_deleting issue. We will allow the # delete to continue as requested. self._delete_file_tree(share_name, protocol, fpg, vfs, fstore) if fstore == share_name: try: self._client.removefstore(vfs, fstore, fpg=fpg) except Exception as e: msg = (_('Failed to remove fstore %(fstore)s: %(e)s') % {'fstore': fstore, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) def _delete_file_tree(self, share_name, protocol, fpg, vfs, fstore): # If the share protocol is CIFS, we need to make sure the admin # provided the proper config values. If they have not, we can simply # return out and log a warning. if protocol == "smb" and (not self.hpe3par_cifs_admin_access_username or not self.hpe3par_cifs_admin_access_password): LOG.warning(_LW("hpe3par_cifs_admin_access_username and " "hpe3par_cifs_admin_access_password must be " "provided in order for the file tree to be " "properly deleted.")) return mount_location = "%s%s" % (self.hpe3par_share_mount_path, share_name) share_dir = mount_location + "/%s" % share_name # Create the super share. self._create_super_share(protocol, fpg, vfs, fstore) # Create the mount directory. self._create_mount_directory(mount_location) # Mount the super share. self._mount_super_share(protocol, mount_location, fpg, vfs, fstore) # Delete the share from the super share. self._delete_share_directory(share_dir) # Unmount the super share. self._unmount_super_share(mount_location) # Delete the mount directory. self._delete_share_directory(mount_location) def _create_super_share(self, protocol, fpg, vfs, fstore, readonly=False): sharedir = '' extra_specs = {} comment = 'OpenStack super share used to delete nested shares.' 
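# Illustrative, standalone sketch (not part of the driver): the order of operations
# _delete_file_tree() above performs when removing a nested share's directory tree
# through the temporary SUPER_SHARE mount (the super share itself is created first
# with createfshare). Each command is run by the mediator via
# utils.execute(..., run_as_root=True); here they are only collected and printed.
# The mount root, share name and NFS source below are hypothetical examples.

def sketch_nested_tree_cleanup(mount_root, share_name, nfs_source):
    mount_location = '%s%s' % (mount_root, share_name)
    share_dir = '%s/%s' % (mount_location, share_name)
    return [
        ['mkdir', mount_location],                           # create mount point
        ['mount', '-t', 'nfs', nfs_source, mount_location],  # mount super share
        ['rm', '-rf', share_dir],                            # drop the nested tree
        ['umount', mount_location],                          # unmount super share
        ['rm', '-rf', mount_location],                       # remove mount point
    ]

for cmd in sketch_nested_tree_cleanup('/mnt/', 'osf-nfs-demo',
                                      '192.0.2.10:/fpg1/vfs1/osf-demo/'):
    print(' '.join(cmd))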
createfshare_kwargs = self._build_createfshare_kwargs(protocol, fpg, fstore, readonly, sharedir, extra_specs, comment) # If the share is NFS, we need to give the host access to the share in # order to properly mount it. if protocol == 'nfs': createfshare_kwargs['clientip'] = self.my_ip else: createfshare_kwargs['allowip'] = self.my_ip try: result = self._client.createfshare(protocol, vfs, SUPER_SHARE, **createfshare_kwargs) LOG.debug("createfshare for %(name)s, result=%(result)s", {'name': SUPER_SHARE, 'result': result}) except Exception as e: msg = (_('Failed to create share %(share_name)s: %(e)s'), {'share_name': SUPER_SHARE, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) # If the share is CIFS, we need to grant access to the specified admin. if protocol == 'smb': user = '+%s:fullcontrol' % self.hpe3par_cifs_admin_access_username setfshare_kwargs = { 'fpg': fpg, 'fstore': fstore, 'comment': comment, 'allowperm': user, } try: result = self._client.setfshare( protocol, vfs, SUPER_SHARE, **setfshare_kwargs) except Exception as err: message = (_("There was an error adding permissions: " "%s.") % six.text_type(err)) raise exception.ShareMountException(reason=message) def _create_mount_directory(self, mount_location): try: utils.execute('mkdir', mount_location, run_as_root=True) except Exception as err: message = (_LW("There was an error creating mount directory: " "%s. The nested file tree will not be deleted."), six.text_type(err)) LOG.warning(message) def _mount_super_share(self, protocol, mount_location, fpg, vfs, fstore): try: mount_path = self._generate_mount_path(protocol, fpg, vfs, fstore) if protocol == 'nfs': utils.execute('mount', '-t', 'nfs', mount_path, mount_location, run_as_root=True) LOG.debug("Execute mount. mount_location: %s", mount_location) else: user = ('username=' + self.hpe3par_cifs_admin_access_username + ',password=' + self.hpe3par_cifs_admin_access_password + ',domain=' + self.hpe3par_cifs_admin_access_domain) utils.execute('mount', '-t', 'cifs', mount_path, mount_location, '-o', user, run_as_root=True) except Exception as err: message = (_LW("There was an error mounting the super share: " "%s. The nested file tree will not be deleted."), six.text_type(err)) LOG.warning(message) def _unmount_super_share(self, mount_location): try: utils.execute('umount', mount_location, run_as_root=True) except Exception as err: message = (_LW("There was an error unmounting the super share: " "%s. The nested file tree will not be deleted."), six.text_type(err)) LOG.warning(message) def _delete_share_directory(self, directory): try: utils.execute('rm', '-rf', directory, run_as_root=True) except Exception as err: message = (_LW("There was an error removing the share: " "%s. 
The nested file tree will not be deleted."), six.text_type(err)) LOG.warning(message) def _generate_mount_path(self, protocol, fpg, vfs, fstore): path = None if protocol == 'nfs': path = (("%(share_ip)s:/%(fpg)s/%(vfs)s/%(fstore)s/") % {'share_ip': self.hpe3par_share_ip_address, 'fpg': fpg, 'vfs': vfs, 'fstore': fstore}) else: path = (("//%(share_ip)s/%(share_name)s/") % {'share_ip': self.hpe3par_share_ip_address, 'share_name': SUPER_SHARE}) return path def get_vfs_name(self, fpg): return self.get_vfs(fpg)['vfsname'] def get_vfs(self, fpg, vfs=None): """Get the VFS or raise an exception.""" try: result = self._client.getvfs(fpg=fpg, vfs=vfs) except Exception as e: msg = (_('Exception during getvfs %(vfs)s: %(e)s') % {'vfs': vfs, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg) if result['total'] != 1: error_msg = result.get('message') if error_msg: message = (_('Error while validating FPG/VFS ' '(%(fpg)s/%(vfs)s): %(msg)s') % {'fpg': fpg, 'vfs': vfs, 'msg': error_msg}) LOG.error(message) raise exception.ShareBackendException(message) else: message = (_('Error while validating FPG/VFS ' '(%(fpg)s/%(vfs)s): Expected 1, ' 'got %(total)s.') % {'fpg': fpg, 'vfs': vfs, 'total': result['total']}) LOG.error(message) raise exception.ShareBackendException(message) return result['members'][0] @staticmethod def _is_share_from_snapshot(fshare): path = fshare.get('shareDir') if path: return '.snapshot' in path.split('/') path = fshare.get('sharePath') return path and '.snapshot' in path.split('/') def create_snapshot(self, orig_project_id, orig_share_id, orig_share_proto, snapshot_id, fpg, vfs): """Creates a snapshot of a share.""" fshare = self._find_fshare(orig_project_id, orig_share_id, orig_share_proto, fpg, vfs) if not fshare: msg = (_('Failed to create snapshot for FPG/VFS/fshare ' '%(fpg)s/%(vfs)s/%(fshare)s: Failed to find fshare.') % {'fpg': fpg, 'vfs': vfs, 'fshare': orig_share_id}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) if self._is_share_from_snapshot(fshare): msg = (_('Failed to create snapshot for FPG/VFS/fshare ' '%(fpg)s/%(vfs)s/%(fshare)s: Share is a read-only ' 'share of an existing snapshot.') % {'fpg': fpg, 'vfs': vfs, 'fshare': orig_share_id}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) fstore = fshare.get('fstoreName') snapshot_tag = self.ensure_prefix(snapshot_id) try: result = self._client.createfsnap( vfs, fstore, snapshot_tag, fpg=fpg) LOG.debug("createfsnap result=%s", result) except Exception as e: msg = (_('Failed to create snapshot for FPG/VFS/fstore ' '%(fpg)s/%(vfs)s/%(fstore)s: %(e)s') % {'fpg': fpg, 'vfs': vfs, 'fstore': fstore, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) def delete_snapshot(self, orig_project_id, orig_share_id, orig_proto, snapshot_id, fpg, vfs): """Deletes a snapshot of a share.""" snapshot_tag = self.ensure_prefix(snapshot_id) snapshot = self._find_fsnap(orig_project_id, orig_share_id, orig_proto, snapshot_tag, fpg, vfs) if not snapshot: return fstore = snapshot.get('fstoreName') for protocol in ('nfs', 'smb'): try: shares = self._client.getfshare(protocol, fpg=fpg, vfs=vfs, fstore=fstore) except Exception as e: msg = (_('Unexpected exception while getting share list. 
' 'Cannot delete snapshot without checking for ' 'dependent shares first: %s') % six.text_type(e)) LOG.exception(msg) raise exception.ShareBackendException(msg) for share in shares['members']: if protocol == 'nfs': path = share['sharePath'][1:].split('/') dot_snapshot_index = 3 else: if share['shareDir']: path = share['shareDir'].split('/') else: path = None dot_snapshot_index = 0 snapshot_index = dot_snapshot_index + 1 if path and len(path) > snapshot_index: if (path[dot_snapshot_index] == '.snapshot' and path[snapshot_index].endswith(snapshot_tag)): msg = (_('Cannot delete snapshot because it has a ' 'dependent share.')) raise exception.Invalid(msg) snapname = snapshot['snapName'] try: result = self._client.removefsnap( vfs, fstore, snapname=snapname, fpg=fpg) LOG.debug("removefsnap result=%s", result) except Exception as e: msg = (_('Failed to delete snapshot for FPG/VFS/fstore/snapshot ' '%(fpg)s/%(vfs)s/%(fstore)s/%(snapname)s: %(e)s') % { 'fpg': fpg, 'vfs': vfs, 'fstore': fstore, 'snapname': snapname, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg) # Try to reclaim the space try: self._client.startfsnapclean(fpg, reclaimStrategy='maxspeed') except Exception as e: # Remove already happened so only log this. msg = (_('Unexpected exception calling startfsnapclean for FPG ' '%(fpg)s: %(e)s') % {'fpg': fpg, 'e': six.text_type(e)}) LOG.exception(msg) @staticmethod def _validate_access_type(protocol, access_type): if access_type not in ('ip', 'user'): msg = (_("Invalid access type. Expected 'ip' or 'user'. " "Actual '%s'.") % access_type) LOG.error(msg) raise exception.InvalidInput(msg) if protocol == 'nfs' and access_type != 'ip': msg = (_("Invalid NFS access type. HPE 3PAR NFS supports 'ip'. " "Actual '%s'.") % access_type) LOG.error(msg) raise exception.HPE3ParInvalid(msg) return protocol @staticmethod def _validate_access_level(protocol, access_type, access_level, fshare): readonly = access_level == 'ro' snapshot = HPE3ParMediator._is_share_from_snapshot(fshare) if snapshot and not readonly: reason = _('3PAR shares from snapshots require read-only access') LOG.error(reason) raise exception.InvalidShareAccess(reason=reason) if protocol == 'smb' and access_type == 'ip' and snapshot != readonly: msg = (_("Invalid CIFS access rule. HPE 3PAR optionally supports " "IP access rules for CIFS shares, but they must be " "read-only for shares from snapshots and read-write for " "other shares. Use the required CIFS 'user' access rules " "to refine access.")) LOG.error(msg) raise exception.InvalidShareAccess(reason=msg) @staticmethod def ignore_benign_access_results(plus_or_minus, access_type, access_to, result): # TODO(markstur): Remove the next line when hpe3parclient is fixed. result = [x for x in result if x != '\r'] if result: if plus_or_minus == DENY: if DOES_NOT_EXIST in result[0]: return None else: if access_type == 'user': if USER_ALREADY_EXISTS % access_to in result[0]: return None elif IP_ALREADY_EXISTS % access_to in result[0]: return None return result def _change_access(self, plus_or_minus, project_id, share_id, share_proto, access_type, access_to, access_level, fpg, vfs, extra_specs=None): """Allow or deny access to a share. Plus_or_minus character indicates add to allow list (+) or remove from allow list (-). """ readonly = access_level == 'ro' protocol = self.ensure_supported_protocol(share_proto) try: self._validate_access_type(protocol, access_type) except Exception: if plus_or_minus == DENY: # Catch invalid rules for deny. 
Allow them to be deleted. return else: raise fshare = self._find_fshare(project_id, share_id, protocol, fpg, vfs, readonly=readonly) if not fshare: # Change access might apply to the share with the name that # does not match the access_level prefix. other_fshare = self._find_fshare(project_id, share_id, protocol, fpg, vfs, readonly=not readonly) if other_fshare: if plus_or_minus == DENY: # Try to deny rule from 'other' share for SMB or legacy. fshare = other_fshare elif self._is_share_from_snapshot(other_fshare): # Found a share-from-snapshot from before # "-ro" was added to the name. Use it. fshare = other_fshare elif protocol == 'nfs': # We don't have the RO|RW share we need, but the # opposite one already exists. It is OK to create # the one we need for ALLOW with NFS (not from snapshot). fstore = other_fshare.get('fstoreName') sharedir = other_fshare.get('shareDir') comment = other_fshare.get('comment') fshare = self._create_share(project_id, share_id, protocol, extra_specs, fpg, vfs, fstore=fstore, sharedir=sharedir, readonly=readonly, size=None, comment=comment) else: # SMB only has one share for RO and RW. Try to use it. fshare = other_fshare if not fshare: msg = _('Failed to change (%(change)s) access ' 'to FPG/share %(fpg)s/%(share)s ' 'for %(type)s %(to)s %(level)s): ' 'Share does not exist on 3PAR.') msg_data = { 'change': plus_or_minus, 'fpg': fpg, 'share': share_id, 'type': access_type, 'to': access_to, 'level': access_level, } if plus_or_minus == DENY: LOG.warning(msg, msg_data) return else: raise exception.HPE3ParInvalid(err=msg % msg_data) try: self._validate_access_level( protocol, access_type, access_level, fshare) except exception.InvalidShareAccess as e: if plus_or_minus == DENY: # Allow invalid access rules to be deleted. msg = _('Ignoring deny invalid access rule ' 'for FPG/share %(fpg)s/%(share)s ' 'for %(type)s %(to)s %(level)s): %(e)s') msg_data = { 'change': plus_or_minus, 'fpg': fpg, 'share': share_id, 'type': access_type, 'to': access_to, 'level': access_level, 'e': six.text_type(e), } LOG.info(msg, msg_data) return else: raise share_name = fshare.get('shareName') setfshare_kwargs = { 'fpg': fpg, 'fstore': fshare.get('fstoreName'), 'comment': fshare.get('comment'), } if protocol == 'nfs': access_change = '%s%s' % (plus_or_minus, access_to) setfshare_kwargs['clientip'] = access_change elif protocol == 'smb': if access_type == 'ip': access_change = '%s%s' % (plus_or_minus, access_to) setfshare_kwargs['allowip'] = access_change else: access_str = 'read' if readonly else 'fullcontrol' perm = '%s%s:%s' % (plus_or_minus, access_to, access_str) setfshare_kwargs['allowperm'] = perm try: result = self._client.setfshare( protocol, vfs, share_name, **setfshare_kwargs) result = self.ignore_benign_access_results( plus_or_minus, access_type, access_to, result) except Exception as e: result = six.text_type(e) LOG.debug("setfshare result=%s", result) if result: msg = (_('Failed to change (%(change)s) access to FPG/share ' '%(fpg)s/%(share)s for %(type)s %(to)s %(level)s: ' '%(error)s') % {'change': plus_or_minus, 'fpg': fpg, 'share': share_id, 'type': access_type, 'to': access_to, 'level': access_level, 'error': result}) raise exception.ShareBackendException(msg=msg) def _find_fstore(self, project_id, share_id, share_proto, fpg, vfs, allow_cross_protocol=False): share = self._find_fshare(project_id, share_id, share_proto, fpg, vfs, allow_cross_protocol=allow_cross_protocol) return share.get('fstoreName') if share else None def _find_fshare(self, project_id, share_id, 
share_proto, fpg, vfs, allow_cross_protocol=False, readonly=False): share = self._find_fshare_with_proto(project_id, share_id, share_proto, fpg, vfs, readonly=readonly) if not share and allow_cross_protocol: other_proto = self.other_protocol(share_proto) share = self._find_fshare_with_proto(project_id, share_id, other_proto, fpg, vfs, readonly=readonly) return share def _find_fshare_with_proto(self, project_id, share_id, share_proto, fpg, vfs, readonly=False): protocol = self.ensure_supported_protocol(share_proto) share_name = self.ensure_prefix(share_id, readonly=readonly) project_fstore = self.ensure_prefix(project_id, share_proto) search_order = [ {'fpg': fpg, 'vfs': vfs, 'fstore': project_fstore}, {'fpg': fpg, 'vfs': vfs, 'fstore': share_name}, {'fpg': fpg}, {} ] try: for search_params in search_order: result = self._client.getfshare(protocol, share_name, **search_params) shares = result.get('members', []) if len(shares) == 1: return shares[0] except Exception as e: msg = (_('Unexpected exception while getting share list: %s') % six.text_type(e)) raise exception.ShareBackendException(msg=msg) def _find_fsnap(self, project_id, share_id, orig_proto, snapshot_tag, fpg, vfs): share_name = self.ensure_prefix(share_id) osf_project_id = self.ensure_prefix(project_id, orig_proto) pattern = '*_%s' % self.ensure_prefix(snapshot_tag) search_order = [ {'pat': True, 'fpg': fpg, 'vfs': vfs, 'fstore': osf_project_id}, {'pat': True, 'fpg': fpg, 'vfs': vfs, 'fstore': share_name}, {'pat': True, 'fpg': fpg}, {'pat': True}, ] try: for search_params in search_order: result = self._client.getfsnap(pattern, **search_params) snapshots = result.get('members', []) if len(snapshots) == 1: return snapshots[0] except Exception as e: msg = (_('Unexpected exception while getting snapshots: %s') % six.text_type(e)) raise exception.ShareBackendException(msg) def allow_access(self, project_id, share_id, share_proto, extra_specs, access_type, access_to, access_level, fpg, vfs): """Grant access to a share.""" self._change_access(ALLOW, project_id, share_id, share_proto, access_type, access_to, access_level, fpg, vfs, extra_specs=extra_specs) def deny_access(self, project_id, share_id, share_proto, access_type, access_to, access_level, fpg, vfs): """Deny access to a share.""" self._change_access(DENY, project_id, share_id, share_proto, access_type, access_to, access_level, fpg, vfs) def resize_share(self, project_id, share_id, share_proto, new_size, old_size, fpg, vfs): """Extends or shrinks size of existing share.""" share_name = self.ensure_prefix(share_id) fstore = self._find_fstore(project_id, share_name, share_proto, fpg, vfs, allow_cross_protocol=False) if not fstore: msg = (_('Cannot resize share because it was not found.')) raise exception.InvalidShare(reason=msg) self._update_capacity_quotas(fstore, new_size, old_size, fpg, vfs) def fsip_exists(self, fsip): """Try to get FSIP. Return True if it exists.""" vfs = fsip['vfs'] fpg = fsip['fspool'] try: result = self._client.getfsip(vfs, fpg=fpg) LOG.debug("getfsip result: %s", result) except Exception as e: LOG.exception(e) msg = (_('Failed to get FSIPs for FPG/VFS %(fspool)s/%(vfs)s.') % fsip) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) for member in result['members']: if all(item in member.items() for item in fsip.items()): return True return False def create_fsip(self, ip, subnet, vlantag, fpg, vfs): vlantag_str = six.text_type(vlantag) if vlantag else '0' # Try to create it. It's OK if it already exists. 
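# Illustrative, standalone sketch (not part of the driver): the dict-subset test
# fsip_exists() above uses to decide whether a getfsip member matches the FSIP that
# was just created (or removed). The member/candidate values are made-up examples.

def sketch_fsip_matches(member, fsip):
    # True when every key/value pair requested for the FSIP is present in the
    # member returned by the array; extra member fields are ignored.
    return all(item in member.items() for item in fsip.items())

member = {'fspool': 'fpg1', 'vfs': 'vfs1', 'address': '192.0.2.10',
          'prefixLen': '255.255.255.0', 'vlanTag': '0', 'networkName': 'user'}
wanted = {'fspool': 'fpg1', 'vfs': 'vfs1', 'address': '192.0.2.10'}
assert sketch_fsip_matches(member, wanted)
assert not sketch_fsip_matches(member, dict(wanted, address='192.0.2.99'))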
try: result = self._client.createfsip(ip, subnet, vfs, fpg=fpg, vlantag=vlantag_str) LOG.debug("createfsip result: %s", result) except Exception as e: LOG.exception(e) msg = (_('Failed to create FSIP for %s') % ip) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) # Verify that it really exists. fsip = { 'fspool': fpg, 'vfs': vfs, 'address': ip, 'prefixLen': subnet, 'vlanTag': vlantag_str, } if not self.fsip_exists(fsip): msg = (_('Failed to get FSIP after creating it for ' 'FPG/VFS/IP/subnet/VLAN ' '%(fspool)s/%(vfs)s/' '%(address)s/%(prefixLen)s/%(vlanTag)s.') % fsip) LOG.error(msg) raise exception.ShareBackendException(msg=msg) def remove_fsip(self, ip, fpg, vfs): if not (vfs and ip): # If there is no VFS and/or IP, then there is no FSIP to remove. return try: result = self._client.removefsip(vfs, ip, fpg=fpg) LOG.debug("removefsip result: %s", result) except Exception as e: LOG.exception(e) msg = (_('Failed to remove FSIP %s') % ip) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) # Verify that it really no longer exists. fsip = { 'fspool': fpg, 'vfs': vfs, 'address': ip, } if self.fsip_exists(fsip): msg = (_('Failed to remove FSIP for FPG/VFS/IP ' '%(fspool)s/%(vfs)s/%(address)s.') % fsip) LOG.error(msg) raise exception.ShareBackendException(msg=msg) manila-2.0.0/manila/share/drivers/tegile/0000775000567000056710000000000012701407265021411 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/tegile/__init__.py0000664000567000056710000000000012701407107023503 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/tegile/tegile.py0000664000567000056710000004622512701407107023240 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 by Tegile Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Share driver for Tegile storage. """ import json import requests import six from oslo_config import cfg from oslo_log import log from manila import utils from manila.i18n import _, _LI, _LW from manila import exception from manila.share import driver from manila.share import utils as share_utils tegile_opts = [ cfg.StrOpt('tegile_nas_server', help='Tegile NAS server hostname or IP address.'), cfg.StrOpt('tegile_nas_login', help='User name for the Tegile NAS server.'), cfg.StrOpt('tegile_nas_password', help='Password for the Tegile NAS server.'), cfg.StrOpt('tegile_default_project', help='Create shares in this project')] CONF = cfg.CONF CONF.register_opts(tegile_opts) LOG = log.getLogger(__name__) DEFAULT_API_SERVICE = 'openstack' TEGILE_API_PATH = 'zebi/api' TEGILE_LOCAL_CONTAINER_NAME = 'Local' TEGILE_SNAPSHOT_PREFIX = 'Manual-S-' VENDOR = 'Tegile Systems Inc.' DEFAULT_BACKEND_NAME = 'Tegile' VERSION = '1.0.0' DEBUG_LOGGING = False # For debugging purposes def debugger(func): """Returns a wrapper that wraps func. The wrapper will log the entry and exit points of the function. 
""" def wrapper(*args, **kwds): if DEBUG_LOGGING: LOG.debug('Entering %(classname)s.%(funcname)s', { 'classname': args[0].__class__.__name__, 'funcname': func.__name__, }) LOG.debug('Arguments: %(args)s, %(kwds)s', { 'args': args[1:], 'kwds': kwds, }) f_result = func(*args, **kwds) if DEBUG_LOGGING: LOG.debug('Exiting %(classname)s.%(funcname)s', { 'classname': args[0].__class__.__name__, 'funcname': func.__name__, }) LOG.debug('Results: %(result)s', {'result': f_result}) return f_result return wrapper class TegileAPIExecutor(object): def __init__(self, classname, hostname, username, password): self._classname = classname self._hostname = hostname self._username = username self._password = password def __call__(self, *args, **kwargs): return self._send_api_request(*args, **kwargs) @debugger @utils.retry(exception=(requests.ConnectionError, requests.Timeout), interval=30, retries=3, backoff_rate=1) def _send_api_request(self, method, params=None, request_type='post', api_service=DEFAULT_API_SERVICE, fine_logging=DEBUG_LOGGING): if params is not None: params = json.dumps(params) url = 'https://%s/%s/%s/%s' % (self._hostname, TEGILE_API_PATH, api_service, method) if fine_logging: LOG.debug('TegileAPIExecutor(%(classname)s) method: %(method)s, ' 'url: %(url)s', { 'classname': self._classname, 'method': method, 'url': url, }) if request_type == 'post': if fine_logging: LOG.debug('TegileAPIExecutor(%(classname)s) ' 'method: %(method)s, payload: %(payload)s', { 'classname': self._classname, 'method': method, 'payload': params, }) req = requests.post(url, data=params, auth=(self._username, self._password), verify=False) else: req = requests.get(url, auth=(self._username, self._password), verify=False) if fine_logging: LOG.debug('TegileAPIExecutor(%(classname)s) method: %(method)s, ' 'return code: %(retcode)s', { 'classname': self._classname, 'method': method, 'retcode': req, }) try: response = req.json() if fine_logging: LOG.debug('TegileAPIExecutor(%(classname)s) ' 'method: %(method)s, response: %(response)s', { 'classname': self._classname, 'method': method, 'response': response, }) except ValueError: # Some APIs don't return output and that's fine response = '' req.close() if req.status_code != 200: raise exception.TegileAPIException(response=req.text) return response class TegileShareDriver(driver.ShareDriver): """Tegile NAS driver. Allows for NFS and CIFS NAS storage usage.""" def __init__(self, *args, **kwargs): super(TegileShareDriver, self).__init__(False, *args, **kwargs) self.configuration.append_config_values(tegile_opts) self._default_project = (self.configuration.safe_get( "tegile_default_project") or 'openstack') self._backend_name = (self.configuration.safe_get('share_backend_name') or CONF.share_backend_name or DEFAULT_BACKEND_NAME) self._hostname = self.configuration.safe_get('tegile_nas_server') username = self.configuration.safe_get('tegile_nas_login') password = self.configuration.safe_get('tegile_nas_password') self._api = TegileAPIExecutor(self.__class__.__name__, self._hostname, username, password) @debugger def create_share(self, context, share, share_server=None): """Is called to create share.""" share_name = share['name'] share_proto = share['share_proto'] pool_name = share_utils.extract_host(share['host'], level='pool') params = (pool_name, self._default_project, share_name, share_proto) # Share name coming from the backend is the most reliable. Sometimes # a few options in Tegile array could cause sharename to be different # from the one passed to it. Eg. 
'projectname-sharename' instead # of 'sharename' if inherited share properties are selected. ip, real_share_name = self._api('createShare', params).split() LOG.info(_LI("Created share %(sharename)s, share id %(shid)s."), {'sharename': share_name, 'shid': share['id']}) return self._get_location_path(real_share_name, share_proto, ip) @debugger def extend_share(self, share, new_size, share_server=None): """Is called to extend share. There is no resize for Tegile shares. We just adjust the quotas. The API is still called 'resizeShare'. """ self._adjust_size(share, new_size, share_server) @debugger def shrink_share(self, shrink_share, shrink_size, share_server=None): """Uses resize_share to shrink a share. There is no shrink for Tegile shares. We just adjust the quotas. The API is still called 'resizeShare'. """ self._adjust_size(shrink_share, shrink_size, share_server) @debugger def _adjust_size(self, share, new_size, share_server=None): pool, project, share_name = self._get_pool_project_share_name(share) params = ('%s/%s/%s/%s' % (pool, TEGILE_LOCAL_CONTAINER_NAME, project, share_name), six.text_type(new_size), 'GB') self._api('resizeShare', params) @debugger def delete_share(self, context, share, share_server=None): """Is called to remove share.""" pool, project, share_name = self._get_pool_project_share_name(share) params = ('%s/%s/%s/%s' % (pool, TEGILE_LOCAL_CONTAINER_NAME, project, share_name), True, False) self._api('deleteShare', params) @debugger def create_snapshot(self, context, snapshot, share_server=None): """Is called to create snapshot.""" snap_name = snapshot['name'] pool, project, share_name = self._get_pool_project_share_name( snapshot['share']) share = { 'poolName': '%s' % pool, 'projectName': '%s' % project, 'name': share_name, 'availableSize': 0, 'totalSize': 0, 'datasetPath': '%s/%s/%s' % (pool, TEGILE_LOCAL_CONTAINER_NAME, project), 'mountpoint': share_name, 'local': 'true', } params = (share, snap_name, False) LOG.info(_LI('Creating snapshot for share_name=%(shr)s' ' snap_name=%(name)s'), {'shr': share_name, 'name': snap_name}) self._api('createShareSnapshot', params) @debugger def create_share_from_snapshot(self, context, share, snapshot, share_server=None): """Create a share from a snapshot - clone a snapshot.""" pool, project, share_name = self._get_pool_project_share_name(share) params = ('%s/%s/%s/%s@%s%s' % (pool, TEGILE_LOCAL_CONTAINER_NAME, project, snapshot['share_name'], TEGILE_SNAPSHOT_PREFIX, snapshot['name'], ), share_name, True, ) ip, real_share_name = self._api('cloneShareSnapshot', params).split() share_proto = share['share_proto'] return self._get_location_path(real_share_name, share_proto, ip) @debugger def delete_snapshot(self, context, snapshot, share_server=None): """Is called to remove snapshot.""" pool, project, share_name = self._get_pool_project_share_name( snapshot['share']) params = ('%s/%s/%s/%s@%s%s' % (pool, TEGILE_LOCAL_CONTAINER_NAME, project, share_name, TEGILE_SNAPSHOT_PREFIX, snapshot['name']), False) self._api('deleteShareSnapshot', params) @debugger def ensure_share(self, context, share, share_server=None): """Invoked to sure that share is exported.""" # Fetching share name from server, because some configuration # options can cause sharename different from the OpenStack share name pool, project, share_name = self._get_pool_project_share_name(share) params = [ '%s/%s/%s/%s' % (pool, TEGILE_LOCAL_CONTAINER_NAME, project, share_name), ] ip, real_share_name = self._api('getShareIPAndMountPoint', params).split() share_proto = 
share['share_proto'] location = self._get_location_path(real_share_name, share_proto, ip) return [location] @debugger def _allow_access(self, context, share, access, share_server=None): """Allow access to the share.""" share_proto = share['share_proto'] access_type = access['access_type'] access_level = access['access_level'] access_to = access['access_to'] self._check_share_access(share_proto, access_type) pool, project, share_name = self._get_pool_project_share_name(share) params = ('%s/%s/%s/%s' % (pool, TEGILE_LOCAL_CONTAINER_NAME, project, share_name), share_proto, access_type, access_to, access_level) self._api('shareAllowAccess', params) @debugger def _deny_access(self, context, share, access, share_server=None): """Deny access to the share.""" share_proto = share['share_proto'] access_type = access['access_type'] access_level = access['access_level'] access_to = access['access_to'] self._check_share_access(share_proto, access_type) pool, project, share_name = self._get_pool_project_share_name(share) params = ('%s/%s/%s/%s' % (pool, TEGILE_LOCAL_CONTAINER_NAME, project, share_name), share_proto, access_type, access_to, access_level) self._api('shareDenyAccess', params) def _check_share_access(self, share_proto, access_type): if share_proto == 'CIFS' and access_type != 'user': reason = _LW('Only USER access type is allowed for ' 'CIFS shares.') LOG.warning(reason) raise exception.InvalidShareAccess(reason=reason) elif share_proto == 'NFS' and access_type not in ('ip', 'user'): reason = _LW('Only IP or USER access types are allowed for ' 'NFS shares.') LOG.warning(reason) raise exception.InvalidShareAccess(reason=reason) elif share_proto not in ('NFS', 'CIFS'): reason = _LW('Unsupported protocol \"%s\" specified for ' 'access rule.') % share_proto raise exception.InvalidShareAccess(reason=reason) @debugger def update_access(self, context, share, access_rules, add_rules, delete_rules, share_server=None): if not (add_rules or delete_rules): # Recovery mode pool, project, share_name = ( self._get_pool_project_share_name(share)) share_proto = share['share_proto'] params = ('%s/%s/%s/%s' % (pool, TEGILE_LOCAL_CONTAINER_NAME, project, share_name), share_proto) # Clears all current ACLs # Remove ip and user ACLs if share_proto is NFS # Remove user ACLs if share_proto is CIFS self._api('clearAccessRules', params) # Looping thru all rules. # Will have one API call per rule. 
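# Illustrative, standalone sketch (not part of the driver): the control flow of
# TegileShareDriver.update_access() above. clear_all/allow/deny stand in for the
# 'clearAccessRules', 'shareAllowAccess' and 'shareDenyAccess' API calls; the rule
# dict in the usage example is hypothetical.

def sketch_update_access(access_rules, add_rules, delete_rules,
                         clear_all, allow, deny):
    if not (add_rules or delete_rules):
        # Recovery mode: wipe the ACLs and replay every rule, one call per rule.
        clear_all()
        for rule in access_rules:
            allow(rule)
    else:
        # Incremental mode: apply only the requested changes.
        for rule in delete_rules:
            deny(rule)
        for rule in add_rules:
            allow(rule)

def _log_clear():
    print('clearAccessRules')

def _log_allow(rule):
    print('shareAllowAccess', rule)

def _log_deny(rule):
    print('shareDenyAccess', rule)

sketch_update_access(
    access_rules=[{'access_type': 'ip', 'access_to': '10.0.0.5',
                   'access_level': 'rw'}],
    add_rules=[], delete_rules=[],
    clear_all=_log_clear, allow=_log_allow, deny=_log_deny)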
for access in access_rules: self._allow_access(context, share, access, share_server) else: # Adding/Deleting specific rules for access in delete_rules: self._deny_access(context, share, access, share_server) for access in add_rules: self._allow_access(context, share, access, share_server) @debugger def _update_share_stats(self, **kwargs): """Retrieve stats info.""" try: data = self._api(method='getArrayStats', request_type='get', fine_logging=False) # fixing values coming back here as String to float for pool in data.get('pools', []): pool['total_capacity_gb'] = float( pool.get('total_capacity_gb', 0)) pool['free_capacity_gb'] = float( pool.get('free_capacity_gb', 0)) pool['allocated_capacity_gb'] = float( pool.get('allocated_capacity_gb', 0)) pool['qos'] = pool.pop('QoS_support', False) pool['reserved_percentage'] = ( self.configuration.reserved_share_percentage) pool['dedupe'] = True pool['compression'] = True pool['thin_provisioning'] = True pool['max_over_subscription_ratio'] = ( self.configuration.max_over_subscription_ratio) data['share_backend_name'] = self._backend_name data['vendor_name'] = VENDOR data['driver_version'] = VERSION data['storage_protocol'] = 'NFS_CIFS' data['snapshot_support'] = True data['qos'] = False super(TegileShareDriver, self)._update_share_stats(data) except Exception as e: msg = _('Unexpected error while trying to get the ' 'usage stats from array.') LOG.exception(msg) raise e @debugger def get_pool(self, share): """Returns pool name where share resides. :param share: The share hosted by the driver. :return: Name of the pool where given share is hosted. """ pool = share_utils.extract_host(share['host'], level='pool') return pool @debugger def get_network_allocations_number(self): """Get number of network interfaces to be created.""" return 0 @debugger def _get_location_path(self, share_name, share_proto, ip=None): if ip is None: ip = self._hostname if share_proto == 'NFS': location = '%s:%s' % (ip, share_name) elif share_proto == 'CIFS': location = r'\\%s\%s' % (ip, share_name) else: message = _('Invalid NAS protocol supplied: %s.') % share_proto raise exception.InvalidInput(message) export_location = { 'path': location, 'is_admin_only': False, 'metadata': { 'preferred': True, }, } return export_location @debugger def _get_pool_project_share_name(self, share): pool = share_utils.extract_host(share['host'], level='pool') project = self._default_project share_name = share['name'] return pool, project, share_name manila-2.0.0/manila/share/drivers/emc/0000775000567000056710000000000012701407265020704 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/emc/__init__.py0000664000567000056710000000000012701407107022776 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/emc/plugin_manager.py0000664000567000056710000000214512701407107024243 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
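# Illustrative, standalone sketch (not part of the driver): the export location
# dictionary built by TegileShareDriver._get_location_path() above for the two
# supported protocols. The IP address and share name are made-up examples.

def sketch_location_path(share_name, share_proto, ip):
    if share_proto == 'NFS':
        path = '%s:%s' % (ip, share_name)
    elif share_proto == 'CIFS':
        path = r'\\%s\%s' % (ip, share_name)
    else:
        raise ValueError('Invalid NAS protocol supplied: %s' % share_proto)
    return {
        'path': path,
        'is_admin_only': False,
        'metadata': {'preferred': True},
    }

assert (sketch_location_path('share-demo', 'NFS', '192.0.2.20')['path']
        == '192.0.2.20:share-demo')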
"""EMC Share Driver Plugin Framework.""" from stevedore import extension class EMCPluginManager(object): def __init__(self, namespace): self.namespace = namespace self.extension_manager = extension.ExtensionManager(namespace) def load_plugin(self, name, logger=None): for ext in self.extension_manager.extensions: if ext.name == name: storage_conn = ext.plugin(logger) return storage_conn return None manila-2.0.0/manila/share/drivers/emc/plugins/0000775000567000056710000000000012701407265022365 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/emc/plugins/vnx/0000775000567000056710000000000012701407265023200 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/emc/plugins/vnx/constants.py0000664000567000056710000000313612701407107025564 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. STATUS_OK = 'ok' STATUS_INFO = 'info' STATUS_DEBUG = 'debug' STATUS_WARNING = 'warning' STATUS_ERROR = 'error' STATUS_NOT_FOUND = 'not_found' MSG_GENERAL_ERROR = '13690601492' MSG_INVALID_VDM_ID = '14227341325' MSG_INVALID_MOVER_ID = '14227341323' MSG_FILESYSTEM_NOT_FOUND = "18522112101" MSG_FILESYSTEM_EXIST = '13691191325' MSG_VDM_EXIST = '13421840550' MSG_SNAP_EXIST = '13690535947' MSG_INTERFACE_NAME_EXIST = '13421840550' MSG_INTERFACE_EXIST = '13691781136' MSG_INTERFACE_INVALID_VLAN_ID = '13421850371' MSG_INTERFACE_NON_EXISTENT = '13691781134' MSG_JOIN_DOMAIN = '13157007726' MSG_UNJOIN_DOMAIN = '13157007723' IP_ALLOCATIONS = 2 CONTENT_TYPE_URLENCODE = {'Content-Type': 'application/x-www-form-urlencoded'} XML_HEADER = '' XML_NAMESPACE = 'http://www.emc.com/schemas/celerra/xml_api' CIFS_ACL_FULLCONTROL = 'fullcontrol' CIFS_ACL_READ = 'read' SSH_DEFAULT_RETRY_PATTERN = r'Error 2201:.*: unable to acquire lock\(s\)' manila-2.0.0/manila/share/drivers/emc/plugins/vnx/utils.py0000664000567000056710000000365512701407107024716 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import types from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils CONF = cfg.CONF LOG = log.getLogger(__name__) def decorate_all_methods(decorator, debug_only=False): if debug_only and not CONF.debug: return lambda cls: cls def _decorate_all_methods(cls): for attr_name, attr_val in cls.__dict__.items(): if (isinstance(attr_val, types.FunctionType) and not attr_name.startswith("_")): setattr(cls, attr_name, decorator(attr_val)) return cls return _decorate_all_methods def log_enter_exit(func): if not CONF.debug: return func def inner(self, *args, **kwargs): LOG.debug("Entering %(cls)s.%(method)s.", {'cls': self.__class__.__name__, 'method': func.__name__}) start = timeutils.utcnow() ret = func(self, *args, **kwargs) end = timeutils.utcnow() LOG.debug("Exiting %(cls)s.%(method)s. " "Spent %(duration)s sec. " "Return %(return)s.", {'cls': self.__class__.__name__, 'duration': timeutils.delta_seconds(start, end), 'method': func.__name__, 'return': ret}) return ret return inner manila-2.0.0/manila/share/drivers/emc/plugins/vnx/__init__.py0000664000567000056710000000000012701407107025272 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/emc/plugins/vnx/xml_api_parser.py0000664000567000056710000002337512701407107026564 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from lxml import etree import six class XMLAPIParser(object): def __init__(self): # The following Boolean acts as the flag for the common sub-element. # For instance: # #
        #     <CifsServers>
        #         <li> server_1 </li>
        #     </CifsServers>
        #
        #     <Interfaces>
        #         <li> interface_1 </li>
        #     </Interfaces>
    self.is_QueryStatus = False self.is_CifsServers = False self.is_Aliases = False self.is_MoverStatus = False self.is_TaskResponse = False self.is_Vdm = False self.is_Interfaces = False self.elt = {} def _remove_ns(self, tag): i = tag.find('}') if i >= 0: tag = tag[i + 1:] return tag def parse(self, xml): result = { 'type': None, 'taskId': None, 'maxSeverity': None, 'objects': [], 'problems': [], } events = ("start", "end") context = etree.iterparse(six.BytesIO(xml.encode('utf-8')), events=events) for action, elem in context: self.tag = self._remove_ns(elem.tag) func = self._get_func(action, self.tag) if func in vars(XMLAPIParser): if action == 'start': eval('self.' + func)(elem, result) elif action == 'end': eval('self.' + func)() return result def _get_func(self, action, tag): if tag == 'W2KServerData': return action + '_' + 'w2k_server_data' temp_list = re.sub(r"([A-Z])", r" \1", tag).split() if temp_list: func_name = action + '_' + '_'.join(temp_list) else: func_name = action + '_' + tag return func_name.lower() def _copy_property(self, source, target, property, list_property=None): for key in property: if key in source: target[key] = source[key] if list_property: for key in list_property: if key in source: target[key] = source[key].split() def _append_elm_property(self, elm, result, property, identifier): for obj in result['objects']: if (identifier in obj and identifier in elm.attrib and elm.attrib[identifier] == obj[identifier]): for key, value in elm.attrib.items(): if key in property: obj[key] = value def _append_element(self, elm, result, property, list_property, identifier): sub_elm = {} self._copy_property(elm.attrib, sub_elm, property, list_property) for obj in result['objects']: if (identifier in obj and identifier in elm.attrib and elm.attrib[identifier] == obj[identifier]): if self.tag in obj: obj[self.tag].append(sub_elm) else: obj[self.tag] = [sub_elm] def start_task_response(self, elm, result): self.is_TaskResponse = True result['type'] = 'TaskResponse' self._copy_property(elm.attrib, result, ['taskId']) def end_task_response(self): self.is_TaskResponse = False def start_fault(self, elm, result): result['type'] = 'Fault' def start_status(self, elm, result): if self.is_TaskResponse: result['maxSeverity'] = elm.attrib['maxSeverity'] elif self.is_MoverStatus or self.is_Vdm: self.elt['maxSeverity'] = elm.attrib['maxSeverity'] def start_query_status(self, elm, result): self.is_QueryStatus = True result['type'] = 'QueryStatus' self._copy_property(elm.attrib, result, ['maxSeverity']) def end_query_status(self): self.is_QueryStatus = False def start_problem(self, elm, result): self.elt = {} properties = ('message', 'messageCode') self._copy_property(elm.attrib, self.elt, properties) result['problems'].append(self.elt) def start_description(self, elm, result): self.elt['Description'] = elm.text def start_action(self, elm, result): self.elt['Action'] = elm.text def start_diagnostics(self, elm, result): self.elt['Diagnostics'] = elm.text def start_file_system(self, elm, result): self.elt = {} property = ( 'fileSystem', 'name', 'type', 'storages', 'volume', 'dataServicePolicies', 'internalUse', ) list_property = ('storagePools',) self._copy_property(elm.attrib, self.elt, property, list_property) result['objects'].append(self.elt) def start_file_system_capacity_info(self, elm, result): property = ('volumeSize',) identifier = 'fileSystem' self._append_elm_property(elm, result, property, identifier) def start_storage_pool(self, elm, result): self.elt = {} property = ('name', 
'autoSize', 'usedSize', 'diskType', 'pool', 'dataServicePolicies', 'virtualProvisioning') list_property = ('movers',) self._copy_property(elm.attrib, self.elt, property, list_property) result['objects'].append(self.elt) def start_system_storage_pool_data(self, elm, result): property = ('greedy', 'isBackendPool') self._copy_property(elm.attrib, self.elt, property) def start_mover(self, elm, result): self.elt = {} property = ('name', 'host', 'mover', 'role') list_property = ('ntpServers', 'standbyFors', 'standbys') self._copy_property(elm.attrib, self.elt, property, list_property) result['objects'].append(self.elt) def start_mover_status(self, elm, result): self.is_MoverStatus = True property = ('version', 'csTime', 'clock', 'timezone', 'uptime') identifier = 'mover' self._append_elm_property(elm, result, property, identifier) def end_mover_status(self): self.is_MoverStatus = False def start_mover_dns_domain(self, elm, result): property = ('name', 'protocol') list_property = ('servers',) identifier = 'mover' self._append_element(elm, result, property, list_property, identifier) def start_mover_interface(self, elm, result): property = ( 'name', 'device', 'up', 'ipVersion', 'netMask', 'ipAddress', 'vlanid', ) identifier = 'mover' self._append_element(elm, result, property, None, identifier) def start_logical_network_device(self, elm, result): property = ('name', 'type', 'speed') list_property = ('interfaces',) identifier = 'mover' self._append_element(elm, result, property, list_property, identifier) def start_vdm(self, elm, result): self.is_Vdm = True self.elt = {} property = ('name', 'state', 'mover', 'vdm') self._copy_property(elm.attrib, self.elt, property) result['objects'].append(self.elt) def end_vdm(self): self.is_Vdm = False def start_interfaces(self, elm, result): self.is_Interfaces = True self.elt['Interfaces'] = [] def end_interfaces(self): self.is_Interfaces = False def start_li(self, elm, result): if self.is_CifsServers: self.elt['CifsServers'].append(elm.text) elif self.is_Aliases: self.elt['Aliases'].append(elm.text) elif self.is_Interfaces: self.elt['Interfaces'].append(elm.text) def start_cifs_server(self, elm, result): self.elt = {} property = ('type', 'localUsers', 'name', 'mover', 'moverIdIsVdm') list_property = ('interfaces',) self._copy_property(elm.attrib, self.elt, property, list_property) result['objects'].append(self.elt) def start_aliases(self, elm, result): self.is_Aliases = True self.elt['Aliases'] = [] def end_aliases(self): self.is_Aliases = False def start_w2k_server_data(self, elm, result): property = ('domain', 'compName', 'domainJoined') self._copy_property(elm.attrib, self.elt, property) def start_cifs_share(self, elm, result): self.elt = {} property = ('path', 'fileSystem', 'name', 'mover', 'moverIdIsVdm') self._copy_property(elm.attrib, self.elt, property) result['objects'].append(self.elt) def start_cifs_servers(self, elm, result): self.is_CifsServers = True self.elt['CifsServers'] = [] def end_cifs_servers(self): self.is_CifsServers = False def start_checkpoint(self, elm, result): self.elt = {} property = ('checkpointOf', 'name', 'checkpoint', 'state') self._copy_property(elm.attrib, self.elt, property) result['objects'].append(self.elt) def start_mount(self, elm, result): self.elt = {} property = ('fileSystem', 'path', 'mover', 'moverIdIsVdm') self._copy_property(elm.attrib, self.elt, property) result['objects'].append(self.elt) manila-2.0.0/manila/share/drivers/emc/plugins/vnx/connector.py0000664000567000056710000001422412701407107025542 0ustar 
jenkinsjenkins00000000000000# Copyright (c) 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pipes from oslo_concurrency import processutils from oslo_log import log from oslo_utils import excutils import six from six.moves import http_cookiejar from six.moves.urllib import error as url_error # pylint: disable=E0611 from six.moves.urllib import request as url_request # pylint: disable=E0611 from manila import exception from manila.i18n import _ from manila.i18n import _LE from manila.share.drivers.emc.plugins.vnx import constants from manila import utils LOG = log.getLogger(__name__) class XMLAPIConnector(object): def __init__(self, configuration, debug=True): super(XMLAPIConnector, self).__init__() self.storage_ip = configuration.emc_nas_server self.username = configuration.emc_nas_login self.password = configuration.emc_nas_password self.debug = debug self.auth_url = 'https://' + self.storage_ip + '/Login' self._url = ('https://' + self.storage_ip + '/servlets/CelerraManagementServices') https_handler = url_request.HTTPSHandler() cookie_handler = url_request.HTTPCookieProcessor( http_cookiejar.CookieJar()) self.url_opener = url_request.build_opener(https_handler, cookie_handler) self._do_setup() def _do_setup(self): credential = ('user=' + self.username + '&password=' + self.password + '&Login=Login') req = url_request.Request(self.auth_url, credential, constants.CONTENT_TYPE_URLENCODE) resp = self.url_opener.open(req) resp_body = resp.read() self._http_log_resp(resp, resp_body) def _http_log_req(self, req): if not self.debug: return string_parts = ['curl -i'] string_parts.append(' -X %s' % req.get_method()) for k in req.headers: header = ' -H "%s: %s"' % (k, req.headers[k]) string_parts.append(header) if req.data: string_parts.append(" -d '%s'" % req.data) string_parts.append(' ' + req.get_full_url()) LOG.debug("\nREQ: %s.\n", "".join(string_parts)) def _http_log_resp(self, resp, body): if not self.debug: return headers = six.text_type(resp.headers).replace('\n', '\\n') LOG.debug( 'RESP: [%(code)s] %(resp_hdrs)s\n' 'RESP BODY: %(resp_b)s.\n', { 'code': resp.getcode(), 'resp_hdrs': headers, 'resp_b': body, } ) def _request(self, req_body=None, method=None, header=constants.CONTENT_TYPE_URLENCODE): req = url_request.Request(self._url, req_body, header) if method not in (None, 'GET', 'POST'): req.get_method = lambda: method self._http_log_req(req) try: resp = self.url_opener.open(req) resp_body = resp.read() self._http_log_resp(resp, resp_body) except url_error.HTTPError as http_err: err = {'errorCode': -1, 'httpStatusCode': http_err.code, 'messages': six.text_type(http_err), 'request': req_body} msg = (_("The request is invalid. 
Reason: %(reason)s") % {'reason': err}) if '403' == six.text_type(http_err.code): raise exception.NotAuthorized() else: raise exception.ManilaException(message=msg) return resp_body def request(self, req_body=None, method=None, header=constants.CONTENT_TYPE_URLENCODE): try: resp_body = self._request(req_body, method, header) except exception.NotAuthorized: LOG.debug("Login again because client certification " "may be expired.") self._do_setup() resp_body = self._request(req_body, method, header) return resp_body class SSHConnector(object): def __init__(self, configuration, debug=True): super(SSHConnector, self).__init__() self.storage_ip = configuration.emc_nas_server self.username = configuration.emc_nas_login self.password = configuration.emc_nas_password self.debug = debug self.sshpool = utils.SSHPool(ip=self.storage_ip, port=22, conn_timeout=None, login=self.username, password=self.password) def run_ssh(self, cmd_list, check_exit_code=False): command = ' '.join(pipes.quote(cmd_arg) for cmd_arg in cmd_list) with self.sshpool.item() as ssh: try: out, err = processutils.ssh_execute( ssh, command, check_exit_code=check_exit_code) self.log_request(command, out, err) return out, err except processutils.ProcessExecutionError as e: with excutils.save_and_reraise_exception(): msg = (_LE('Error running SSH command: %(cmd)s. ' 'Error: %(excmsg)s.'), {'cmd': command, 'excmsg': six.text_type(e)}) LOG.error(msg) def log_request(self, cmd, out, err): if not self.debug: return LOG.debug("\nSSH command: %s.\n", cmd) LOG.debug("SSH command output: out=%(out)s, err=%(err)s.\n", {'out': out, 'err': err}) manila-2.0.0/manila/share/drivers/emc/plugins/vnx/object_manager.py0000664000567000056710000022150112701407107026506 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import re from lxml import builder from lxml import etree as ET from oslo_concurrency import processutils from oslo_log import log import six from manila.common import constants as const from manila import exception from manila.i18n import _ from manila.i18n import _LI from manila.i18n import _LW from manila.share.drivers.emc.plugins.vnx import connector from manila.share.drivers.emc.plugins.vnx import constants from manila.share.drivers.emc.plugins.vnx import utils as vnx_utils from manila.share.drivers.emc.plugins.vnx import xml_api_parser as parser from manila import utils LOG = log.getLogger(__name__) @vnx_utils.decorate_all_methods(vnx_utils.log_enter_exit, debug_only=True) class StorageObjectManager(object): def __init__(self, configuration): self.context = dict() self.connectors = dict() self.connectors['XML'] = connector.XMLAPIConnector(configuration) self.connectors['SSH'] = connector.SSHConnector(configuration) elt_maker = builder.ElementMaker(nsmap={None: constants.XML_NAMESPACE}) xml_parser = parser.XMLAPIParser() obj_types = StorageObject.__subclasses__() # pylint: disable=E1101 for item in obj_types: key = item.__name__ self.context[key] = eval(key)(self.connectors, elt_maker, xml_parser, self) def getStorageContext(self, type): if type in self.context: return self.context[type] else: message = (_("Invalid storage object type %s.") % type) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) class StorageObject(object): def __init__(self, conn, elt_maker, xml_parser, manager): self.conn = conn self.elt_maker = elt_maker self.xml_parser = xml_parser self.manager = manager self.xml_retry = False self.ssh_retry_patterns = [ ( constants.SSH_DEFAULT_RETRY_PATTERN, exception.EMCVnxLockRequiredException() ), ] def _translate_response(self, response): """Translate different status to ok/error status.""" if (constants.STATUS_OK == response['maxSeverity'] or constants.STATUS_ERROR == response['maxSeverity']): return old_Severity = response['maxSeverity'] if response['maxSeverity'] in (constants.STATUS_DEBUG, constants.STATUS_INFO): response['maxSeverity'] = constants.STATUS_OK LOG.warning(_LW("Translated status from %(old)s to %(new)s. 
" "Message: %(info)s."), {'old': old_Severity, 'new': response['maxSeverity'], 'info': response}) def _response_validation(self, response, error_code): """Translate different status to ok/error status.""" msg_codes = self._get_problem_message_codes(response['problems']) for code in msg_codes: if code == error_code: return True return False def _get_problem_message_codes(self, problems): message_codes = [] for problem in problems: if 'messageCode' in problem: message_codes.append(problem['messageCode']) return message_codes def _get_problem_messages(self, problems): messages = [] for problem in problems: if 'message' in problem: messages.append(problem['message']) return messages def _get_problem_diags(self, problems): diags = [] for problem in problems: if 'Diagnostics' in problem: diags.append(problem['Diagnostics']) return diags def _build_query_package(self, body): return self.elt_maker.RequestPacket( self.elt_maker.Request( self.elt_maker.Query(body) ) ) def _build_task_package(self, body): return self.elt_maker.RequestPacket( self.elt_maker.Request( self.elt_maker.StartTask(body, timeout='300') ) ) def _send_request(self, req): req_xml = constants.XML_HEADER + ET.tostring(req).decode('utf-8') rsp_xml = self.conn['XML'].request(str(req_xml)) response = self.xml_parser.parse(rsp_xml) self._translate_response(response) return response @utils.retry(exception.EMCVnxLockRequiredException) def _execute_cmd(self, cmd, retry_patterns=None, check_exit_code=False): """Execute NAS command via SSH. :param retry_patterns: list of tuples,where each tuple contains a reg expression and a exception. :param check_exit_code: Boolean. Raise processutils.ProcessExecutionError if the command failed to execute and this parameter is set to True. """ if retry_patterns is None: retry_patterns = self.ssh_retry_patterns try: out, err = self.conn['SSH'].run_ssh(cmd, check_exit_code) except processutils.ProcessExecutionError as e: for pattern in retry_patterns: if re.search(pattern[0], e.stdout): raise pattern[1] raise e return out, err def _copy_properties(self, source, target, property_map, deep_copy=True): for property in property_map: if isinstance(property, tuple): target_key, src_key = property else: target_key = src_key = property if src_key in source: if deep_copy and isinstance(source[src_key], list): target[target_key] = copy.deepcopy(source[src_key]) else: target[target_key] = source[src_key] else: target[target_key] = None def _get_mover_id(self, mover_name, is_vdm): if is_vdm: return self.get_context('VDM').get_id(mover_name) else: return self.get_context('Mover').get_id(mover_name, self.xml_retry) def get_context(self, type): return self.manager.getStorageContext(type) @vnx_utils.decorate_all_methods(vnx_utils.log_enter_exit, debug_only=True) class FileSystem(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(FileSystem, self).__init__(conn, elt_maker, xml_parser, manager) self.filesystem_map = dict() @utils.retry(exception.EMCVnxInvalidMoverID) def create(self, name, size, pool_name, mover_name, is_vdm=True): pool_id = self.get_context('StoragePool').get_id(pool_name) mover_id = self._get_mover_id(mover_name, is_vdm) if is_vdm: mover = self.elt_maker.Vdm(vdm=mover_id) else: mover = self.elt_maker.Mover(mover=mover_id) if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.NewFileSystem( mover, self.elt_maker.StoragePool( pool=pool_id, size=six.text_type(size), mayContainSlices='true' ), name=name ) ) response = 
self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) elif self._response_validation( response, constants.MSG_FILESYSTEM_EXIST): LOG.warning(_LW("File system %s already exists. " "Skip the creation."), name) return elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to create file system %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def get(self, name): if name not in self.filesystem_map: request = self._build_query_package( self.elt_maker.FileSystemQueryParams( self.elt_maker.AspectSelection( fileSystems='true', fileSystemCapacityInfos='true' ), self.elt_maker.Alias(name=name) ) ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: if self._is_filesystem_nonexistent(response): return constants.STATUS_NOT_FOUND, response['problems'] else: return response['maxSeverity'], response['problems'] if not response['objects']: return constants.STATUS_NOT_FOUND, response['problems'] src = response['objects'][0] filesystem = {} property_map = ( 'name', ('pools_id', 'storagePools'), ('volume_id', 'volume'), ('size', 'volumeSize'), ('id', 'fileSystem'), 'type', 'dataServicePolicies', ) self._copy_properties(src, filesystem, property_map) self.filesystem_map[name] = filesystem return constants.STATUS_OK, self.filesystem_map[name] def delete(self, name): status, out = self.get(name) if constants.STATUS_NOT_FOUND == status: LOG.warning(_LW("File system %s not found. Skip the deletion."), name) return elif constants.STATUS_OK != status: message = (_("Failed to get file system by name %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': out}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) id = self.filesystem_map[name]['id'] request = self._build_task_package( self.elt_maker.DeleteFileSystem(fileSystem=id) ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to delete file system %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) self.filesystem_map.pop(name) def extend(self, name, pool_name, new_size): status, out = self.get(name) if constants.STATUS_OK != status: message = (_("Failed to get file system by name %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': out}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) id = out['id'] size = int(out['size']) if new_size < size: message = (_("Failed to extend file system %(name)s because new " "size %(new_size)d is smaller than old size " "%(size)d.") % {'name': name, 'new_size': new_size, 'size': size}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) elif new_size == size: return pool_id = self.get_context('StoragePool').get_id(pool_name) request = self._build_task_package( self.elt_maker.ExtendFileSystem( self.elt_maker.StoragePool( pool=pool_id, size=six.text_type(new_size - size) ), fileSystem=id, ) ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to extend file system %(name)s to new size " "%(new_size)d. 
Reason: %(err)s.") % {'name': name, 'new_size': new_size, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def get_id(self, name): status, out = self.get(name) if constants.STATUS_OK != status: message = (_("Failed to get file system by name %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': out}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) return self.filesystem_map[name]['id'] def _is_filesystem_nonexistent(self, response): """Translate different status to ok/error status.""" msg_codes = self._get_problem_message_codes(response['problems']) diags = self._get_problem_diags(response['problems']) for code, diagnose in zip(msg_codes, diags): if (code == constants.MSG_FILESYSTEM_NOT_FOUND and diagnose.find('File system not found.') != -1): return True return False def create_from_snapshot(self, name, snap_name, source_fs_name, pool_name, mover_name, connect_id): create_fs_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_fs', '-name', name, '-type', 'uxfs', '-create', 'samesize=' + source_fs_name, 'pool=%s' % pool_name, 'storage=SINGLE', 'worm=off', '-thin', 'no', '-option', 'slice=y', ] self._execute_cmd(create_fs_cmd) ro_mount_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/server_mount', mover_name, '-option', 'ro', name, '/%s' % name, ] self._execute_cmd(ro_mount_cmd) session_name = name + ':' + snap_name copy_ckpt_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_copy', '-name', session_name[0:63], '-source', '-ckpt', snap_name, '-destination', '-fs', name, '-interconnect', 'id=%s' % connect_id, '-overwrite_destination', '-full_copy', ] try: self._execute_cmd(copy_ckpt_cmd, check_exit_code=True) except processutils.ProcessExecutionError as expt: message = (_("Failed to copy content from snapshot %(snap)s to " "file system %(filesystem)s. Reason: %(err)s.") % {'snap': snap_name, 'filesystem': name, 'err': six.text_type(expt)}) LOG.error(message) # When an error happens during nas_copy, we need to continue # deleting the checkpoint of the target file system if it exists. 
query_fs_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_fs', '-info', name, ] out, err = self._execute_cmd(query_fs_cmd) re_ckpts = r'ckpts\s*=\s*(.*)\s*' m = re.search(re_ckpts, out) if m is not None: ckpts = m.group(1) for ckpt in re.split(',', ckpts): umount_ckpt_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/server_umount', mover_name, '-perm', ckpt, ] self._execute_cmd(umount_ckpt_cmd) delete_ckpt_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_fs', '-delete', ckpt, '-Force', ] self._execute_cmd(delete_ckpt_cmd) rw_mount_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/server_mount', mover_name, '-option', 'rw', name, '/%s' % name, ] self._execute_cmd(rw_mount_cmd) @vnx_utils.decorate_all_methods(vnx_utils.log_enter_exit, debug_only=True) class StoragePool(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(StoragePool, self).__init__(conn, elt_maker, xml_parser, manager) self.pool_map = dict() def get(self, name, force=False): if name not in self.pool_map or force: status, out = self.get_all() if constants.STATUS_OK != status: return status, out if name not in self.pool_map: return constants.STATUS_NOT_FOUND, None return constants.STATUS_OK, self.pool_map[name] def get_all(self): self.pool_map.clear() request = self._build_query_package( self.elt_maker.StoragePoolQueryParams() ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: return response['maxSeverity'], response['problems'] if not response['objects']: return constants.STATUS_NOT_FOUND, response['problems'] for item in response['objects']: pool = {} property_map = ( 'name', ('movers_id', 'movers'), ('total_size', 'autoSize'), ('used_size', 'usedSize'), 'diskType', 'dataServicePolicies', ('id', 'pool'), ) self._copy_properties(item, pool, property_map) self.pool_map[item['name']] = pool return constants.STATUS_OK, self.pool_map def get_id(self, name): status, out = self.get(name) if constants.STATUS_OK != status: message = (_("Failed to get storage pool by name %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': out}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) return out['id'] @vnx_utils.decorate_all_methods(vnx_utils.log_enter_exit, debug_only=True) class MountPoint(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(MountPoint, self).__init__(conn, elt_maker, xml_parser, manager) @utils.retry(exception.EMCVnxInvalidMoverID) def create(self, mount_path, fs_name, mover_name, is_vdm=True): fs_id = self.get_context('FileSystem').get_id(fs_name) mover_id = self._get_mover_id(mover_name, is_vdm) if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.NewMount( self.elt_maker.MoverOrVdm( mover=mover_id, moverIdIsVdm='true' if is_vdm else 'false', ), fileSystem=fs_id, path=mount_path ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) elif self._is_mount_point_already_existent(response): LOG.warning(_LW("Mount Point %(mount)s already exists. " "Skip the creation."), {'mount': mount_path}) return elif constants.STATUS_OK != response['maxSeverity']: message = (_('Failed to create Mount Point %(mount)s for ' 'file system %(fs_name)s. 
Reason: %(err)s.') % {'mount': mount_path, 'fs_name': fs_name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) @utils.retry(exception.EMCVnxInvalidMoverID) def get(self, mover_name, is_vdm=True): mover_id = self._get_mover_id(mover_name, is_vdm) if self.xml_retry: self.xml_retry = False request = self._build_query_package( self.elt_maker.MountQueryParams( self.elt_maker.MoverOrVdm( mover=mover_id, moverIdIsVdm='true' if is_vdm else 'false' ) ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) elif constants.STATUS_OK != response['maxSeverity']: return response['maxSeverity'], response['objects'] if not response['objects']: return constants.STATUS_NOT_FOUND, None else: return constants.STATUS_OK, response['objects'] @utils.retry(exception.EMCVnxInvalidMoverID) def delete(self, mount_path, mover_name, is_vdm=True): mover_id = self._get_mover_id(mover_name, is_vdm) if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.DeleteMount( mover=mover_id, moverIdIsVdm='true' if is_vdm else 'false', path=mount_path ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) elif self._is_mount_point_nonexistent(response): LOG.warning(_LW('Mount point %(mount)s on mover %(mover_name)s ' 'not found.'), {'mount': mount_path, 'mover_name': mover_name}) return elif constants.STATUS_OK != response['maxSeverity']: message = (_('Failed to delete mount point %(mount)s on mover ' '%(mover_name)s. 
Reason: %(err)s.') % {'mount': mount_path, 'mover_name': mover_name, 'err': response}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def _is_mount_point_nonexistent(self, response): """Translate different status to ok/error status.""" msg_codes = self._get_problem_message_codes(response['problems']) message = self._get_problem_messages(response['problems']) for code, msg in zip(msg_codes, message): if ((code == constants.MSG_GENERAL_ERROR and msg.find( 'No such path or invalid operation') != -1) or code == constants.MSG_INVALID_VDM_ID or code == constants.MSG_INVALID_MOVER_ID): return True return False def _is_mount_point_already_existent(self, response): """Translate different status to ok/error status.""" msg_codes = self._get_problem_message_codes(response['problems']) message = self._get_problem_messages(response['problems']) for code, msg in zip(msg_codes, message): if ((code == constants.MSG_GENERAL_ERROR and msg.find( 'Mount already exists') != -1)): return True return False @vnx_utils.decorate_all_methods(vnx_utils.log_enter_exit, debug_only=True) class Mover(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(Mover, self).__init__(conn, elt_maker, xml_parser, manager) self.mover_map = dict() self.mover_ref_map = dict() def get_ref(self, name, force=False): if name not in self.mover_ref_map or force: self.mover_ref_map.clear() request = self._build_query_package( self.elt_maker.MoverQueryParams( self.elt_maker.AspectSelection(movers='true') ) ) response = self._send_request(request) if constants.STATUS_ERROR == response['maxSeverity']: return response['maxSeverity'], response['problems'] for item in response['objects']: mover = {} property_map = ('name', ('id', 'mover')) self._copy_properties(item, mover, property_map) if mover: self.mover_ref_map[mover['name']] = mover if (name not in self.mover_ref_map or self.mover_ref_map[name]['id'] == ''): return constants.STATUS_NOT_FOUND, None return constants.STATUS_OK, self.mover_ref_map[name] def get(self, name, force=False): if name not in self.mover_map or force: if name in self.mover_ref_map and not force: mover_id = self.mover_ref_map[name]['id'] else: mover_id = self.get_id(name, force) if name in self.mover_map: self.mover_map.pop(name) request = self._build_query_package( self.elt_maker.MoverQueryParams( self.elt_maker.AspectSelection( moverDeduplicationSettings='true', moverDnsDomains='true', moverInterfaces='true', moverNetworkDevices='true', moverNisDomains='true', moverRoutes='true', movers='true', moverStatuses='true' ), mover=mover_id ) ) response = self._send_request(request) if constants.STATUS_ERROR == response['maxSeverity']: return response['maxSeverity'], response['problems'] if not response['objects']: return constants.STATUS_NOT_FOUND, response['problems'] mover = {} src = response['objects'][0] property_map = ( 'name', ('id', 'mover'), ('Status', 'maxSeverity'), 'version', 'uptime', 'role', ('interfaces', 'MoverInterface'), ('devices', 'LogicalNetworkDevice'), ('dns_domain', 'MoverDnsDomain'), ) self._copy_properties(src, mover, property_map) internal_devices = [] if mover['interfaces']: for interface in mover['interfaces']: if self._is_internal_device(interface['device']): internal_devices.append(interface) mover['interfaces'] = [var for var in mover['interfaces'] if var not in internal_devices] self.mover_map[name] = mover return constants.STATUS_OK, self.mover_map[name] def get_id(self, name, force=False): status, mover_ref = self.get_ref(name, force) if 
constants.STATUS_OK != status: message = (_("Failed to get mover by name %(name)s.") % {'name': name}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) return mover_ref['id'] def _is_internal_device(self, device): for device_type in ('mge', 'fxg', 'tks', 'fsn'): if device.find(device_type) == 0: return True return False def get_interconnect_id(self, source, destination): header = [ 'id', 'name', 'source_server', 'destination_system', 'destination_server', ] conn_id = None command_nas_cel = [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_cel', '-interconnect', '-l', ] out, err = self._execute_cmd(command_nas_cel) lines = out.strip().split('\n') for line in lines: if line.strip().split() == header: LOG.info(_LI('Found the header of the command ' '/nas/bin/nas_cel -interconnect -l.')) else: interconn = line.strip().split() if interconn[2] == source and interconn[4] == destination: conn_id = interconn[0] return conn_id def get_physical_devices(self, mover_name): physical_network_devices = [] cmd_sysconfig = [ 'env', 'NAS_DB=/nas', '/nas/bin/server_sysconfig', mover_name, '-pci' ] out, err = self._execute_cmd(cmd_sysconfig) re_pattern = ('0:\s*(?P<name>\S+)\s*IRQ:\s*(?P<irq>\d+)\n' '.*\n' '\s*Link:\s*(?P<link>[A-Za-z]+)') for device in re.finditer(re_pattern, out): if 'Up' in device.group('link'): physical_network_devices.append(device.group('name')) return physical_network_devices @vnx_utils.decorate_all_methods(vnx_utils.log_enter_exit, debug_only=True) class VDM(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(VDM, self).__init__(conn, elt_maker, xml_parser, manager) self.vdm_map = dict() @utils.retry(exception.EMCVnxInvalidMoverID) def create(self, name, mover_name): mover_id = self._get_mover_id(mover_name, False) if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.NewVdm(mover=mover_id, name=name) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) elif self._response_validation(response, constants.MSG_VDM_EXIST): LOG.warning(_LW("VDM %(name)s already exists. Skip the creation."), {'name': name}) elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to create VDM %(name)s on mover " "%(mover_name)s. Reason: %(err)s.") % {'name': name, 'mover_name': mover_name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def get(self, name): if name not in self.vdm_map: request = self._build_query_package( self.elt_maker.VdmQueryParams() ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: return response['maxSeverity'], response['problems'] elif not response['objects']: return constants.STATUS_NOT_FOUND, response['problems'] for item in response['objects']: vdm = {} property_map = ( 'name', ('id', 'vdm'), 'state', ('host_mover_id', 'mover'), ('interfaces', 'Interfaces'), ) self._copy_properties(item, vdm, property_map) self.vdm_map[item['name']] = vdm if name not in self.vdm_map: return constants.STATUS_NOT_FOUND, None return constants.STATUS_OK, self.vdm_map[name] def delete(self, name): status, out = self.get(name) if constants.STATUS_NOT_FOUND == status: LOG.warning(_LW("VDM %s not found. Skip the deletion."), name) return elif constants.STATUS_OK != status: message = (_("Failed to get VDM by name %(name)s. 
" "Reason: %(err)s.") % {'name': name, 'err': out}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) vdm_id = self.vdm_map[name]['id'] request = self._build_task_package( self.elt_maker.DeleteVdm(vdm=vdm_id) ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to delete VDM %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) self.vdm_map.pop(name) def get_id(self, name): status, vdm = self.get(name) if constants.STATUS_OK != status: message = (_("Failed to get VDM by name %(name)s.") % {'name': name}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) return vdm['id'] def attach_nfs_interface(self, vdm_name, if_name): command_attach_nfs_interface = [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_server', '-vdm', vdm_name, '-attach', if_name, ] self._execute_cmd(command_attach_nfs_interface) def detach_nfs_interface(self, vdm_name, if_name): command_detach_nfs_interface = [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_server', '-vdm', vdm_name, '-detach', if_name, ] try: self._execute_cmd(command_detach_nfs_interface, check_exit_code=True) except processutils.ProcessExecutionError: interfaces = self.get_interfaces(vdm_name) if if_name not in interfaces['nfs']: LOG.debug("Failed to detach interface %(interface)s " "from mover %(mover_name)s.", {'interface': if_name, 'mover_name': vdm_name}) else: message = (_("Failed to detach interface %(interface)s " "from mover %(mover_name)s.") % {'interface': if_name, 'mover_name': vdm_name}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def get_interfaces(self, vdm_name): interfaces = { 'cifs': [], 'nfs': [], } re_pattern = ('Interfaces to services mapping:' '\s*(?P(\s*interface=.*)*)') command_get_interfaces = [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_server', '-i', '-vdm', vdm_name, ] out, err = self._execute_cmd(command_get_interfaces) m = re.search(re_pattern, out) if m: if_list = m.group('interfaces').split('\n') for i in if_list: m_if = re.search('\s*interface=(?P.*)\s*:' '\s*(?P.*)\s*', i) if m_if: if_name = m_if.group('if').strip() if 'cifs' == m_if.group('type') and if_name != '': interfaces['cifs'].append(if_name) elif 'vdm' == m_if.group('type') and if_name != '': interfaces['nfs'].append(if_name) return interfaces @vnx_utils.decorate_all_methods(vnx_utils.log_enter_exit, debug_only=True) class Snapshot(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(Snapshot, self).__init__(conn, elt_maker, xml_parser, manager) self.snap_map = dict() def create(self, name, fs_name, pool_id, ckpt_size=None): fs_id = self.get_context('FileSystem').get_id(fs_name) if ckpt_size: elt_pool = self.elt_maker.StoragePool( pool=pool_id, size=six.text_type(ckpt_size) ) else: elt_pool = self.elt_maker.StoragePool(pool=pool_id) new_ckpt = self.elt_maker.NewCheckpoint( self.elt_maker.SpaceAllocationMethod( elt_pool ), checkpointOf=fs_id, name=name ) request = self._build_task_package(new_ckpt) response = self._send_request(request) if self._response_validation(response, constants.MSG_SNAP_EXIST): LOG.warning(_LW("Snapshot %(name)s already exists. " "Skip the creation."), {'name': name}) elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to create snapshot %(name)s on " "filesystem %(fs_name)s. 
Reason: %(err)s.") % {'name': name, 'fs_name': fs_name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def get(self, name): if name not in self.snap_map: request = self._build_query_package( self.elt_maker.CheckpointQueryParams( self.elt_maker.Alias(name=name) ) ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: return response['maxSeverity'], response['problems'] if not response['objects']: return constants.STATUS_NOT_FOUND, response['problems'] src = response['objects'][0] snap = {} property_map = ( 'name', ('id', 'checkpoint'), 'checkpointOf', 'state', ) self._copy_properties(src, snap, property_map) self.snap_map[name] = snap return constants.STATUS_OK, self.snap_map[name] def delete(self, name): status, out = self.get(name) if constants.STATUS_NOT_FOUND == status: LOG.warning(_LW("Snapshot %s not found. Skip the deletion."), name) return elif constants.STATUS_OK != status: message = (_("Failed to get snapshot by name %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': out}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) chpt_id = self.snap_map[name]['id'] request = self._build_task_package( self.elt_maker.DeleteCheckpoint(checkpoint=chpt_id) ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to delete snapshot %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) self.snap_map.pop(name) def get_id(self, name): status, out = self.get(name) if constants.STATUS_OK != status: message = (_("Failed to get snapshot by %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': out}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) return self.snap_map[name]['id'] @vnx_utils.decorate_all_methods(vnx_utils.log_enter_exit, debug_only=True) class MoverInterface(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(MoverInterface, self).__init__(conn, elt_maker, xml_parser, manager) @utils.retry(exception.EMCVnxInvalidMoverID) def create(self, interface): # Maximum of 32 characters for mover interface name name = interface['name'] if len(name) > 32: name = name[0:31] device_name = interface['device_name'] ip_addr = interface['ip'] mover_name = interface['mover_name'] net_mask = interface['net_mask'] vlan_id = interface['vlan_id'] if interface['vlan_id'] else -1 mover_id = self._get_mover_id(mover_name, False) if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.NewMoverInterface( device=device_name, ipAddress=six.text_type(ip_addr), mover=mover_id, name=name, netMask=net_mask, vlanid=six.text_type(vlan_id) ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) elif self._response_validation( response, constants.MSG_INTERFACE_NAME_EXIST): LOG.warning(_LW("Mover interface name %s already exists. " "Skip the creation."), name) return elif self._response_validation( response, constants.MSG_INTERFACE_EXIST): LOG.warning(_LW("Mover interface IP %s already exists. 
" "Skip the creation."), ip_addr) return elif self._response_validation( response, constants.MSG_INTERFACE_INVALID_VLAN_ID): # When fail to create a mover interface with the specified # vlan id, VNX will leave a interface with vlan id 0 in the # backend. So we should explicitly remove the interface. try: self.delete(six.text_type(ip_addr), mover_name) except exception.EMCVnxXMLAPIError: pass message = (_("Invalid vlan id %s. Other interfaces on this " "subnet are in a different vlan.") % vlan_id) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to create mover interface %(interface)s. " "Reason: %(err)s.") % {'interface': interface, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def get(self, name, mover_name): # Maximum of 32 characters for mover interface name if len(name) > 32: name = name[0:31] status, mover = self.manager.getStorageContext('Mover').get( mover_name, True) if constants.STATUS_OK == status: for interface in mover['interfaces']: if name == interface['name']: return constants.STATUS_OK, interface return constants.STATUS_NOT_FOUND, None @utils.retry(exception.EMCVnxInvalidMoverID) def delete(self, ip_addr, mover_name): mover_id = self._get_mover_id(mover_name, False) if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.DeleteMoverInterface( ipAddress=six.text_type(ip_addr), mover=mover_id ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) elif self._response_validation( response, constants.MSG_INTERFACE_NON_EXISTENT): LOG.warning(_LW("Mover interface %s not found. " "Skip the deletion."), ip_addr) return elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to delete mover interface %(ip)s on mover " "%(mover)s. Reason: %(err)s.") % {'ip': ip_addr, 'mover': mover_name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) @vnx_utils.decorate_all_methods(vnx_utils.log_enter_exit, debug_only=True) class DNSDomain(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(DNSDomain, self).__init__(conn, elt_maker, xml_parser, manager) @utils.retry(exception.EMCVnxInvalidMoverID) def create(self, mover_name, name, servers, protocol='udp'): mover_id = self._get_mover_id(mover_name, False) if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.NewMoverDnsDomain( mover=mover_id, name=name, servers=servers, protocol=protocol ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to create DNS domain %(name)s. 
" "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) @utils.retry(exception.EMCVnxInvalidMoverID) def delete(self, mover_name, name): mover_id = self._get_mover_id(mover_name, False) if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.DeleteMoverDnsDomain( mover=mover_id, name=name ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) elif constants.STATUS_OK != response['maxSeverity']: LOG.warning(_LW("Failed to delete DNS domain %(name)s. " "Reason: %(err)s."), {'name': name, 'err': response['problems']}) @vnx_utils.decorate_all_methods(vnx_utils.log_enter_exit, debug_only=True) class CIFSServer(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(CIFSServer, self).__init__(conn, elt_maker, xml_parser, manager) self.cifs_server_map = dict() @utils.retry(exception.EMCVnxInvalidMoverID) def create(self, server_args): compName = server_args['name'] # Maximum of 14 characters for netBIOS name name = server_args['name'][-14:] # Maximum of 12 characters for alias name alias_name = server_args['name'][-12:] interfaces = server_args['interface_ip'] domain_name = server_args['domain_name'] user_name = server_args['user_name'] password = server_args['password'] mover_name = server_args['mover_name'] is_vdm = server_args['is_vdm'] mover_id = self._get_mover_id(mover_name, is_vdm) if self.xml_retry: self.xml_retry = False alias_name_list = [self.elt_maker.li(alias_name)] request = self._build_task_package( self.elt_maker.NewW2KCifsServer( self.elt_maker.MoverOrVdm( mover=mover_id, moverIdIsVdm='true' if server_args['is_vdm'] else 'false' ), self.elt_maker.Aliases(*alias_name_list), self.elt_maker.JoinDomain(userName=user_name, password=password), compName=compName, domain=domain_name, interfaces=interfaces, name=name ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) if constants.STATUS_OK != response['maxSeverity']: status, out = self.get(compName, mover_name, is_vdm) if constants.STATUS_OK == status and out['domainJoined'] == 'true': return else: message = (_("Failed to create CIFS server %(name)s. 
" "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) @utils.retry(exception.EMCVnxInvalidMoverID) def get_all(self, mover_name, is_vdm=True): mover_id = self._get_mover_id(mover_name, is_vdm) if self.xml_retry: self.xml_retry = False request = self._build_query_package( self.elt_maker.CifsServerQueryParams( self.elt_maker.MoverOrVdm( mover=mover_id, moverIdIsVdm='true' if is_vdm else 'false' ) ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) elif constants.STATUS_OK != response['maxSeverity']: return response['maxSeverity'], response['objects'] if mover_name in self.cifs_server_map: self.cifs_server_map.pop(mover_name) self.cifs_server_map[mover_name] = dict() for item in response['objects']: self.cifs_server_map[mover_name][item['compName'].lower()] = item return constants.STATUS_OK, self.cifs_server_map[mover_name] def get(self, name, mover_name, is_vdm=True, force=False): # name is compName name = name.lower() if (mover_name in self.cifs_server_map and name in self.cifs_server_map[mover_name]) and not force: return constants.STATUS_OK, self.cifs_server_map[mover_name][name] self.get_all(mover_name, is_vdm) if mover_name in self.cifs_server_map: for compName, server in self.cifs_server_map[mover_name].items(): if name == compName: return constants.STATUS_OK, server return constants.STATUS_NOT_FOUND, None @utils.retry(exception.EMCVnxInvalidMoverID) def modify(self, server_args): """Make CIFS server join or un-join the domain. :param server_args: Dictionary for CIFS server modification name: CIFS server name instead of compName join_domain: True for joining the domain, false for un-joining user_name: User name under which the domain is joined password: Password associated with the user name mover_name: mover or VDM name is_vdm: Boolean to indicate mover or VDM :raises exception.EMCVnxXMLAPIError: if modification fails. """ name = server_args['name'] join_domain = server_args['join_domain'] user_name = server_args['user_name'] password = server_args['password'] mover_name = server_args['mover_name'] if 'is_vdm' in server_args.keys(): is_vdm = server_args['is_vdm'] else: is_vdm = True mover_id = self._get_mover_id(mover_name, is_vdm) if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.ModifyW2KCifsServer( self.elt_maker.DomainSetting( joinDomain='true' if join_domain else 'false', password=password, userName=user_name, ), mover=mover_id, moverIdIsVdm='true' if is_vdm else 'false', name=name ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) elif self._ignore_modification_error(response, join_domain): return elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to modify CIFS server %(name)s. 
" "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def _ignore_modification_error(self, response, join_domain): if self._response_validation(response, constants.MSG_JOIN_DOMAIN): return join_domain elif self._response_validation(response, constants.MSG_UNJOIN_DOMAIN): return not join_domain return False def delete(self, computer_name, mover_name, is_vdm=True): try: status, out = self.get( computer_name.lower(), mover_name, is_vdm, self.xml_retry) if constants.STATUS_NOT_FOUND == status: LOG.warning(_LW("CIFS server %(name)s on mover %(mover_name)s " "not found. Skip the deletion."), {'name': computer_name, 'mover_name': mover_name}) return except exception.EMCVnxXMLAPIError: LOG.warning(_LW("CIFS server %(name)s on mover %(mover_name)s " "not found. Skip the deletion."), {'name': computer_name, 'mover_name': mover_name}) return server_name = out['name'] mover_id = self._get_mover_id(mover_name, is_vdm) request = self._build_task_package( self.elt_maker.DeleteCifsServer( mover=mover_id, moverIdIsVdm='true' if is_vdm else 'false', name=server_name ) ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to delete CIFS server %(name)s. " "Reason: %(err)s.") % {'name': computer_name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) self.cifs_server_map[mover_name].pop(computer_name) @vnx_utils.decorate_all_methods(vnx_utils.log_enter_exit, debug_only=True) class CIFSShare(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(CIFSShare, self).__init__(conn, elt_maker, xml_parser, manager) self.cifs_share_map = dict() @utils.retry(exception.EMCVnxInvalidMoverID) def create(self, name, server_name, mover_name, is_vdm=True): mover_id = self._get_mover_id(mover_name, is_vdm) if self.xml_retry: self.xml_retry = False share_path = '/' + name request = self._build_task_package( self.elt_maker.NewCifsShare( self.elt_maker.MoverOrVdm( mover=mover_id, moverIdIsVdm='true' if is_vdm else 'false' ), self.elt_maker.CifsServers(self.elt_maker.li(server_name)), name=name, path=share_path ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to create file share %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def get(self, name): if name not in self.cifs_share_map: request = self._build_query_package( self.elt_maker.CifsShareQueryParams(name=name) ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: return response['maxSeverity'], response['problems'] if not response['objects']: return constants.STATUS_NOT_FOUND, None self.cifs_share_map[name] = response['objects'][0] return constants.STATUS_OK, self.cifs_share_map[name] @utils.retry(exception.EMCVnxInvalidMoverID) def delete(self, name, mover_name, is_vdm=True): status, out = self.get(name) if constants.STATUS_NOT_FOUND == status: LOG.warning(_LW("CIFS share %s not found. Skip the deletion."), name) return elif constants.STATUS_OK != status: message = (_("Failed to get CIFS share by name %(name)s. 
" "Reason: %(err)s.") % {'name': name, 'err': out}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) mover_id = self._get_mover_id(mover_name, is_vdm) if self.xml_retry: self.xml_retry = False netbios_names = self.cifs_share_map[name]['CifsServers'] request = self._build_task_package( self.elt_maker.DeleteCifsShare( self.elt_maker.CifsServers(*map(lambda a: self.elt_maker.li(a), netbios_names)), mover=mover_id, moverIdIsVdm='true' if is_vdm else 'false', name=name ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to delete file system %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) self.cifs_share_map.pop(name) def disable_share_access(self, share_name, mover_name): cmd_str = 'sharesd %s set noaccess' % share_name disable_access = [ 'env', 'NAS_DB=/nas', '/nas/bin/.server_config', mover_name, '-v', "%s" % cmd_str, ] try: self._execute_cmd(disable_access, check_exit_code=True) except processutils.ProcessExecutionError as expt: message = (_('Failed to disable the access to CIFS share ' '%(name)s. Reason: %(err)s.') % {'name': share_name, 'err': expt}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def allow_share_access(self, mover_name, share_name, user_name, domain, access=constants.CIFS_ACL_FULLCONTROL): account = user_name + "@" + domain allow_str = ('sharesd %(share_name)s grant %(account)s=%(access)s' % {'share_name': share_name, 'account': account, 'access': access}) allow_access = [ 'env', 'NAS_DB=/nas', '/nas/bin/.server_config', mover_name, '-v', "%s" % allow_str, ] try: self._execute_cmd(allow_access, check_exit_code=True) except processutils.ProcessExecutionError as expt: dup_msg = re.compile(r'ACE for %(domain)s\\%(user)s unchanged' % {'domain': domain, 'user': user_name}, re.I) if re.search(dup_msg, expt.stdout): LOG.warning(_LW("Duplicate access control entry, " "skipping allow...")) else: message = (_('Failed to allow the access %(access)s to ' 'CIFS share %(name)s. Reason: %(err)s.') % {'access': access, 'name': share_name, 'err': expt}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def deny_share_access(self, mover_name, share_name, user_name, domain, access=constants.CIFS_ACL_FULLCONTROL): account = user_name + "@" + domain revoke_str = ('sharesd %(share_name)s revoke %(account)s=%(access)s' % {'share_name': share_name, 'account': account, 'access': access}) allow_access = [ 'env', 'NAS_DB=/nas', '/nas/bin/.server_config', mover_name, '-v', "%s" % revoke_str, ] try: self._execute_cmd(allow_access, check_exit_code=True) except processutils.ProcessExecutionError as expt: not_found_msg = re.compile( r'No ACE found for %(domain)s\\%(user)s' % {'domain': domain, 'user': user_name}, re.I) user_err_msg = re.compile( r'Cannot get mapping for %(domain)s\\%(user)s' % {'domain': domain, 'user': user_name}, re.I) if re.search(not_found_msg, expt.stdout): LOG.warning(_LW("No access control entry found, " "skipping deny...")) elif re.search(user_err_msg, expt.stdout): LOG.warning(_LW("User not found on domain, skipping deny...")) else: message = (_('Failed to deny the access %(access)s to ' 'CIFS share %(name)s. 
Reason: %(err)s.') % {'access': access, 'name': share_name, 'err': expt}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) @vnx_utils.decorate_all_methods(vnx_utils.log_enter_exit, debug_only=True) class NFSShare(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(NFSShare, self).__init__(conn, elt_maker, xml_parser, manager) self.nfs_share_map = {} def create(self, name, mover_name): share_path = '/' + name create_nfs_share_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/server_export', mover_name, '-option', 'access=-0.0.0.0/0.0.0.0', share_path, ] try: self._execute_cmd(create_nfs_share_cmd, check_exit_code=True) except processutils.ProcessExecutionError as expt: message = (_('Failed to create NFS share %(name)s on mover ' '%(mover_name)s. Reason: %(err)s.') % {'name': name, 'mover_name': mover_name, 'err': expt}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def delete(self, name, mover_name): path = '/' + name status, out = self.get(name, mover_name) if constants.STATUS_NOT_FOUND == status: LOG.warning(_LW("NFS share %s not found. Skip the deletion."), path) return delete_nfs_share_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/server_export', mover_name, '-unexport', '-perm', path, ] try: self._execute_cmd(delete_nfs_share_cmd, check_exit_code=True) except processutils.ProcessExecutionError as expt: message = (_('Failed to delete NFS share %(name)s on ' '%(mover_name)s. Reason: %(err)s.') % {'name': name, 'mover_name': mover_name, 'err': expt}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) self.nfs_share_map.pop(name) def get(self, name, mover_name, force=False, check_exit_code=False): if name in self.nfs_share_map and not force: return constants.STATUS_OK, self.nfs_share_map[name] path = '/' + name nfs_share = { "mover_name": '', "path": '', 'AccessHosts': [], 'RwHosts': [], 'RoHosts': [], 'RootHosts': [], 'readOnly': '', } nfs_query_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/server_export', mover_name, '-P', 'nfs', '-list', path, ] try: out, err = self._execute_cmd(nfs_query_cmd, check_exit_code=check_exit_code) except processutils.ProcessExecutionError as expt: dup_msg = (r'%(mover_name)s : No such file or directory' % {'mover_name': mover_name}) if re.search(dup_msg, expt.stdout): LOG.warning(_LW("NFS share %s not found."), name) return constants.STATUS_NOT_FOUND, None else: message = (_('Failed to list NFS share %(name)s on ' '%(mover_name)s. 
Reason: %(err)s.') % {'name': name, 'mover_name': mover_name, 'err': expt}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) re_exports = '%s\s*:\s*\nexport\s*(.*)\n' % mover_name m = re.search(re_exports, out) if m is not None: nfs_share['path'] = path nfs_share['mover_name'] = mover_name export = m.group(1) fields = export.split(" ") for field in fields: field = field.strip() if field.startswith('rw='): nfs_share['RwHosts'] = field[3:].split(":") elif field.startswith('access='): nfs_share['AccessHosts'] = field[7:].split(":") elif field.startswith('root='): nfs_share['RootHosts'] = field[5:].split(":") elif field.startswith('ro='): nfs_share['RoHosts'] = field[3:].split(":") self.nfs_share_map[name] = nfs_share else: return constants.STATUS_NOT_FOUND, None return constants.STATUS_OK, self.nfs_share_map[name] def allow_share_access(self, share_name, host_ip, mover_name, access_level=const.ACCESS_LEVEL_RW): @utils.synchronized('emc-shareaccess-' + share_name) def do_allow_access(share_name, host_ip, mover_name, access_level): status, share = self.get(share_name, mover_name) if constants.STATUS_NOT_FOUND == status: message = (_('NFS share %s not found.') % share_name) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) changed = False rwhosts = share['RwHosts'] rohosts = share['RoHosts'] if access_level == const.ACCESS_LEVEL_RW: if host_ip not in rwhosts: rwhosts.append(host_ip) changed = True if host_ip in rohosts: rohosts.remove(host_ip) changed = True if access_level == const.ACCESS_LEVEL_RO: if host_ip not in rohosts: rohosts.append(host_ip) changed = True if host_ip in rwhosts: rwhosts.remove(host_ip) changed = True roothosts = share['RootHosts'] if host_ip not in roothosts: roothosts.append(host_ip) changed = True accesshosts = share['AccessHosts'] if host_ip not in accesshosts: accesshosts.append(host_ip) changed = True if not changed: LOG.debug("%(host)s is already in access list of share " "%(name)s.", {'host': host_ip, 'name': share_name}) else: path = '/' + share_name self._set_share_access(path, mover_name, rwhosts, rohosts, roothosts, accesshosts) # Update self.nfs_share_map self.get(share_name, mover_name, force=True, check_exit_code=True) do_allow_access(share_name, host_ip, mover_name, access_level) def deny_share_access(self, share_name, host_ip, mover_name): @utils.synchronized('emc-shareaccess-' + share_name) def do_deny_access(share_name, host_ip, mover_name): status, share = self.get(share_name, mover_name) if constants.STATUS_OK != status: message = (_('Query nfs share %(path)s failed. 
' 'Reason %(err)s.') % {'path': share_name, 'err': share}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) changed = False rwhosts = set(share['RwHosts']) if host_ip in rwhosts: rwhosts.remove(host_ip) changed = True roothosts = set(share['RootHosts']) if host_ip in roothosts: roothosts.remove(host_ip) changed = True accesshosts = set(share['AccessHosts']) if host_ip in accesshosts: accesshosts.remove(host_ip) changed = True rohosts = set(share['RoHosts']) if host_ip in rohosts: rohosts.remove(host_ip) changed = True if not changed: LOG.debug("%(host)s is already in access list of share " "%(name)s.", {'host': host_ip, 'name': share_name}) else: path = '/' + share_name self._set_share_access(path, mover_name, rwhosts, rohosts, roothosts, accesshosts) # Update self.nfs_share_map self.get(share_name, mover_name, force=True, check_exit_code=True) do_deny_access(share_name, host_ip, mover_name) def _set_share_access(self, path, mover_name, rw_hosts, ro_hosts, root_hosts, access_hosts): access_str = ('access=%(access)s' % {'access': ':'.join(access_hosts)}) if root_hosts: access_str += (',root=%(root)s' % {'root': ':'.join(root_hosts)}) if rw_hosts: access_str += ',rw=%(rw)s' % {'rw': ':'.join(rw_hosts)} if ro_hosts: access_str += ',ro=%(ro)s' % {'ro': ':'.join(ro_hosts)} set_nfs_share_access_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/server_export', mover_name, '-ignore', '-option', access_str, path, ] try: self._execute_cmd(set_nfs_share_access_cmd, check_exit_code=True) except processutils.ProcessExecutionError as expt: message = (_('Failed to set NFS share %(name)s access on ' '%(mover_name)s. Reason: %(err)s.') % {'name': path[1:], 'mover_name': mover_name, 'err': expt}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) manila-2.0.0/manila/share/drivers/emc/plugins/vnx/connection.py0000664000567000056710000007454712701407107025725 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
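# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the driver): the VNX NFSShare code
# earlier in this archive assembles the 'server_export ... -option' argument
# from the four host lists (access/root/rw/ro), each joined with ':' and the
# groups separated by ','.  The helper and host values below are hypothetical
# and simply mirror the logic of _set_share_access().
# ---------------------------------------------------------------------------
def build_export_option(access_hosts, root_hosts, rw_hosts, ro_hosts):
    """Rebuild the -option string the same way _set_share_access() does."""
    option = 'access=%s' % ':'.join(access_hosts)
    if root_hosts:
        option += ',root=%s' % ':'.join(root_hosts)
    if rw_hosts:
        option += ',rw=%s' % ':'.join(rw_hosts)
    if ro_hosts:
        option += ',ro=%s' % ':'.join(ro_hosts)
    return option

# Example (hypothetical hosts):
#   build_export_option(['10.0.0.2'], ['10.0.0.2'], ['10.0.0.2'], [])
#   -> 'access=10.0.0.2,root=10.0.0.2,rw=10.0.0.2'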
"""VNX backend for the EMC Manila driver.""" import copy import fnmatch import random from oslo_log import log from oslo_utils import excutils from oslo_utils import units from manila.common import constants as const from manila import exception from manila.i18n import _ from manila.i18n import _LE from manila.i18n import _LW from manila.share.drivers.emc.plugins import base as driver from manila.share.drivers.emc.plugins.vnx import constants from manila.share.drivers.emc.plugins.vnx import object_manager as manager from manila.share.drivers.emc.plugins.vnx import utils as vnx_utils from manila.share import utils as share_utils from manila import utils VERSION = "2.0.0" LOG = log.getLogger(__name__) @vnx_utils.decorate_all_methods(vnx_utils.log_enter_exit, debug_only=True) class VNXStorageConnection(driver.StorageConnection): """Implements VNX specific functionality for EMC Manila driver.""" @vnx_utils.log_enter_exit def __init__(self, *args, **kwargs): super(VNXStorageConnection, self).__init__(*args, **kwargs) self.mover_name = None self.pools = None self.manager = None self.pool_conf = None self.reserved_percentage = None self.driver_handles_share_servers = True def create_share(self, context, share, share_server=None): """Create a share and export it based on protocol used.""" share_name = share['id'] size = share['size'] * units.Ki share_proto = share['share_proto'] # Validate the share protocol if share_proto.upper() not in ('NFS', 'CIFS'): raise exception.InvalidShare( reason=(_('Invalid NAS protocol supplied: %s.') % share_proto)) # Get the pool name from share host field pool_name = share_utils.extract_host(share['host'], level='pool') if not pool_name: message = (_("Pool is not available in the share host %s.") % share['host']) raise exception.InvalidHost(reason=message) # Validate share server self._share_server_validation(share_server) if share_proto == 'CIFS': vdm_name = self._get_share_server_name(share_server) server_name = vdm_name # Check if CIFS server exists. 
status, server = self._get_context('CIFSServer').get(server_name, vdm_name) if status != constants.STATUS_OK: message = (_("CIFS server %s not found.") % server_name) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) self._allocate_container(share_name, size, share_server, pool_name) if share_proto == 'NFS': location = self._create_nfs_share(share_name, share_server) elif share_proto == 'CIFS': location = self._create_cifs_share(share_name, share_server) return location def _share_server_validation(self, share_server): """Validate the share server.""" if not share_server: msg = _('Share server not provided') raise exception.InvalidInput(reason=msg) backend_details = share_server.get('backend_details') vdm = backend_details.get( 'share_server_name') if backend_details else None if vdm is None: message = _("No share server found.") LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def _allocate_container(self, share_name, size, share_server, pool_name): """Allocate file system for share.""" vdm_name = self._get_share_server_name(share_server) self._get_context('FileSystem').create( share_name, size, pool_name, vdm_name) def _allocate_container_from_snapshot(self, share, snapshot, share_server, pool_name): """Allocate file system from snapshot.""" vdm_name = self._get_share_server_name(share_server) interconn_id = self._get_context('Mover').get_interconnect_id( self.mover_name, self.mover_name) self._get_context('FileSystem').create_from_snapshot( share['id'], snapshot['id'], snapshot['share_id'], pool_name, vdm_name, interconn_id) nwe_size = share['size'] * units.Ki self._get_context('FileSystem').extend(share['id'], pool_name, nwe_size) @vnx_utils.log_enter_exit def _create_cifs_share(self, share_name, share_server): """Create CIFS share.""" vdm_name = self._get_share_server_name(share_server) server_name = vdm_name # Get available CIFS Server and interface (one CIFS server per VDM) status, server = self._get_context('CIFSServer').get(server_name, vdm_name) if 'interfaces' not in server or len(server['interfaces']) == 0: message = (_("CIFS server %s doesn't have interface, " "so the share is inaccessible.") % server['compName']) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) interface = server['interfaces'][0] self._get_context('CIFSShare').create(share_name, server['name'], vdm_name) self._get_context('CIFSShare').disable_share_access(share_name, vdm_name) location = (r'\\%(interface)s\%(name)s' % {'interface': interface, 'name': share_name}) return location @vnx_utils.log_enter_exit def _create_nfs_share(self, share_name, share_server): """Create NFS share.""" vdm_name = self._get_share_server_name(share_server) self._get_context('NFSShare').create(share_name, vdm_name) return ('%(nfs_if)s:/%(share_name)s' % {'nfs_if': share_server['backend_details']['nfs_if'], 'share_name': share_name}) def create_share_from_snapshot(self, context, share, snapshot, share_server=None): """Create a share from a snapshot - clone a snapshot.""" share_name = share['id'] share_proto = share['share_proto'] # Validate the share protocol if share_proto.upper() not in ('NFS', 'CIFS'): raise exception.InvalidShare( reason=(_('Invalid NAS protocol supplied: %s.') % share_proto)) # Get the pool name from share host field pool_name = share_utils.extract_host(share['host'], level='pool') if not pool_name: message = (_("Pool is not available in the share host %s.") % share['host']) raise exception.InvalidHost(reason=message) self._share_server_validation(share_server) 
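# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the driver): create_share() above
# returns an export location whose format depends on the share protocol.
# The interface IP and share id below are hypothetical placeholders; the
# string formats mirror _create_cifs_share() and _create_nfs_share().
# ---------------------------------------------------------------------------
def export_location(share_proto, interface_ip, share_name):
    """Rebuild the export location string the VNX plugin returns."""
    if share_proto.upper() == 'CIFS':
        # UNC path through the CIFS server interface on the VDM.
        return r'\\%(interface)s\%(name)s' % {'interface': interface_ip,
                                              'name': share_name}
    elif share_proto.upper() == 'NFS':
        # <nfs interface ip>:/<share id>, served by the VDM NFS interface.
        return '%(nfs_if)s:/%(share_name)s' % {'nfs_if': interface_ip,
                                               'share_name': share_name}
    raise ValueError('Only NFS and CIFS are supported by this plugin.')

# Example (hypothetical values):
#   export_location('NFS', '192.168.1.10', 'share-fake-id')
#   -> '192.168.1.10:/share-fake-id'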
self._allocate_container_from_snapshot( share, snapshot, share_server, pool_name) if share_proto == 'NFS': self._create_nfs_share(share_name, share_server) location = ('%(nfs_if)s:/%(share_name)s' % {'nfs_if': share_server['backend_details']['nfs_if'], 'share_name': share_name}) elif share_proto == 'CIFS': location = self._create_cifs_share(share_name, share_server) return location def create_snapshot(self, context, snapshot, share_server=None): """Create snapshot from share.""" share_name = snapshot['share_id'] status, filesystem = self._get_context('FileSystem').get(share_name) if status != constants.STATUS_OK: message = (_("File System %s not found.") % share_name) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) pool_id = filesystem['pools_id'][0] self._get_context('Snapshot').create(snapshot['id'], snapshot['share_id'], pool_id) def delete_share(self, context, share, share_server=None): """Delete a share.""" if share_server is None: LOG.warning(_LW("Driver does not support share deletion without " "share network specified. Return directly because " "there is nothing to clean.")) return share_proto = share['share_proto'] if share_proto == 'NFS': self._delete_nfs_share(share, share_server) elif share_proto == 'CIFS': self._delete_cifs_share(share, share_server) else: raise exception.InvalidShare( reason='Unsupported share type') @vnx_utils.log_enter_exit def _delete_cifs_share(self, share, share_server): """Delete CIFS share.""" vdm_name = self._get_share_server_name(share_server) name = share['id'] self._get_context('CIFSShare').delete(name, vdm_name) self._deallocate_container(name, vdm_name) @vnx_utils.log_enter_exit def _delete_nfs_share(self, share, share_server): """Delete NFS share.""" vdm_name = self._get_share_server_name(share_server) name = share['id'] self._get_context('NFSShare').delete(name, vdm_name) self._deallocate_container(name, vdm_name) @vnx_utils.log_enter_exit def _deallocate_container(self, share_name, vdm_name): """Delete underneath objects of the share.""" path = '/' + share_name try: # Delete mount point self._get_context('MountPoint').delete(path, vdm_name) except Exception: LOG.debug("Skip the failure of mount point %s deletion.", path) try: # Delete file system self._get_context('FileSystem').delete(share_name) except Exception: LOG.debug("Skip the failure of file system %s deletion.", share_name) def delete_snapshot(self, context, snapshot, share_server=None): """Delete a snapshot.""" self._get_context('Snapshot').delete(snapshot['id']) def ensure_share(self, context, share, share_server=None): """Ensure that the share is exported.""" def extend_share(self, share, new_size, share_server=None): # Get the pool name from share host field pool_name = share_utils.extract_host(share['host'], level='pool') if not pool_name: message = (_("Pool is not available in the share host %s.") % share['host']) raise exception.InvalidHost(reason=message) share_name = share['id'] self._get_context('FileSystem').extend( share_name, pool_name, new_size * units.Ki) def allow_access(self, context, share, access, share_server=None): """Allow access to a share.""" access_level = access['access_level'] if access_level not in const.ACCESS_LEVELS: raise exception.InvalidShareAccessLevel(level=access_level) share_proto = share['share_proto'] if share_proto == 'NFS': self._nfs_allow_access(context, share, access, share_server) elif share_proto == 'CIFS': self._cifs_allow_access(context, share, access, share_server) else: raise exception.InvalidShare( reason=(_('Invalid 
NAS protocol supplied: %s.') % share_proto)) @vnx_utils.log_enter_exit def _cifs_allow_access(self, context, share, access, share_server): """Allow access to CIFS share.""" vdm_name = self._get_share_server_name(share_server) share_name = share['id'] if access['access_type'] != 'user': reason = _('Only user access type allowed for CIFS share') raise exception.InvalidShareAccess(reason=reason) user_name = access['access_to'] access_level = access['access_level'] if access_level == const.ACCESS_LEVEL_RW: cifs_access = constants.CIFS_ACL_FULLCONTROL else: cifs_access = constants.CIFS_ACL_READ # Check if CIFS server exists. server_name = vdm_name status, server = self._get_context('CIFSServer').get(server_name, vdm_name) if status != constants.STATUS_OK: message = (_("CIFS server %s not found.") % server_name) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) self._get_context('CIFSShare').allow_share_access( vdm_name, share_name, user_name, server['domain'], access=cifs_access) @vnx_utils.log_enter_exit def _nfs_allow_access(self, context, share, access, share_server): """Allow access to NFS share.""" vdm_name = self._get_share_server_name(share_server) access_type = access['access_type'] if access_type != 'ip': reason = _('Only ip access type allowed.') raise exception.InvalidShareAccess(reason=reason) host_ip = access['access_to'] access_level = access['access_level'] self._get_context('NFSShare').allow_share_access( share['id'], host_ip, vdm_name, access_level) def deny_access(self, context, share, access, share_server=None): """Deny access to a share.""" share_proto = share['share_proto'] if share_proto == 'NFS': self._nfs_deny_access(share, access, share_server) elif share_proto == 'CIFS': self._cifs_deny_access(share, access, share_server) else: raise exception.InvalidShare( reason=_('Unsupported share type')) @vnx_utils.log_enter_exit def _cifs_deny_access(self, share, access, share_server): """Deny access to CIFS share.""" vdm_name = self._get_share_server_name(share_server) share_name = share['id'] if access['access_type'] != 'user': reason = _('Only user access type allowed for CIFS share') raise exception.InvalidShareAccess(reason=reason) user_name = access['access_to'] access_level = access['access_level'] if access_level == const.ACCESS_LEVEL_RW: cifs_access = constants.CIFS_ACL_FULLCONTROL else: cifs_access = constants.CIFS_ACL_READ # Check if CIFS server exists. 
server_name = vdm_name status, server = self._get_context('CIFSServer').get(server_name, vdm_name) if status != constants.STATUS_OK: message = (_("CIFS server %s not found.") % server_name) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) self._get_context('CIFSShare').deny_share_access( vdm_name, share_name, user_name, server['domain'], access=cifs_access) @vnx_utils.log_enter_exit def _nfs_deny_access(self, share, access, share_server): """Deny access to NFS share.""" vdm_name = self._get_share_server_name(share_server) access_type = access['access_type'] if access_type != 'ip': reason = _('Only ip access type allowed.') raise exception.InvalidShareAccess(reason=reason) host_ip = access['access_to'] self._get_context('NFSShare').deny_share_access(share['id'], host_ip, vdm_name) def check_for_setup_error(self): """Check for setup error.""" # To verify the input from Manila configuration status, out = self._get_context('Mover').get_ref(self.mover_name, True) if constants.STATUS_ERROR == status: message = (_("Could not find Data Mover by name: %s.") % self.mover_name) LOG.error(message) raise exception.InvalidParameterValue(err=message) self.pools = self._get_managed_storage_pools(self.pool_conf) def _get_managed_storage_pools(self, pools): matched_pools = set() if pools: # Get the real pools from the backend storage status, backend_pools = self._get_context('StoragePool').get_all() if status != constants.STATUS_OK: message = (_("Failed to get storage pool information. " "Reason: %s") % backend_pools) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) real_pools = set([item for item in backend_pools]) conf_pools = set([item.strip() for item in pools.split(",")]) for pool in real_pools: for matcher in conf_pools: if fnmatch.fnmatchcase(pool, matcher): matched_pools.add(pool) nonexistent_pools = real_pools.difference(matched_pools) if not matched_pools: msg = (_("All the specified storage pools to be managed " "do not exist. Please check your configuration " "emc_nas_pool_names in manila.conf. " "The available pools in the backend are %s") % ",".join(real_pools)) raise exception.InvalidParameterValue(err=msg) if nonexistent_pools: LOG.warning(_LW("The following specified storage pools " "do not exist: %(unexist)s. 
" "This host will only manage the storage " "pools: %(exist)s"), {'unexist': ",".join(nonexistent_pools), 'exist': ",".join(matched_pools)}) else: LOG.debug("Storage pools: %s will be managed.", ",".join(matched_pools)) else: LOG.debug("No storage pool is specified, so all pools " "in storage system will be managed.") return matched_pools def connect(self, emc_share_driver, context): """Connect to VNX NAS server.""" self.mover_name = ( emc_share_driver.configuration.emc_nas_server_container) self.pool_conf = emc_share_driver.configuration.safe_get( 'emc_nas_pool_names') self.reserved_percentage = emc_share_driver.configuration.safe_get( 'reserved_share_percentage') if self.reserved_percentage is None: self.reserved_percentage = 0 configuration = emc_share_driver.configuration self.manager = manager.StorageObjectManager(configuration) def update_share_stats(self, stats_dict): """Communicate with EMCNASClient to get the stats.""" stats_dict['driver_version'] = VERSION self._get_context('Mover').get_ref(self.mover_name, True) stats_dict['pools'] = [] status, pools = self._get_context('StoragePool').get_all() for name, pool in pools.items(): if not self.pools or pool['name'] in self.pools: total_size = float(pool['total_size']) used_size = float(pool['used_size']) pool_stat = dict( pool_name=pool['name'], total_capacity_gb=total_size, free_capacity_gb=total_size - used_size, qos=False, reserved_percentage=self.reserved_percentage, ) stats_dict['pools'].append(pool_stat) if not stats_dict['pools']: message = _("Failed to update storage pool.") LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def get_pool(self, share): """Get the pool name of the share.""" share_name = share['id'] status, filesystem = self._get_context('FileSystem').get(share_name) if status != constants.STATUS_OK: message = (_("File System %(name)s not found. " "Reason: %(err)s") % {'name': share_name, 'err': filesystem}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) pool_id = filesystem['pools_id'][0] # Get the real pools from the backend storage status, backend_pools = self._get_context('StoragePool').get_all() if status != constants.STATUS_OK: message = (_("Failed to get storage pool information. " "Reason: %s") % backend_pools) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) for name, pool_info in backend_pools.items(): if pool_info['id'] == pool_id: return name available_pools = [item for item in backend_pools] message = (_("No matched pool name for share: %(share)s. 
" "Available pools: %(pools)s") % {'share': share_name, 'pools': available_pools}) raise exception.EMCVnxXMLAPIError(err=message) def get_network_allocations_number(self): """Returns number of network allocations for creating VIFs.""" return constants.IP_ALLOCATIONS def setup_server(self, network_info, metadata=None): """Set up and configures share server with given network parameters.""" # Only support single security service with type 'active_directory' vdm_name = network_info['server_id'] vlan_id = network_info['segmentation_id'] active_directory = None allocated_interfaces = [] if network_info.get('security_services'): is_valid, active_directory = self._get_valid_security_service( network_info['security_services']) if not is_valid: raise exception.EMCVnxXMLAPIError(err=active_directory) try: if not self._vdm_exist(vdm_name): LOG.debug('Share server %s not found, creating ' 'share server...', vdm_name) self._get_context('VDM').create(vdm_name, self.mover_name) netmask = utils.cidr_to_netmask(network_info['cidr']) devices = self._get_physical_devices(self.mover_name) for net_info in network_info['network_allocations']: random.shuffle(devices) interface = { 'name': net_info['id'][-12:], 'device_name': devices[0], 'ip': net_info['ip_address'], 'mover_name': self.mover_name, 'net_mask': netmask, 'vlan_id': vlan_id if vlan_id else -1, } self._get_context('MoverInterface').create(interface) allocated_interfaces.append(interface) cifs_interface = allocated_interfaces[0] nfs_interface = allocated_interfaces[1] if active_directory: self._configure_active_directory( active_directory, vdm_name, cifs_interface) self._get_context('VDM').attach_nfs_interface( vdm_name, nfs_interface['name']) return { 'share_server_name': vdm_name, 'cifs_if': cifs_interface['ip'], 'nfs_if': nfs_interface['ip'], } except Exception as ex: with excutils.save_and_reraise_exception(): LOG.error(_LE('Could not setup server. 
Reason: %s.'), ex) server_details = self._construct_backend_details( vdm_name, allocated_interfaces) self.teardown_server( server_details, network_info['security_services']) def _construct_backend_details(self, vdm_name, interfaces): if_number = len(interfaces) cifs_if = interfaces[0]['ip'] if if_number > 0 else None nfs_if = interfaces[1]['ip'] if if_number > 1 else None return { 'share_server_name': vdm_name, 'cifs_if': cifs_if, 'nfs_if': nfs_if, } @vnx_utils.log_enter_exit def _vdm_exist(self, name): status, out = self._get_context('VDM').get(name) if constants.STATUS_OK != status: return False return True def _get_physical_devices(self, mover_name): """Get a proper network device to create interface.""" devices = self._get_context('Mover').get_physical_devices(mover_name) if not devices: message = (_("Could not get physical device port on mover %s.") % self.mover_name) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) return devices def _configure_active_directory( self, security_service, vdm_name, interface): domain = security_service['domain'] server = security_service['dns_ip'] self._get_context('DNSDomain').create(self.mover_name, domain, server) cifs_server_args = { 'name': vdm_name, 'interface_ip': interface['ip'], 'domain_name': security_service['domain'], 'user_name': security_service['user'], 'password': security_service['password'], 'mover_name': vdm_name, 'is_vdm': True, } self._get_context('CIFSServer').create(cifs_server_args) def teardown_server(self, server_details, security_services=None): """Teardown share server.""" if not server_details: LOG.debug('Server details are empty.') return vdm_name = server_details.get('share_server_name') if not vdm_name: LOG.debug('No share server found in server details.') return cifs_if = server_details.get('cifs_if') nfs_if = server_details.get('nfs_if') status, vdm = self._get_context('VDM').get(vdm_name) if constants.STATUS_OK != status: LOG.debug('Share server %s not found.', vdm_name) return interfaces = self._get_context('VDM').get_interfaces(vdm_name) for if_name in interfaces['nfs']: self._get_context('VDM').detach_nfs_interface(vdm_name, if_name) if security_services: # Only support single security service with type 'active_directory' is_valid, active_directory = self._get_valid_security_service( security_services) if is_valid: status, servers = self._get_context('CIFSServer').get_all( vdm_name) if constants.STATUS_OK != status: LOG.error(_LE('Could not find CIFS server by name: %s.'), vdm_name) else: cifs_servers = copy.deepcopy(servers) for name, server in cifs_servers.items(): # Unjoin CIFS Server from domain cifs_server_args = { 'name': server['name'], 'join_domain': False, 'user_name': active_directory['user'], 'password': active_directory['password'], 'mover_name': vdm_name, 'is_vdm': True, } try: self._get_context('CIFSServer').modify( cifs_server_args) except exception.EMCVnxXMLAPIError as expt: LOG.debug("Failed to modify CIFS server " "%(server)s. Reason: %(err)s.", {'server': server, 'err': expt}) self._get_context('CIFSServer').delete(name, vdm_name) # Delete interface from Data Mover if cifs_if: self._get_context('MoverInterface').delete(cifs_if, self.mover_name) if nfs_if: self._get_context('MoverInterface').delete(nfs_if, self.mover_name) # Delete Virtual Data Mover self._get_context('VDM').delete(vdm_name) def _get_valid_security_service(self, security_services): """Validate security services and return a supported security service. 
:param security_services: :returns: (, ) -- is true to indicate security_services includes zero or single security service for active directory. Otherwise, it would return false. return error message when is false. Otherwise, it will return zero or single security service for active directory. """ # Only support single security service with type 'active_directory' service_number = len(security_services) if (service_number > 1 or security_services[0]['type'] != 'active_directory'): return False, _("Unsupported security services. " "Only support single security service and " "only support type 'active_directory'") return True, security_services[0] def _get_share_server_name(self, share_server): try: return share_server['backend_details']['share_server_name'] except Exception: LOG.debug("Didn't get share server name from share_server %s.", share_server) return share_server['id'] def _get_context(self, type): return self.manager.getStorageContext(type) manila-2.0.0/manila/share/drivers/emc/plugins/__init__.py0000664000567000056710000000000012701407107024457 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/emc/plugins/base.py0000664000567000056710000000545412701407107023654 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """EMC Share Driver Base Plugin API """ import abc import six @six.add_metaclass(abc.ABCMeta) class StorageConnection(object): """Subclasses should implement storage backend specific functionality.""" def __init__(self, logger): self.logger = logger # NOTE(vponomaryov): redefine 'driver_handles_share_servers' within # plugin. 
self.driver_handles_share_servers = None @abc.abstractmethod def create_share(self, context, share, share_server): """Is called to create share.""" @abc.abstractmethod def create_snapshot(self, context, snapshot, share_server): """Is called to create snapshot.""" @abc.abstractmethod def delete_share(self, context, share, share_server): """Is called to remove share.""" @abc.abstractmethod def delete_snapshot(self, context, snapshot, share_server): """Is called to remove snapshot.""" @abc.abstractmethod def ensure_share(self, context, share, share_server): """Invoked to ensure that share is exported.""" @abc.abstractmethod def extend_share(self, share, new_size, share_server): """Invoked to extend share.""" @abc.abstractmethod def allow_access(self, context, share, access, share_server): """Allow access to the share.""" @abc.abstractmethod def deny_access(self, context, share, access, share_server): """Deny access to the share.""" def raise_connect_error(self): """Check for setup error.""" pass def connect(self, emc_share_driver, context): """Any initialization the share driver does while starting.""" pass def update_share_stats(self, stats_dict): """Add key/values to stats_dict.""" pass def get_network_allocations_number(self): """Returns number of network allocations for creating VIFs.""" return 0 @abc.abstractmethod def setup_server(self, network_info, metadata=None): """Set up and configure share server with given network parameters.""" @abc.abstractmethod def teardown_server(self, server_details, security_services=None): """Teardown share server.""" manila-2.0.0/manila/share/drivers/emc/plugins/isilon/0000775000567000056710000000000012701407265023662 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/emc/plugins/isilon/isilon_api.py0000664000567000056710000003132012701407107026354 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
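# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of this module): minimal use of the
# IsilonApi class defined below, assuming a reachable Isilon Platform API
# endpoint.  The URL, credentials and path are hypothetical placeholders.
# ---------------------------------------------------------------------------
from manila.share.drivers.emc.plugins.isilon import isilon_api


def create_demo_nfs_export(cluster_url, username, password,
                           path='/ifs/manila/demo-share'):
    """Create a directory and export it over NFS via the Platform API."""
    api = isilon_api.IsilonApi(cluster_url, auth=(username, password),
                               verify_ssl_cert=False)
    # create_directory() issues a PUT to /namespace<path>; create_nfs_export()
    # POSTs to /platform/1/protocols/nfs/exports and returns True on HTTP 201.
    api.create_directory(path, recursive=True)
    return api.create_nfs_export(path)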
from enum import Enum from oslo_serialization import jsonutils import requests import six from manila import exception from manila.i18n import _ class IsilonApi(object): def __init__(self, api_url, auth, verify_ssl_cert=True): self.host_url = api_url self.session = requests.session() self.session.auth = auth self.verify_ssl_cert = verify_ssl_cert def create_directory(self, container_path, recursive=False): """Create a directory.""" headers = {"x-isi-ifs-target-type": "container"} url = (self.host_url + "/namespace" + container_path + '?recursive=' + six.text_type(recursive)) r = self.request('PUT', url, headers=headers) return r.status_code == 200 def clone_snapshot(self, snapshot_name, fq_target_dir): self.create_directory(fq_target_dir) snapshot = self.get_snapshot(snapshot_name) snapshot_path = snapshot['path'] # remove /ifs from start of path relative_snapshot_path = snapshot_path[4:] fq_snapshot_path = ('/ifs/.snapshot/' + snapshot_name + relative_snapshot_path) self._clone_directory_contents(fq_snapshot_path, fq_target_dir, snapshot_name, relative_snapshot_path) def _clone_directory_contents(self, fq_source_dir, fq_target_dir, snapshot_name, relative_path): dir_listing = self.get_directory_listing(fq_source_dir) for item in dir_listing['children']: name = item['name'] source_item_path = fq_source_dir + '/' + name new_relative_path = relative_path + '/' + name dest_item_path = fq_target_dir + '/' + name if item['type'] == 'container': # create the container name in the target dir & clone dir self.create_directory(dest_item_path) self._clone_directory_contents(source_item_path, dest_item_path, snapshot_name, new_relative_path) elif item['type'] == 'object': self.clone_file_from_snapshot('/ifs' + new_relative_path, dest_item_path, snapshot_name) def clone_file_from_snapshot(self, fq_file_path, fq_dest_path, snapshot_name): headers = {'x-isi-ifs-copy-source': '/namespace' + fq_file_path} snapshot_suffix = '&snapshot=' + snapshot_name url = (self.host_url + '/namespace' + fq_dest_path + '?clone=true' + snapshot_suffix) self.request('PUT', url, headers=headers) def get_directory_listing(self, fq_dir_path): url = self.host_url + '/namespace' + fq_dir_path + '?detail=default' r = self.request('GET', url) r.raise_for_status() return r.json() def is_path_existent(self, resource_path): url = self.host_url + '/namespace' + resource_path r = self.request('HEAD', url) if r.status_code == 200: return True elif r.status_code == 404: return False else: r.raise_for_status() def get_snapshot(self, snapshot_name): r = self.request('GET', self.host_url + '/platform/1/snapshot/snapshots/' + snapshot_name) snapshot_json = r.json() if r.status_code == 200: return snapshot_json['snapshots'][0] elif r.status_code == 404: return None else: r.raise_for_status() def get_snapshots(self): r = self.request('GET', self.host_url + '/platform/1/snapshot/snapshots') if r.status_code == 200: return r.json() else: r.raise_for_status() def lookup_nfs_export(self, share_path): response = self.session.get( self.host_url + '/platform/1/protocols/nfs/exports', verify=self.verify_ssl_cert) nfs_exports_json = response.json() for export in nfs_exports_json['exports']: for path in export['paths']: if path == share_path: return export['id'] return None def get_nfs_export(self, export_id): response = self.request('GET', self.host_url + '/platform/1/protocols/nfs/exports/' + six.text_type(export_id)) if response.status_code == 200: return response.json()['exports'][0] else: return None def lookup_smb_share(self, share_name): 
response = self.session.get( self.host_url + '/platform/1/protocols/smb/shares/' + share_name) if response.status_code == 200: return response.json()['shares'][0] else: return None def create_nfs_export(self, export_path): """Creates an NFS export using the Platform API. :param export_path: a string specifying the desired export path :return: "True" if created successfully; "False" otherwise """ data = {'paths': [export_path]} url = self.host_url + '/platform/1/protocols/nfs/exports' response = self.request('POST', url, data=data) return response.status_code == 201 def create_smb_share(self, share_name, share_path): """Creates an SMB/CIFS share. :param share_name: the name of the CIFS share :param share_path: the path associated with the CIFS share :return: "True" if the share created successfully; returns "False" otherwise """ data = {'permissions': []} data['name'] = share_name data['path'] = share_path url = self.host_url + '/platform/1/protocols/smb/shares' response = self.request('POST', url, data=data) return response.status_code == 201 def create_snapshot(self, snapshot_name, snapshot_path): """Creates a snapshot.""" data = {'name': snapshot_name, 'path': snapshot_path} r = self.request('POST', self.host_url + '/platform/1/snapshot/snapshots', data=data) if r.status_code == 201: return True else: r.raise_for_status() def delete(self, fq_resource_path, recursive=False): """Deletes a file or folder.""" r = self.request('DELETE', self.host_url + '/namespace' + fq_resource_path + '?recursive=' + six.text_type(recursive)) r.raise_for_status() def delete_nfs_share(self, share_number): response = self.session.delete( self.host_url + '/platform/1/protocols/nfs/exports' + '/' + six.text_type(share_number)) return response.status_code == 204 def delete_smb_share(self, share_name): url = self.host_url + '/platform/1/protocols/smb/shares/' + share_name response = self.request('DELETE', url) return response.status_code == 204 def delete_snapshot(self, snapshot_name): response = self.request( 'DELETE', '{0}/platform/1/snapshot/snapshots/{1}' .format(self.host_url, snapshot_name)) response.raise_for_status() def quota_create(self, path, quota_type, size): thresholds = {'hard': size} data = { 'path': path, 'type': quota_type, 'include_snapshots': False, 'thresholds_include_overhead': False, 'enforced': True, 'thresholds': thresholds, } response = self.request( 'POST', '{0}/platform/1/quota/quotas'.format(self.host_url), data=data) response.raise_for_status() def quota_get(self, path, quota_type): response = self.request( 'GET', '{0}/platform/1/quota/quotas?path={1}'.format(self.host_url, path), ) if response.status_code == 404: return None elif response.status_code != 200: response.raise_for_status() json = response.json() len_returned_quotas = len(json['quotas']) if len_returned_quotas == 0: return None elif len_returned_quotas == 1: return json['quotas'][0] else: message = (_('Greater than one quota returned when querying ' 'quotas associated with share path: %(path)s .') % {'path': path}) raise exception.ShareBackendException(msg=message) def quota_modify_size(self, quota_id, new_size): data = {'thresholds': {'hard': new_size}} response = self.request( 'PUT', '{0}/platform/1/quota/quotas/{1}'.format(self.host_url, quota_id), data=data ) response.raise_for_status() def quota_set(self, path, quota_type, size): """Sets a quota of the given type and size on the given path.""" quota_json = self.quota_get(path, quota_type) if quota_json is None: self.quota_create(path, quota_type, size) else: # quota 
already exists, modify it's size quota_id = quota_json['id'] self.quota_modify_size(quota_id, size) def smb_permissions_add(self, share_name, user, smb_permission): smb_share = self.lookup_smb_share(share_name) permissions = smb_share['permissions'] # lookup given user string user_json = self.auth_lookup_user(user) auth_mappings = user_json['mapping'] if len(auth_mappings) > 1: message = (_('More than one mapping found for user "%(user)s".') % {'user': user}) raise exception.ShareBackendException(msg=message) user_sid = auth_mappings[0]['user']['sid'] new_permission = { 'permission': smb_permission.value, 'permission_type': 'allow', 'trustee': user_sid } url = '{0}/platform/1/protocols/smb/shares/{1}'.format( self.host_url, share_name) new_permissions = list(permissions) new_permissions.append(new_permission) data = {'permissions': new_permissions} r = self.request('PUT', url, data=data) r.raise_for_status() def smb_permissions_remove(self, share_name, user): smb_share = self.lookup_smb_share(share_name) permissions = smb_share['permissions'] # find the perm to remove perm_to_remove = None for perm in list(permissions): if perm['trustee']['name'] == user: perm_to_remove = perm if perm_to_remove is not None: permissions.remove(perm) else: message = _('Attempting to remove permission for user "%(user)s", ' 'but this user was not found in the share\'s ' '(%(share)s) permissions list.') % {'user': user, 'share': smb_share} raise exception.ShareBackendException(msg=message) self.request('PUT', '{0}/platform/1/protocols/smb/shares/{1}'.format( self.host_url, share_name), data={'permissions': permissions}) def auth_lookup_user(self, user_string): url = '{0}/platform/1/auth/mapping/users/lookup'.format(self.host_url) r = self.request('GET', url, params={"user": user_string}) if r.status_code == 404: raise exception.ShareBackendException(msg='user not found') elif r.status_code != 200: r.raise_for_status() return r.json() def request(self, method, url, headers=None, data=None, params=None): if data is not None: data = jsonutils.dumps(data) r = self.session.request(method, url, headers=headers, data=data, verify=self.verify_ssl_cert, params=params) return r class SmbPermission(Enum): full = 'full' rw = 'change' ro = 'read' manila-2.0.0/manila/share/drivers/emc/plugins/isilon/__init__.py0000664000567000056710000000000012701407107025754 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/emc/plugins/isilon/isilon.py0000664000567000056710000004017712701407107025535 0ustar jenkinsjenkins00000000000000# Copyright 2015 EMC Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Isilon specific NAS backend plugin. 
""" import os from oslo_config import cfg from oslo_log import log from oslo_utils import units import six from manila.common import constants as const from manila import exception from manila.i18n import _, _LW from manila.share.drivers.emc.plugins import base from manila.share.drivers.emc.plugins.isilon import isilon_api CONF = cfg.CONF VERSION = "0.1.0" LOG = log.getLogger(__name__) class IsilonStorageConnection(base.StorageConnection): """Implements Isilon specific functionality for EMC Manila driver.""" def __init__(self, *args, **kwargs): super(IsilonStorageConnection, self).__init__(*args, **kwargs) self._server = None self._port = None self._username = None self._password = None self._server_url = None self._connect_resp = None self._root_dir = None self._verify_ssl_cert = None self._containers = {} self._shares = {} self._snapshots = {} self._isilon_api = None self._isilon_api_class = isilon_api.IsilonApi self.driver_handles_share_servers = False def _get_container_path(self, share): """Return path to a container.""" return os.path.join(self._root_dir, share['name']) def create_share(self, context, share, share_server): """Is called to create share.""" if share['share_proto'] == 'NFS': location = self._create_nfs_share(share) elif share['share_proto'] == 'CIFS': location = self._create_cifs_share(share) else: message = (_('Unsupported share protocol: %(proto)s.') % {'proto': share['share_proto']}) LOG.error(message) raise exception.InvalidShare(reason=message) # apply directory quota based on share size max_share_size = share['size'] * units.Gi self._isilon_api.quota_create( self._get_container_path(share), 'directory', max_share_size) return location def create_share_from_snapshot(self, context, share, snapshot, share_server): """Creates a share from the snapshot.""" # Create share at new location location = self.create_share(context, share, share_server) # Clone snapshot to new location fq_target_dir = self._get_container_path(share) self._isilon_api.clone_snapshot(snapshot['name'], fq_target_dir) return location def _create_nfs_share(self, share): """Is called to create nfs share.""" container_path = self._get_container_path(share) self._isilon_api.create_directory(container_path) share_created = self._isilon_api.create_nfs_export(container_path) if not share_created: message = ( _('The requested NFS share "%(share)s" was not created.') % {'share': share['name']}) LOG.error(message) raise exception.ShareBackendException(msg=message) location = '{0}:{1}'.format(self._server, container_path) return location def _create_cifs_share(self, share): """Is called to create cifs share.""" # Create the directory container_path = self._get_container_path(share) self._isilon_api.create_directory(container_path) self._isilon_api.create_smb_share(share['name'], container_path) share_path = '\\\\{0}\\{1}'.format(self._server, share['name']) return share_path def create_snapshot(self, context, snapshot, share_server): """Is called to create snapshot.""" snapshot_path = os.path.join(self._root_dir, snapshot['share_name']) self._isilon_api.create_snapshot(snapshot['name'], snapshot_path) def delete_share(self, context, share, share_server): """Is called to remove share.""" if share['share_proto'] == 'NFS': self._delete_nfs_share(share) elif share['share_proto'] == 'CIFS': self._delete_cifs_share(share) else: message = (_('Unsupported share type: %(type)s.') % {'type': share['share_proto']}) LOG.error(message) raise exception.InvalidShare(reason=message) def _delete_nfs_share(self, share): """Is 
called to remove nfs share.""" share_id = self._isilon_api.lookup_nfs_export( self._root_dir + '/' + share['name']) if share_id is None: lw = _LW('Attempted to delete NFS Share "%s", but the share does ' 'not appear to exist.') LOG.warning(lw, share['name']) else: # attempt to delete the share export_deleted = self._isilon_api.delete_nfs_share(share_id) if not export_deleted: message = _('Error deleting NFS share: %s') % share['name'] LOG.error(message) raise exception.ShareBackendException(msg=message) def _delete_cifs_share(self, share): """Is called to remove CIFS share.""" smb_share = self._isilon_api.lookup_smb_share(share['name']) if smb_share is None: lw = _LW('Attempted to delete CIFS Share "%s", but the share does ' 'not appear to exist.') LOG.warning(lw, share['name']) else: share_deleted = self._isilon_api.delete_smb_share(share['name']) if not share_deleted: message = _('Error deleting CIFS share: %s') % share['name'] LOG.error(message) raise exception.ShareBackendException(msg=message) def delete_snapshot(self, context, snapshot, share_server): """Is called to remove snapshot.""" self._isilon_api.delete_snapshot(snapshot['name']) def ensure_share(self, context, share, share_server): """Invoked to ensure that share is exported.""" def extend_share(self, share, new_size, share_server=None): """Extends a share.""" new_quota_size = new_size * units.Gi self._isilon_api.quota_set( self._get_container_path(share), 'directory', new_quota_size) def allow_access(self, context, share, access, share_server): """Allow access to the share.""" if share['share_proto'] == 'NFS': self._nfs_allow_access(share, access) elif share['share_proto'] == 'CIFS': self._cifs_allow_access(share, access) else: message = _( 'Unsupported share protocol: %s. Only "NFS" and ' '"CIFS" are currently supported share protocols.') % share[ 'share_proto'] LOG.error(message) raise exception.InvalidShare(reason=message) def _nfs_allow_access(self, share, access): """Allow access to nfs share.""" access_type = access['access_type'] if access_type != 'ip': message = _('Only "ip" access type allowed for the NFS' 'protocol.') LOG.error(message) raise exception.InvalidShareAccess(reason=message) export_path = self._get_container_path(share) access_ip = access['access_to'] access_level = access['access_level'] share_id = self._isilon_api.lookup_nfs_export(export_path) share_access_group = 'clients' if access_level == const.ACCESS_LEVEL_RO: share_access_group = 'read_only_clients' # Get current allowed clients export = self._get_existing_nfs_export(share_id) current_clients = export[share_access_group] # Format of ips could be '10.0.0.2', or '10.0.0.2, 10.0.0.0/24' ips = list() ips.append(access_ip) ips.extend(current_clients) export_params = {share_access_group: ips} url = '{0}/platform/1/protocols/nfs/exports/{1}'.format( self._server_url, share_id) resp = self._isilon_api.request('PUT', url, data=export_params) resp.raise_for_status() def _cifs_allow_access(self, share, access): access_type = access['access_type'] access_to = access['access_to'] access_level = access['access_level'] if access_type == 'ip': access_ip = access['access_to'] self._cifs_allow_access_ip(access_ip, share, access_level) elif access_type == 'user': self._cifs_allow_access_user(access_to, share, access_level) else: message = _('Only "ip" and "user" access types allowed for ' 'CIFS protocol.') LOG.error(message) raise exception.InvalidShareAccess(reason=message) def _cifs_allow_access_ip(self, ip, share, access_level): if access_level == 
const.ACCESS_LEVEL_RO: message = _('Only RW Access allowed for CIFS Protocol when using ' 'the "ip" access type.') LOG.error(message) raise exception.InvalidShareAccess(reason=message) allowed_ip = 'allow:' + ip smb_share = self._isilon_api.lookup_smb_share(share['name']) host_acl = smb_share['host_acl'] if allowed_ip not in host_acl: host_acl.append(allowed_ip) data = {'host_acl': host_acl} url = ('{0}/platform/1/protocols/smb/shares/{1}' .format(self._server_url, smb_share['name'])) r = self._isilon_api.request('PUT', url, data=data) r.raise_for_status() def _cifs_allow_access_user(self, user, share, access_level): if access_level == const.ACCESS_LEVEL_RW: smb_permission = isilon_api.SmbPermission.rw elif access_level == const.ACCESS_LEVEL_RO: smb_permission = isilon_api.SmbPermission.ro else: message = _('Only "RW" and "RO" access levels are supported.') LOG.error(message) raise exception.InvalidShareAccess(reason=message) self._isilon_api.smb_permissions_add(share['name'], user, smb_permission) def deny_access(self, context, share, access, share_server): """Deny access to the share.""" if share['share_proto'] == 'NFS': self._nfs_deny_access(share, access) elif share['share_proto'] == 'CIFS': self._cifs_deny_access(share, access) def _nfs_deny_access(self, share, access): """Deny access to nfs share.""" if access['access_type'] != 'ip': return denied_ip = access['access_to'] access_level = access['access_level'] share_access_group = 'clients' if access_level == const.ACCESS_LEVEL_RO: share_access_group = 'read_only_clients' # Get list of currently allowed client ips export_id = self._isilon_api.lookup_nfs_export( self._get_container_path(share)) if export_id is None: message = _('Share %s should have been created, but was not ' 'found.') % share['name'] LOG.error(message) raise exception.ShareBackendException(msg=message) export = self._get_existing_nfs_export(export_id) try: clients = export[share_access_group] except KeyError: message = (_('Export %(export_name)s should have contained the ' 'JSON key %(json_key)s, but this key was not found.') % {'export_name': share['name'], 'json_key': share_access_group}) LOG.error(message) raise exception.ShareBackendException(msg=message) allowed_ips = set(clients) if allowed_ips.__contains__(denied_ip): allowed_ips.remove(denied_ip) data = {share_access_group: list(allowed_ips)} url = ('{0}/platform/1/protocols/nfs/exports/{1}' .format(self._server_url, six.text_type(export_id))) r = self._isilon_api.request('PUT', url, data=data) r.raise_for_status() def _get_existing_nfs_export(self, export_id): export = self._isilon_api.get_nfs_export(export_id) if export is None: message = _('NFS share with export id %d should have been ' 'created, but was not found.') % export_id LOG.error(message) raise exception.ShareBackendException(msg=message) return export def _cifs_deny_access(self, share, access): access_type = access['access_type'] if access_type == 'ip': self._cifs_deny_access_ip(access['access_to'], share) elif access_type == 'user': self._cifs_deny_access_user(share, access) else: message = _('Access type for CIFS deny access request was ' '"%(access_type)s". 
Only "user" and "ip" access types ' 'are supported for CIFS protocol access.') % { 'access_type': access_type} LOG.warning(message) def _cifs_deny_access_ip(self, denied_ip, share): """Deny access to cifs share.""" share_json = self._isilon_api.lookup_smb_share(share['name']) host_acl_list = share_json['host_acl'] allow_ip = 'allow:' + denied_ip if allow_ip in host_acl_list: host_acl_list.remove(allow_ip) share_params = {"host_acl": host_acl_list} url = ('{0}/platform/1/protocols/smb/shares/{1}' .format(self._server_url, share['name'])) resp = self._isilon_api.request('PUT', url, data=share_params) resp.raise_for_status() def _cifs_deny_access_user(self, share, access): self._isilon_api.smb_permissions_remove(share['name'], access[ 'access_to']) def check_for_setup_error(self): """Check for setup error.""" def connect(self, emc_share_driver, context): """Connect to an Isilon cluster.""" self._server = emc_share_driver.configuration.safe_get( "emc_nas_server") self._port = ( int(emc_share_driver.configuration.safe_get("emc_nas_server_port")) ) self._server_url = ('https://' + self._server + ':' + six.text_type(self._port)) self._username = emc_share_driver.configuration.safe_get( "emc_nas_login") self._password = emc_share_driver.configuration.safe_get( "emc_nas_password") self._root_dir = emc_share_driver.configuration.safe_get( "emc_nas_root_dir") # TODO(Shaun Edwards): make verify ssl a config variable? self._verify_ssl_cert = False self._isilon_api = self._isilon_api_class(self._server_url, auth=( self._username, self._password), verify_ssl_cert=self._verify_ssl_cert) if not self._isilon_api.is_path_existent(self._root_dir): self._isilon_api.create_directory(self._root_dir, recursive=True) def update_share_stats(self, stats_dict): """TODO.""" # TODO(Shaun Edwards): query capacity, set storage_protocol, # QoS support? stats_dict['driver_version'] = VERSION def get_network_allocations_number(self): """Returns number of network allocations for creating VIFs.""" # TODO(Shaun Edwards) return 0 def setup_server(self, network_info, metadata=None): """Set up and configures share server with given network parameters.""" # TODO(Shaun Edwards): Look into supporting share servers def teardown_server(self, server_details, security_services=None): """Teardown share server.""" # TODO(Shaun Edwards): Look into supporting share servers manila-2.0.0/manila/share/drivers/emc/driver.py0000664000567000056710000001357612701407107022560 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ EMC specific NAS storage driver. This driver is a pluggable driver that allows specific EMC NAS devices to be plugged-in as the underlying backend. Use the Manila configuration variable "share_backend_name" to specify, which backend plugins to use. 
""" from oslo_config import cfg from oslo_log import log from manila.share import driver from manila.share.drivers.emc import plugin_manager as manager LOG = log.getLogger(__name__) EMC_NAS_OPTS = [ cfg.StrOpt('emc_nas_login', help='User name for the EMC server.'), cfg.StrOpt('emc_nas_password', help='Password for the EMC server.'), cfg.StrOpt('emc_nas_server', help='EMC server hostname or IP address.'), cfg.PortOpt('emc_nas_server_port', default=8080, help='Port number for the EMC server.'), cfg.BoolOpt('emc_nas_server_secure', default=True, help='Use secure connection to server.'), cfg.StrOpt('emc_share_backend', help='Share backend.'), cfg.StrOpt('emc_nas_server_container', default='server_2', help='Container of share servers.'), cfg.StrOpt('emc_nas_pool_names', deprecated_name='emc_nas_pool_name', help='EMC pool names.'), cfg.StrOpt('emc_nas_root_dir', help='The root directory where shares will be located.'), ] CONF = cfg.CONF CONF.register_opts(EMC_NAS_OPTS) class EMCShareDriver(driver.ShareDriver): """EMC specific NAS driver. Allows for NFS and CIFS NAS storage usage.""" def __init__(self, *args, **kwargs): self.configuration = kwargs.get('configuration', None) if self.configuration: self.configuration.append_config_values(EMC_NAS_OPTS) self.backend_name = self.configuration.safe_get( 'emc_share_backend') else: self.backend_name = CONF.emc_share_backend self.backend_name = self.backend_name or 'EMC_NAS_Storage' self.plugin_manager = manager.EMCPluginManager( namespace='manila.share.drivers.emc.plugins') self.plugin = self.plugin_manager.load_plugin(self.backend_name, LOG) super(EMCShareDriver, self).__init__( self.plugin.driver_handles_share_servers, *args, **kwargs) def create_share(self, context, share, share_server=None): """Is called to create share.""" location = self.plugin.create_share(context, share, share_server) return location def create_share_from_snapshot(self, context, share, snapshot, share_server=None): """Is called to create share from snapshot.""" location = self.plugin.create_share_from_snapshot( context, share, snapshot, share_server) return location def extend_share(self, share, new_size, share_server=None): """Is called to extend share.""" self.plugin.extend_share(share, new_size, share_server) def create_snapshot(self, context, snapshot, share_server=None): """Is called to create snapshot.""" self.plugin.create_snapshot(context, snapshot, share_server) def delete_share(self, context, share, share_server=None): """Is called to remove share.""" self.plugin.delete_share(context, share, share_server) def delete_snapshot(self, context, snapshot, share_server=None): """Is called to remove snapshot.""" self.plugin.delete_snapshot(context, snapshot, share_server) def ensure_share(self, context, share, share_server=None): """Invoked to sure that share is exported.""" self.plugin.ensure_share(context, share, share_server) def allow_access(self, context, share, access, share_server=None): """Allow access to the share.""" self.plugin.allow_access(context, share, access, share_server) def deny_access(self, context, share, access, share_server=None): """Deny access to the share.""" self.plugin.deny_access(context, share, access, share_server) def check_for_setup_error(self): """Check for setup error.""" self.plugin.check_for_setup_error() def do_setup(self, context): """Any initialization the share driver does while starting.""" self.plugin.connect(self, context) def _update_share_stats(self): """Retrieve stats info from share.""" backend_name = self.configuration.safe_get( 
'share_backend_name') or "EMC_NAS_Storage" data = dict( share_backend_name=backend_name, vendor_name='EMC', storage_protocol='NFS_CIFS') self.plugin.update_share_stats(data) super(EMCShareDriver, self)._update_share_stats(data) def get_network_allocations_number(self): """Returns number of network allocations for creating VIFs.""" return self.plugin.get_network_allocations_number() def _setup_server(self, network_info, metadata=None): """Set up and configures share server with given network parameters.""" return self.plugin.setup_server(network_info, metadata) def _teardown_server(self, server_details, security_services=None): """Teardown share server.""" return self.plugin.teardown_server(server_details, security_services) manila-2.0.0/manila/share/drivers/lvm.py0000664000567000056710000003551712701407107021316 0ustar jenkinsjenkins00000000000000# Copyright 2012 NetApp # Copyright 2016 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ LVM Driver for shares. """ import math import os import re from oslo_config import cfg from oslo_log import log from oslo_utils import importutils import six from manila import exception from manila.i18n import _ from manila.i18n import _LE from manila.i18n import _LI from manila.i18n import _LW from manila.share import driver from manila.share.drivers import generic LOG = log.getLogger(__name__) share_opts = [ cfg.StrOpt('lvm_share_export_root', default='$state_path/mnt', help='Base folder where exported shares are located.'), cfg.StrOpt('lvm_share_export_ip', help='IP to be added to export string.'), cfg.IntOpt('lvm_share_mirrors', default=0, help='If set, create LVMs with multiple mirrors. 
Note that ' 'this requires lvm_mirrors + 2 PVs with available space.'), cfg.StrOpt('lvm_share_volume_group', default='lvm-shares', help='Name for the VG that will contain exported shares.'), cfg.ListOpt('lvm_share_helpers', default=[ 'CIFS=manila.share.drivers.helpers.CIFSHelperUserAccess', 'NFS=manila.share.drivers.helpers.NFSHelper', ], help='Specify list of share export helpers.'), ] CONF = cfg.CONF CONF.register_opts(share_opts) CONF.register_opts(generic.share_opts) class LVMMixin(driver.ExecuteMixin): def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" out, err = self._execute('vgs', '--noheadings', '-o', 'name', run_as_root=True) volume_groups = out.split() if self.configuration.lvm_share_volume_group not in volume_groups: msg = (_("share volume group %s doesn't exist") % self.configuration.lvm_share_volume_group) raise exception.InvalidParameterValue(err=msg) if not self.configuration.lvm_share_export_ip: msg = (_("lvm_share_export_ip isn't specified")) raise exception.InvalidParameterValue(err=msg) def _allocate_container(self, share): sizestr = '%sG' % share['size'] cmd = ['lvcreate', '-L', sizestr, '-n', share['name'], self.configuration.lvm_share_volume_group] if self.configuration.lvm_share_mirrors: cmd += ['-m', self.configuration.lvm_share_mirrors, '--nosync'] terras = int(sizestr[:-1]) / 1024.0 if terras >= 1.5: rsize = int(2 ** math.ceil(math.log(terras) / math.log(2))) # NOTE(vish): Next power of two for region size. See: # http://red.ht/U2BPOD cmd += ['-R', six.text_type(rsize)] self._try_execute(*cmd, run_as_root=True) device_name = self._get_local_path(share) self._execute('mkfs.%s' % self.configuration.share_volume_fstype, device_name, run_as_root=True) def _extend_container(self, share, device_name, size): cmd = ['lvextend', '-L', '%sG' % size, '-n', device_name] self._try_execute(*cmd, run_as_root=True) def _deallocate_container(self, share_name): """Deletes a logical volume for share.""" try: self._try_execute('lvremove', '-f', "%s/%s" % (self.configuration.lvm_share_volume_group, share_name), run_as_root=True) except exception.ProcessExecutionError as exc: if "not found" not in exc.stderr: LOG.exception(_LE("Error deleting volume")) raise LOG.warning(_LW("Volume not found: %s") % exc.stderr) def create_snapshot(self, context, snapshot, share_server=None): """Creates a snapshot.""" orig_lv_name = "%s/%s" % (self.configuration.lvm_share_volume_group, snapshot['share_name']) self._try_execute( 'lvcreate', '-L', '%sG' % snapshot['share']['size'], '--name', snapshot['name'], '--snapshot', orig_lv_name, run_as_root=True) def delete_snapshot(self, context, snapshot, share_server=None): """Deletes a snapshot.""" self._deallocate_container(snapshot['name']) class LVMShareDriver(LVMMixin, driver.ShareDriver): """Executes commands relating to Shares.""" def __init__(self, *args, **kwargs): """Do initialization.""" super(LVMShareDriver, self).__init__([False], *args, **kwargs) self.configuration.append_config_values(share_opts) self.configuration.append_config_values(generic.share_opts) self.configuration.share_mount_path = ( self.configuration.lvm_share_export_root) self._helpers = None self.backend_name = self.configuration.safe_get( 'share_backend_name') or 'LVM' # Set of parameters used for compatibility with # Generic driver's helpers. 
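# The NFS/CIFS helpers expect a server dict (normally describing a service
# VM created by the Generic driver); here a pseudo 'share server' pointing
# at this host is passed instead, so exports are published locally using
# 'lvm_share_export_ip' as the public address.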
self.share_server = { 'public_address': self.configuration.lvm_share_export_ip, 'instance_id': self.backend_name, 'lock_name': 'manila_lvm', } def _ssh_exec_as_root(self, server, command, check_exit_code=True): kwargs = {} if 'sudo' in command: kwargs['run_as_root'] = True command.remove('sudo') kwargs['check_exit_code'] = check_exit_code return self._execute(*command, **kwargs) def do_setup(self, context): """Any initialization the volume driver does while starting.""" super(LVMShareDriver, self).do_setup(context) self._setup_helpers() def _setup_helpers(self): """Initializes protocol-specific NAS drivers.""" self._helpers = {} for helper_str in self.configuration.lvm_share_helpers: share_proto, _, import_str = helper_str.partition('=') helper = importutils.import_class(import_str) # TODO(rushiagr): better way to handle configuration # instead of just passing to the helper self._helpers[share_proto.upper()] = helper( self._execute, self._ssh_exec_as_root, self.configuration) def _get_local_path(self, share): # The escape characters are expected by the device mapper. escaped_group = ( self.configuration.lvm_share_volume_group.replace('-', '--')) escaped_name = share['name'].replace('-', '--') return "/dev/mapper/%s-%s" % (escaped_group, escaped_name) def _update_share_stats(self): """Retrieve stats info from share volume group.""" data = { 'share_backend_name': self.backend_name, 'storage_protocol': 'NFS_CIFS', 'reserved_percentage': self.configuration.reserved_share_percentage, 'consistency_group_support': None, 'snapshot_support': True, 'driver_name': 'LVMShareDriver', 'pools': self.get_share_server_pools() } super(LVMShareDriver, self)._update_share_stats(data) def get_share_server_pools(self, share_server=None): out, err = self._execute('vgs', self.configuration.lvm_share_volume_group, '--rows', '--units', 'g', run_as_root=True) total_size = re.findall("VSize\s[0-9.]+g", out)[0][6:-1] free_size = re.findall("VFree\s[0-9.]+g", out)[0][6:-1] return [{ 'pool_name': 'lvm-single-pool', 'total_capacity_gb': float(total_size), 'free_capacity_gb': float(free_size), 'reserved_percentage': 0, }, ] def create_share(self, context, share, share_server=None): self._allocate_container(share) # create file system device_name = self._get_local_path(share) location = self._get_helper(share).create_export(self.share_server, share['name']) self._mount_device(share, device_name) return location def create_share_from_snapshot(self, context, share, snapshot, share_server=None): """Is called to create share from snapshot.""" self._allocate_container(share) device_name = self._get_local_path(snapshot) self._copy_volume(device_name, self._get_local_path(share), share['size']) location = self._get_helper(share).create_export(self.share_server, share['name']) self._mount_device(share, device_name) return location def delete_share(self, context, share, share_server=None): self._remove_export(context, share) self._delete_share(context, share) self._deallocate_container(share['name']) def _remove_export(self, ctx, share): """Removes an access rules for a share.""" mount_path = self._get_mount_path(share) if os.path.exists(mount_path): # umount, may be busy try: self._execute('umount', '-f', mount_path, run_as_root=True) except exception.ProcessExecutionError as exc: if 'device is busy' in six.text_type(exc): raise exception.ShareBusyException(reason=share['name']) else: LOG.info(_LI('Unable to umount: %s'), exc) # remove dir try: os.rmdir(mount_path) except OSError: LOG.warning(_LI('Unable to delete %s'), 
mount_path) def ensure_share(self, ctx, share, share_server=None): """Ensure that storage are mounted and exported.""" device_name = self._get_local_path(share) self._mount_device(share, device_name) self._get_helper(share).create_export(self.share_server, share['name'], recreate=True) def _delete_share(self, ctx, share): """Delete a share.""" try: self._get_helper(share).remove_export(self.share_server, share['name']) except exception.ProcessExecutionError: LOG.warning(_LI("Can't remove share %r"), share['id']) except exception.InvalidShare as exc: LOG.warning(exc.message) def update_access(self, context, share, access_rules, add_rules, delete_rules, share_server=None): """Update access rules for given share. This driver has two different behaviors according to parameters: 1. Recovery after error - 'access_rules' contains all access_rules, 'add_rules' and 'delete_rules' shall be empty. Previously existing access rules are cleared and then added back according to 'access_rules'. 2. Adding/Deleting of several access rules - 'access_rules' contains all access_rules, 'add_rules' and 'delete_rules' contain rules which should be added/deleted. Rules in 'access_rules' are ignored and only rules from 'add_rules' and 'delete_rules' are applied. :param context: Current context :param share: Share model with share data. :param access_rules: All access rules for given share :param add_rules: Empty List or List of access rules which should be added. access_rules already contains these rules. :param delete_rules: Empty List or List of access rules which should be removed. access_rules doesn't contain these rules. :param share_server: None or Share server model """ self._get_helper(share).update_access(self.share_server, share['name'], access_rules, add_rules=add_rules, delete_rules=delete_rules) def _get_helper(self, share): if share['share_proto'].lower().startswith('nfs'): return self._helpers['NFS'] elif share['share_proto'].lower().startswith('cifs'): return self._helpers['CIFS'] else: raise exception.InvalidShare(reason='Wrong share protocol') def _mount_device(self, share, device_name): """Mount LVM share and ignore if already mounted.""" mount_path = self._get_mount_path(share) self._execute('mkdir', '-p', mount_path) try: self._execute('mount', device_name, mount_path, run_as_root=True, check_exit_code=True) self._execute('chmod', '777', mount_path, run_as_root=True, check_exit_code=True) except exception.ProcessExecutionError: out, err = self._execute('mount', '-l', run_as_root=True) if device_name in out: LOG.warning(_LW("%s is already mounted"), device_name) else: raise return mount_path def _unmount_device(self, share): mount_path = self._get_mount_path(share) self._execute('umount', mount_path, run_as_root=True) self._execute('rmdir', mount_path, run_as_root=True) def _get_mount_path(self, share): """Returns path where share is mounted.""" return os.path.join(self.configuration.share_mount_path, share['name']) def _copy_volume(self, srcstr, deststr, size_in_g): # Use O_DIRECT to avoid thrashing the system buffer cache extra_flags = ['iflag=direct', 'oflag=direct'] # Check whether O_DIRECT is supported try: self._execute('dd', 'count=0', 'if=%s' % srcstr, 'of=%s' % deststr, *extra_flags, run_as_root=True) except exception.ProcessExecutionError: extra_flags = [] # Perform the copy self._execute('dd', 'if=%s' % srcstr, 'of=%s' % deststr, 'count=%d' % (size_in_g * 1024), 'bs=1M', *extra_flags, run_as_root=True) def extend_share(self, share, new_size, share_server=None): device_name = 
self._get_local_path(share) self._extend_container(share, device_name, new_size) self._execute('resize2fs', device_name, run_as_root=True) manila-2.0.0/manila/share/drivers/cephfs/0000775000567000056710000000000012701407265021410 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/cephfs/__init__.py0000664000567000056710000000000012701407107023502 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/cephfs/cephfs_native.py0000664000567000056710000002712312701407112024574 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log from oslo_utils import units from manila.common import constants from manila import exception from manila.i18n import _, _LI, _LW from manila.share import driver from manila.share import share_types try: import ceph_volume_client ceph_module_found = True except ImportError as e: ceph_volume_client = None ceph_module_found = False CEPHX_ACCESS_TYPE = "cephx" # The default Ceph administrative identity CEPH_DEFAULT_AUTH_ID = "admin" LOG = log.getLogger(__name__) cephfs_native_opts = [ cfg.StrOpt('cephfs_conf_path', default="", help="Fully qualified path to the ceph.conf file."), cfg.StrOpt('cephfs_cluster_name', help="The name of the cluster in use, if it is not " "the default ('ceph')." ), cfg.StrOpt('cephfs_auth_id', default="manila", help="The name of the ceph auth identity to use." ), cfg.BoolOpt('cephfs_enable_snapshots', default=False, help="Whether to enable snapshots in this driver." ), ] CONF = cfg.CONF CONF.register_opts(cephfs_native_opts) class CephFSNativeDriver(driver.ShareDriver,): """Driver for the Ceph Filsystem. This driver is 'native' in the sense that it exposes a CephFS filesystem for use directly by guests, with no intermediate layer like NFS. """ supported_protocols = ('CEPHFS',) def __init__(self, *args, **kwargs): super(CephFSNativeDriver, self).__init__(False, *args, **kwargs) self.backend_name = self.configuration.safe_get( 'share_backend_name') or 'CephFS-Native' self._volume_client = None self.configuration.append_config_values(cephfs_native_opts) def _update_share_stats(self): stats = self.volume_client.rados.get_cluster_stats() total_capacity_gb = stats['kb'] * units.Mi free_capacity_gb = stats['kb_avail'] * units.Mi data = { 'consistency_group_support': 'pool', 'vendor_name': 'Ceph', 'driver_version': '1.0', 'share_backend_name': self.backend_name, 'storage_protocol': "CEPHFS", 'pools': [ { 'pool_name': 'cephfs', 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb, 'qos': 'False', 'reserved_percentage': 0, 'dedupe': [False], 'compression': [False], 'thin_provisioning': [False] } ], 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb, 'snapshot_support': self.configuration.safe_get( 'cephfs_enable_snapshots'), } super(CephFSNativeDriver, self)._update_share_stats(data) def _to_bytes(self, gigs): """Convert a Manila size into bytes. 
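For example, a 2 GiB share maps to 2 * units.Gi = 2 * 1024**3 = 2147483648 bytes.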
Manila uses gibibytes everywhere. :param gigs: integer number of gibibytes. :return: integer number of bytes. """ return gigs * units.Gi @property def volume_client(self): if self._volume_client: return self._volume_client if not ceph_module_found: raise exception.ManilaException( _("Ceph client libraries not found.") ) conf_path = self.configuration.safe_get('cephfs_conf_path') cluster_name = self.configuration.safe_get('cephfs_cluster_name') auth_id = self.configuration.safe_get('cephfs_auth_id') self._volume_client = ceph_volume_client.CephFSVolumeClient( auth_id, conf_path, cluster_name) LOG.info(_LI("[%(be)s}] Ceph client found, connecting..."), {"be": self.backend_name}) if auth_id != CEPH_DEFAULT_AUTH_ID: # Evict any other manila sessions. Only do this if we're # using a client ID that isn't the default admin ID, to avoid # rudely disrupting anyone else. premount_evict = auth_id else: premount_evict = None try: self._volume_client.connect(premount_evict=premount_evict) except Exception: self._volume_client = None raise else: LOG.info(_LI("[%(be)s] Ceph client connection complete."), {"be": self.backend_name}) return self._volume_client def _share_path(self, share): """Get VolumePath from Share.""" return ceph_volume_client.VolumePath( share['consistency_group_id'], share['id']) def create_share(self, context, share, share_server=None): """Create a CephFS volume. :param context: A RequestContext. :param share: A Share. :param share_server: Always None for CephFS native. :return: The export locations dictionary. """ # `share` is a Share LOG.debug("create_share {be} name={id} size={size} cg_id={cg}".format( be=self.backend_name, id=share['id'], size=share['size'], cg=share['consistency_group_id'])) extra_specs = share_types.get_extra_specs_from_share(share) data_isolated = extra_specs.get("cephfs:data_isolated", False) size = self._to_bytes(share['size']) # Create the CephFS volume volume = self.volume_client.create_volume( self._share_path(share), size=size, data_isolated=data_isolated) # To mount this you need to know the mon IPs and the path to the volume mon_addrs = self.volume_client.get_mon_addrs() export_location = "{addrs}:{path}".format( addrs=",".join(mon_addrs), path=volume['mount_path']) LOG.info(_LI("Calculated export location for share %(id)s: %(loc)s"), {"id": share['id'], "loc": export_location}) return { 'path': export_location, 'is_admin_only': False, 'metadata': {}, } def _allow_access(self, context, share, access, share_server=None): if access['access_type'] != CEPHX_ACCESS_TYPE: raise exception.InvalidShareAccess( reason=_("Only 'cephx' access type allowed.")) if access['access_level'] == constants.ACCESS_LEVEL_RO: raise exception.InvalidShareAccessLevel( level=constants.ACCESS_LEVEL_RO) ceph_auth_id = access['access_to'] auth_result = self.volume_client.authorize(self._share_path(share), ceph_auth_id) return auth_result['auth_key'] def _deny_access(self, context, share, access, share_server=None): if access['access_type'] != CEPHX_ACCESS_TYPE: LOG.warning(_LW("Invalid access type '%(type)s', " "ignoring in deny."), {"type": access['access_type']}) return self.volume_client.deauthorize(self._share_path(share), access['access_to']) self.volume_client.evict(access['access_to']) def update_access(self, context, share, access_rules, add_rules, delete_rules, share_server=None): # The interface to Ceph just provides add/remove methods, since it # was created at start of mitaka cycle when there was no requirement # to be able to list access rules or set them en masse. 
Therefore # we implement update_access as best we can. In future ceph's # interface should be extended to enable a full implementation # of update_access. for rule in add_rules: self._allow_access(context, share, rule) for rule in delete_rules: self._deny_access(context, share, rule) # This is where we would list all permitted clients and remove # those that are not in `access_rules` if the ceph interface # enabled it. if not (add_rules or delete_rules): for rule in access_rules: self._allow_access(context, share, rule) def delete_share(self, context, share, share_server=None): extra_specs = share_types.get_extra_specs_from_share(share) data_isolated = extra_specs.get("cephfs:data_isolated", False) self.volume_client.delete_volume(self._share_path(share), data_isolated=data_isolated) self.volume_client.purge_volume(self._share_path(share), data_isolated=data_isolated) def ensure_share(self, context, share, share_server=None): # Creation is idempotent return self.create_share(context, share, share_server) def extend_share(self, share, new_size, share_server=None): LOG.debug("extend_share {id} {size}".format( id=share['id'], size=new_size)) self.volume_client.set_max_bytes(self._share_path(share), self._to_bytes(new_size)) def shrink_share(self, share, new_size, share_server=None): LOG.debug("shrink_share {id} {size}".format( id=share['id'], size=new_size)) new_bytes = self._to_bytes(new_size) used = self.volume_client.get_used_bytes(self._share_path(share)) if used > new_bytes: # While in fact we can "shrink" our volumes to less than their # used bytes (it's just a quota), raise error anyway to avoid # confusing API consumers that might depend on typical shrink # behaviour. raise exception.ShareShrinkingPossibleDataLoss( share_id=share['id']) self.volume_client.set_max_bytes(self._share_path(share), new_bytes) def create_snapshot(self, context, snapshot, share_server=None): self.volume_client.create_snapshot_volume( self._share_path(snapshot['share']), snapshot['name']) def delete_snapshot(self, context, snapshot, share_server=None): self.volume_client.destroy_snapshot_volume( self._share_path(snapshot['share']), snapshot['name']) def create_consistency_group(self, context, cg_dict, share_server=None): self.volume_client.create_group(cg_dict['id']) def delete_consistency_group(self, context, cg_dict, share_server=None): self.volume_client.destroy_group(cg_dict['id']) def delete_cgsnapshot(self, context, snap_dict, share_server=None): self.volume_client.destroy_snapshot_group( snap_dict['consistency_group_id'], snap_dict['id']) return None, [] def create_cgsnapshot(self, context, snap_dict, share_server=None): self.volume_client.create_snapshot_group( snap_dict['consistency_group_id'], snap_dict['id']) return None, [] def __del__(self): if self._volume_client: self._volume_client.disconnect() self._volume_client = None manila-2.0.0/manila/share/drivers/ibm/0000775000567000056710000000000012701407265020707 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/ibm/ganesha_utils.py0000664000567000056710000002714112701407112024103 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Ganesha Admin Utilities Ganesha NFS does not provide many tools for automating the process of creating and managing export definitions. This module provides utilities to help parse a specified ganesha config file and return a map containing the export definitions and attributes. A method for republishing updated export definitions is also provided, and there are methods for requesting the ganesha server to reload the export definitions. Consider moving this to a common location for use by other manila drivers. """ import copy import re import socket import time import netaddr from oslo_log import log import six from manila import exception from manila.i18n import _, _LI from manila import utils LOG = log.getLogger(__name__) # simpler pattern for matching a single avpair per line, # skips lines starting with # comment char AVPATTERN = re.compile('^\s*(?!#)\s*(?P<attr>\S+)\s*=\s*(?P<val>\S+)\s*;') # NFS Ganesha v1.5, v2.0 format used here. # TODO(nileshb): Upgrade it to NFS Ganesha 2.1 format. DEFAULT_EXPORT_ATTRS = { 'export_id': 'undefined', 'path': 'undefined', 'fsal': 'undefined', 'root_access': '"*"', 'rw_access': '"*"', 'pseudo': 'undefined', 'anonymous_root_uid': '-2', 'nfs_protocols': '"3,4"', 'transport_protocols': '"UDP,TCP"', 'sectype': '"sys"', 'maxread': '65536', 'maxwrite': '65536', 'prefread': '65536', 'prefwrite': '65536', 'filesystem_id': '192.168', 'tag': 'undefined', } STARTING_EXPORT_ID = 100 def valid_flags(): return DEFAULT_EXPORT_ATTRS.keys() def parse_ganesha_config(configpath): """Parse the specified ganesha configuration. Parse a configuration file and return a list of lines that were found before the first EXPORT block, and a dictionary of exports and their attributes. The input configuration file should be a valid ganesha config file and the export blocks should be the last items in the file.
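As an illustrative sketch (attribute names and values below are examples only), each export clause in the input is expected to use the avpair layout that this module also writes back out:
EXPORT
{
export_id = 100 ;
fsal = "GPFS" ;
path = "/gpfs0/share-example" ;
pseudo = "/gpfs0/share-example" ;
rw_access = "172.24.4.4" ;
nfs_protocols = "3,4" ;
}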
:returns: pre_lines -- List of lines, before the exports clause begins exports -- Dict of exports, indexed with the 'export_id' Hers is a sample output: pre_lines = [ '###################################################', '# Export entries', '###################################################', '', '', '# First export entry'] exports = { '100': { 'anonymous_root_uid': '-2', 'export_id': '100', 'filesystem_id': '192.168', 'fsal': '"GPFS"', 'maxread': '65536', 'maxwrite': '65536', 'nfs_protocols': '"3,4"', 'path': '"/gpfs0/share-0d7df0c0-4792-4e2a-68dc7206a164"', 'prefread': '65536', 'prefwrite': '65536', 'pseudo': '"/gpfs0/share-0d7df0c0-4792-4e2a-68dc7206a164"', 'root_access': '"*"', 'rw_access': '""', 'sectype': '"sys"', 'tag': '"fs100"', 'transport_protocols': '"UDP,TCP"'}, '101': { 'anonymous_root_uid': '-2', 'export_id': '101', 'filesystem_id': '192.168', 'fsal': '"GPFS"', 'maxread': '65536', 'maxwrite': '65536', 'nfs_protocols': '"3,4"', 'path': '"/gpfs0/share-74bee4dc-e07a-44a9-4be619a13fb1"', 'prefread': '65536', 'prefwrite': '65536', 'pseudo': '"/gpfs0/share-74bee4dc-e07a-44a9-4be619a13fb1"', 'root_access': '"*"', 'rw_access': '"172.24.4.4"', 'sectype': '"sys"', 'tag': '"fs101"', 'transport_protocols': '"UDP,TCP"'}} """ export_count = 0 exports = dict() pre_lines = [] with open(configpath) as f: for l in f.readlines(): line = l.strip() if export_count == 0 and line != 'EXPORT': pre_lines.append(line) else: if line == 'EXPORT': export_count += 1 expattrs = dict() try: match_obj = AVPATTERN.match(line) attr = match_obj.group('attr').lower() val = match_obj.group('val') expattrs[attr] = val if attr == 'export_id': exports[val] = expattrs except AttributeError: pass if export_count != len(exports): msg = (_('Invalid export config file %(configpath)s: ' '%(exports)s export clauses found, but ' '%(export_ids)s export_ids.') % {"configpath": configpath, "exports": str(export_count), "export_ids": str(len(exports))}) LOG.error(msg) raise exception.GPFSGaneshaException(msg) return pre_lines, exports def _get_export_by_path(exports, path): for index, export in exports.items(): if export and 'path' in export and export['path'].strip('"\'') == path: return export return None def get_export_by_path(exports, path): """Return the export that matches the specified path.""" return _get_export_by_path(exports, path) def export_exists(exports, path): """Return true if an export exists with the specified path.""" return _get_export_by_path(exports, path) is not None def get_next_id(exports): """Return an export id that is one larger than largest existing id.""" try: next_id = max(map(int, exports.keys())) + 1 except ValueError: next_id = STARTING_EXPORT_ID LOG.debug("Export id = %d", next_id) return next_id def get_export_template(): return copy.copy(DEFAULT_EXPORT_ATTRS) def _convert_ipstring_to_ipn(ipstring): """Transform a single ip string into a list of IPNetwork objects.""" if netaddr.valid_glob(ipstring): ipns = netaddr.glob_to_cidrs(ipstring) else: try: ipns = [netaddr.IPNetwork(ipstring)] except netaddr.AddrFormatError: msg = (_('Invalid IP access string %s.') % ipstring) LOG.error(msg) raise exception.GPFSGaneshaException(msg) return ipns def _format_ips(iptokens): ipaddrs = set() for iptoken in iptokens: ipn_list = _convert_ipstring_to_ipn(iptoken) for ipn in ipn_list: ips = [ip for ip in netaddr.iter_unique_ips(ipn)] ipaddrs = ipaddrs.union(ips) return ipaddrs def format_access_list(access_string, deny_access=None): """Transform access string into a format ganesha understands.""" # handle 
the case where there is an access string with a trailing comma access_string = access_string.strip(',') iptokens = access_string.split(',') ipaddrs = _format_ips(iptokens) if deny_access: deny_tokens = deny_access.split(',') deny_ipaddrs = _format_ips(deny_tokens) ipaddrs = ipaddrs - deny_ipaddrs ipaddrlist = sorted(list(ipaddrs)) return ','.join([six.text_type(ip) for ip in ipaddrlist]) def _publish_local_config(configpath, pre_lines, exports): tmp_path = '%s.tmp.%s' % (configpath, time.time()) LOG.debug("tmp_path = %s", tmp_path) cpcmd = ['install', '-m', '666', configpath, tmp_path] try: utils.execute(*cpcmd, run_as_root=True) except exception.ProcessExecutionError as e: msg = (_('Failed while publishing ganesha config locally. ' 'Error: %s.') % six.text_type(e)) LOG.error(msg) raise exception.GPFSGaneshaException(msg) with open(tmp_path, 'w+') as f: for l in pre_lines: f.write('%s\n' % l) for e in exports: f.write('EXPORT\n{\n') for attr in exports[e]: f.write('%s = %s ;\n' % (attr, exports[e][attr])) f.write('}\n') mvcmd = ['mv', tmp_path, configpath] try: utils.execute(*mvcmd, run_as_root=True) except exception.ProcessExecutionError as e: msg = (_('Failed while publishing ganesha config locally. ' 'Error: %s.') % six.text_type(e)) LOG.error(msg) raise exception.GPFSGaneshaException(msg) LOG.info(_LI('Ganesha config %s published locally.'), configpath) def _publish_remote_config(server, sshlogin, sshkey, configpath): dest = '%s@%s:%s' % (sshlogin, server, configpath) scpcmd = ['scp', '-i', sshkey, configpath, dest] try: utils.execute(*scpcmd, run_as_root=False) except exception.ProcessExecutionError as e: msg = (_('Failed while publishing ganesha config on remote server. ' 'Error: %s.') % six.text_type(e)) LOG.error(msg) raise exception.GPFSGaneshaException(msg) LOG.info(_LI('Ganesha config %(path)s published to %(server)s.'), {'path': configpath, 'server': server}) def publish_ganesha_config(servers, sshlogin, sshkey, configpath, pre_lines, exports): """Publish the specified configuration information. Save the existing configuration file and then publish a new ganesha configuration to the specified path. The pre-export lines are written first, followed by the collection of export definitions. """ _publish_local_config(configpath, pre_lines, exports) localserver_iplist = socket.gethostbyname_ex(socket.gethostname())[2] for gsvr in servers: if gsvr not in localserver_iplist: _publish_remote_config(gsvr, sshlogin, sshkey, configpath) def reload_ganesha_config(servers, sshlogin, service='ganesha.nfsd'): """Request ganesha server reload updated config.""" # Note: dynamic reload of ganesha config is not enabled # in ganesha v2.0. 
Therefore, the code uses the ganesha service restart # option to make sure the config changes are reloaded for server in servers: # Until dynamic reload is fully implemented, or if the reload returns a bad # status, revert to a service restart instead LOG.info(_LI('Restart service %(service)s on %(server)s to force a ' 'config file reload'), {'service': service, 'server': server}) run_local = True reload_cmd = ['service', service, 'restart'] localserver_iplist = socket.gethostbyname_ex( socket.gethostname())[2] if server not in localserver_iplist: remote_login = sshlogin + '@' + server reload_cmd = ['ssh', remote_login] + reload_cmd run_local = False try: utils.execute(*reload_cmd, run_as_root=run_local) except exception.ProcessExecutionError as e: msg = (_('Could not restart service %(service)s on ' '%(server)s: %(excmsg)s') % {'service': service, 'server': server, 'excmsg': six.text_type(e)}) LOG.error(msg) raise exception.GPFSGaneshaException(msg) manila-2.0.0/manila/share/drivers/ibm/__init__.py0000664000567000056710000000000012701407107023001 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/ibm/gpfs.py0000664000567000056710000010420212701407107022212 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ GPFS Driver for shares. Config Requirements: GPFS file system must have quotas enabled (`mmchfs -Q yes`). Notes: GPFS independent fileset is used for each share. TODO(nileshb): add support for share server creation/deletion/handling. Limitation: While using a remote GPFS node with Ganesha NFS, 'gpfs_ssh_private_key' for remote login to the GPFS node must be specified and there must be passwordless authentication already set up between the Manila share service and the remote GPFS node. """ import abc import copy import math import os import re import socket from oslo_config import cfg from oslo_log import log from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import strutils from oslo_utils import units import six from manila import exception from manila.i18n import _, _LE, _LI from manila.share import driver from manila.share.drivers.ibm import ganesha_utils from manila import utils LOG = log.getLogger(__name__) # matches multiple comma-separated avpairs on a line. Values with an embedded # comma must be wrapped in quotation marks AVPATTERN = re.compile(r'\s*(?P<attr>\w+)\s*=\s*(?P<val>' '(["][a-zA-Z0-9_, ]+["])|(\w+))\s*[,]?') ERR_FILE_NOT_FOUND = 2 gpfs_share_opts = [ cfg.StrOpt('gpfs_share_export_ip', help='IP to be added to GPFS export string.'), cfg.StrOpt('gpfs_mount_point_base', default='$state_path/mnt', help='Base folder where exported shares are located.'), cfg.StrOpt('gpfs_nfs_server_type', default='KNFS', help=('NFS Server type.
Valid choices are "KNFS" (kernel NFS) ' 'or "GNFS" (Ganesha NFS).')), cfg.ListOpt('gpfs_nfs_server_list', help=('A list of the fully qualified NFS server names that ' 'make up the OpenStack Manila configuration.')), cfg.PortOpt('gpfs_ssh_port', default=22, help='GPFS server SSH port.'), cfg.StrOpt('gpfs_ssh_login', help='GPFS server SSH login name.'), cfg.StrOpt('gpfs_ssh_password', secret=True, help='GPFS server SSH login password. ' 'The password is not needed, if \'gpfs_ssh_private_key\' ' 'is configured.'), cfg.StrOpt('gpfs_ssh_private_key', help='Path to GPFS server SSH private key for login.'), cfg.ListOpt('gpfs_share_helpers', default=[ 'KNFS=manila.share.drivers.ibm.gpfs.KNFSHelper', 'GNFS=manila.share.drivers.ibm.gpfs.GNFSHelper', ], help='Specify list of share export helpers.'), cfg.StrOpt('knfs_export_options', default=('rw,sync,no_root_squash,insecure,no_wdelay,' 'no_subtree_check'), help=('Options to use when exporting a share using kernel ' 'NFS server. Note that these defaults can be overridden ' 'when a share is created by passing metadata with key ' 'name export_options.')), ] CONF = cfg.CONF CONF.register_opts(gpfs_share_opts) class GPFSShareDriver(driver.ExecuteMixin, driver.GaneshaMixin, driver.ShareDriver): """GPFS Share Driver. Executes commands relating to Shares. Supports creation of shares on a GPFS cluster. API version history: 1.0 - Initial version. 1.1 - Added extend_share functionality """ def __init__(self, *args, **kwargs): """Do initialization.""" super(GPFSShareDriver, self).__init__(False, *args, **kwargs) self._helpers = {} self.configuration.append_config_values(gpfs_share_opts) self.backend_name = self.configuration.safe_get( 'share_backend_name') or "IBM Storage System" self.sshpool = None self.ssh_connections = {} self._gpfs_execute = None def do_setup(self, context): """Any initialization the share driver does while starting.""" super(GPFSShareDriver, self).do_setup(context) host = self.configuration.gpfs_share_export_ip localserver_iplist = socket.gethostbyname_ex(socket.gethostname())[2] if host in localserver_iplist: # run locally self._gpfs_execute = self._gpfs_local_execute else: self._gpfs_execute = self._gpfs_remote_execute self._setup_helpers() def _gpfs_local_execute(self, *cmd, **kwargs): if 'run_as_root' not in kwargs: kwargs.update({'run_as_root': True}) return utils.execute(*cmd, **kwargs) def _gpfs_remote_execute(self, *cmd, **kwargs): host = self.configuration.gpfs_share_export_ip check_exit_code = kwargs.pop('check_exit_code', True) return self._run_ssh(host, cmd, check_exit_code) def _run_ssh(self, host, cmd_list, ignore_exit_code=None, check_exit_code=True): command = ' '.join(six.moves.shlex_quote(cmd_arg) for cmd_arg in cmd_list) if not self.sshpool: gpfs_ssh_login = self.configuration.gpfs_ssh_login password = self.configuration.gpfs_ssh_password privatekey = self.configuration.gpfs_ssh_private_key gpfs_ssh_port = self.configuration.gpfs_ssh_port ssh_conn_timeout = self.configuration.ssh_conn_timeout min_size = self.configuration.ssh_min_pool_conn max_size = self.configuration.ssh_max_pool_conn self.sshpool = utils.SSHPool(host, gpfs_ssh_port, ssh_conn_timeout, gpfs_ssh_login, password=password, privatekey=privatekey, min_size=min_size, max_size=max_size) try: with self.sshpool.item() as ssh: return self._gpfs_ssh_execute( ssh, command, check_exit_code=check_exit_code) except Exception as e: with excutils.save_and_reraise_exception(): msg = (_('Error running SSH command: %(cmd)s. 
' 'Error: %(excmsg)s.') % {'cmd': command, 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) def _gpfs_ssh_execute(self, ssh, cmd, ignore_exit_code=None, check_exit_code=True): sanitized_cmd = strutils.mask_password(cmd) LOG.debug('Running cmd (SSH): %s', sanitized_cmd) stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd) channel = stdout_stream.channel stdout = stdout_stream.read() sanitized_stdout = strutils.mask_password(stdout) stderr = stderr_stream.read() sanitized_stderr = strutils.mask_password(stderr) stdin_stream.close() exit_status = channel.recv_exit_status() # exit_status == -1 if no exit code was returned if exit_status != -1: LOG.debug('Result was %s' % exit_status) if ((check_exit_code and exit_status != 0) and (ignore_exit_code is None or exit_status not in ignore_exit_code)): raise exception.ProcessExecutionError(exit_code=exit_status, stdout=sanitized_stdout, stderr=sanitized_stderr, cmd=sanitized_cmd) return (sanitized_stdout, sanitized_stderr) def _check_gpfs_state(self): try: out, __ = self._gpfs_execute('mmgetstate', '-Y') except exception.ProcessExecutionError as e: msg = (_('Failed to check GPFS state. Error: %(excmsg)s.') % {'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) lines = out.splitlines() try: state_token = lines[0].split(':').index('state') gpfs_state = lines[1].split(':')[state_token] except (IndexError, ValueError) as e: msg = (_('Failed to check GPFS state. Error: %(excmsg)s.') % {'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) if gpfs_state != 'active': return False return True def _is_dir(self, path): try: output, __ = self._gpfs_execute('stat', '--format=%F', path, run_as_root=False) except exception.ProcessExecutionError as e: msg = (_('%(path)s is not a directory. Error: %(excmsg)s') % {'path': path, 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) return output.strip() == 'directory' def _is_gpfs_path(self, directory): try: self._gpfs_execute('mmlsattr', directory) except exception.ProcessExecutionError as e: msg = (_('%(dir)s is not on GPFS filesystem. Error: %(excmsg)s.') % {'dir': directory, 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) return True def _setup_helpers(self): """Initializes protocol-specific NAS drivers.""" self._helpers = {} for helper_str in self.configuration.gpfs_share_helpers: share_proto, _, import_str = helper_str.partition('=') helper = importutils.import_class(import_str) self._helpers[share_proto.upper()] = helper(self._gpfs_execute, self.configuration) def _local_path(self, sharename): """Get local path for a share or share snapshot by name.""" return os.path.join(self.configuration.gpfs_mount_point_base, sharename) def _get_gpfs_device(self): fspath = self.configuration.gpfs_mount_point_base try: (out, __) = self._gpfs_execute('df', fspath) except exception.ProcessExecutionError as e: msg = (_('Failed to get GPFS device for %(fspath)s.' 'Error: %(excmsg)s') % {'fspath': fspath, 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) lines = out.splitlines() fs = lines[1].split()[0] return fs def _create_share(self, shareobj): """Create a linked fileset file in GPFS. Note: GPFS file system must have quotas enabled (mmchfs -Q yes). 
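Roughly, the method drives the following GPFS command sequence (the device, share name, path and size below are illustrative placeholders):
mmcrfileset <fs_device> <share_name> --inode-space new
mmlinkfileset <fs_device> <share_name> -J <gpfs_mount_point_base>/<share_name>
mmsetquota -j <share_name> -h <size>G <fs_device>
chmod 777 <gpfs_mount_point_base>/<share_name>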
""" sharename = shareobj['name'] sizestr = '%sG' % shareobj['size'] sharepath = self._local_path(sharename) fsdev = self._get_gpfs_device() # create fileset for the share, link it to root path and set max size try: self._gpfs_execute('mmcrfileset', fsdev, sharename, '--inode-space', 'new') except exception.ProcessExecutionError as e: msg = (_('Failed to create fileset on %(fsdev)s for ' 'the share %(sharename)s. Error: %(excmsg)s.') % {'fsdev': fsdev, 'sharename': sharename, 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) try: self._gpfs_execute('mmlinkfileset', fsdev, sharename, '-J', sharepath) except exception.ProcessExecutionError as e: msg = (_('Failed to link fileset for the share %(sharename)s. ' 'Error: %(excmsg)s.') % {'sharename': sharename, 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) try: self._gpfs_execute('mmsetquota', '-j', sharename, '-h', sizestr, fsdev) except exception.ProcessExecutionError as e: msg = (_('Failed to set quota for the share %(sharename)s. ' 'Error: %(excmsg)s.') % {'sharename': sharename, 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) try: self._gpfs_execute('chmod', '777', sharepath) except exception.ProcessExecutionError as e: msg = (_('Failed to set permissions for share %(sharename)s. ' 'Error: %(excmsg)s.') % {'sharename': sharename, 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) def _delete_share(self, shareobj): """Remove container by removing GPFS fileset.""" sharename = shareobj['name'] fsdev = self._get_gpfs_device() # ignore error, when the fileset does not exist # it may happen, when the share creation failed, the share is in # 'error' state, and the fileset was never created # we want to ignore that error condition while deleting the fileset, # i.e. 'Fileset name share-xyz not found', with error code '2' # and mark the deletion successful # ignore_exit_code = [ERR_FILE_NOT_FOUND] # unlink and delete the share's fileset try: self._gpfs_execute('mmunlinkfileset', fsdev, sharename, '-f') except exception.ProcessExecutionError as e: msg = (_('Failed unlink fileset for share %(sharename)s. ' 'Error: %(excmsg)s.') % {'sharename': sharename, 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) try: self._gpfs_execute('mmdelfileset', fsdev, sharename, '-f') except exception.ProcessExecutionError as e: msg = (_('Failed delete fileset for share %(sharename)s. ' 'Error: %(excmsg)s.') % {'sharename': sharename, 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) def _get_available_capacity(self, path): """Calculate available space on path.""" try: out, __ = self._gpfs_execute('df', '-P', '-B', '1', path) except exception.ProcessExecutionError as e: msg = (_('Failed to check available capacity for %(path)s.' 'Error: %(excmsg)s.') % {'path': path, 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) out = out.splitlines()[1] size = int(out.split()[1]) available = int(out.split()[3]) return available, size def _create_share_snapshot(self, snapshot): """Create a snapshot of the share.""" sharename = snapshot['share_name'] snapshotname = snapshot['name'] fsdev = self._get_gpfs_device() LOG.debug("sharename = %s, snapshotname = %s, fsdev = %s", (sharename, snapshotname, fsdev)) try: self._gpfs_execute('mmcrsnapshot', fsdev, snapshot['name'], '-j', sharename) except exception.ProcessExecutionError as e: msg = (_('Failed to create snapshot %(snapshot)s. 
' 'Error: %(excmsg)s.') % {'snapshot': snapshot['name'], 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) def _delete_share_snapshot(self, snapshot): """Delete a snapshot of the share.""" sharename = snapshot['share_name'] fsdev = self._get_gpfs_device() try: self._gpfs_execute('mmdelsnapshot', fsdev, snapshot['name'], '-j', sharename) except exception.ProcessExecutionError as e: msg = (_('Failed to delete snapshot %(snapshot)s. ' 'Error: %(excmsg)s.') % {'snapshot': snapshot['name'], 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) def _create_share_from_snapshot(self, share, snapshot, share_path): """Create share from a share snapshot.""" self._create_share(share) snapshot_path = self._get_snapshot_path(snapshot) snapshot_path = snapshot_path + "/" try: self._gpfs_execute('rsync', '-rp', snapshot_path, share_path) except exception.ProcessExecutionError as e: msg = (_('Failed to create share %(share)s from ' 'snapshot %(snapshot)s. Error: %(excmsg)s.') % {'share': share['name'], 'snapshot': snapshot['name'], 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) def _extend_share(self, shareobj, new_size): sharename = shareobj['name'] sizestr = '%sG' % new_size fsdev = self._get_gpfs_device() try: self._gpfs_execute('mmsetquota', '-j', sharename, '-h', sizestr, fsdev) except exception.ProcessExecutionError as e: msg = (_('Failed to set quota for the share %(sharename)s. ' 'Error: %(excmsg)s.') % {'sharename': sharename, 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) def get_network_allocations_number(self): return 0 def create_share(self, ctx, share, share_server=None): """Create GPFS directory that will be represented as share.""" self._create_share(share) share_path = self._get_share_path(share) location = self._get_helper(share).create_export(share_path) return location def create_share_from_snapshot(self, ctx, share, snapshot, share_server=None): """Is called to create share from a snapshot.""" share_path = self._get_share_path(share) self._create_share_from_snapshot(share, snapshot, share_path) location = self._get_helper(share).create_export(share_path) return location def create_snapshot(self, context, snapshot, share_server=None): """Creates a snapshot.""" self._create_share_snapshot(snapshot) def delete_share(self, ctx, share, share_server=None): """Remove and cleanup share storage.""" location = self._get_share_path(share) self._get_helper(share).remove_export(location, share) self._delete_share(share) def delete_snapshot(self, context, snapshot, share_server=None): """Deletes a snapshot.""" self._delete_share_snapshot(snapshot) def extend_share(self, share, new_size, share_server=None): """Extends the quota on the share fileset.""" self._extend_share(share, new_size) def ensure_share(self, ctx, share, share_server=None): """Ensure that storage are mounted and exported.""" def allow_access(self, ctx, share, access, share_server=None): """Allow access to the share.""" location = self._get_share_path(share) self._get_helper(share).allow_access(location, share, access['access_type'], access['access_to']) def deny_access(self, ctx, share, access, share_server=None): """Deny access to the share.""" location = self._get_share_path(share) self._get_helper(share).deny_access(location, share, access['access_type'], access['access_to']) def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" if not self._check_gpfs_state(): msg = (_('GPFS is not active.')) LOG.error(msg) raise exception.GPFSException(msg) 
if not self.configuration.gpfs_share_export_ip: msg = (_('gpfs_share_export_ip must be specified.')) LOG.error(msg) raise exception.InvalidParameterValue(err=msg) gpfs_base_dir = self.configuration.gpfs_mount_point_base if not gpfs_base_dir.startswith('/'): msg = (_('%s must be an absolute path.') % gpfs_base_dir) LOG.error(msg) raise exception.GPFSException(msg) if not self._is_dir(gpfs_base_dir): msg = (_('%s is not a directory.') % gpfs_base_dir) LOG.error(msg) raise exception.GPFSException(msg) if not self._is_gpfs_path(gpfs_base_dir): msg = (_('%s is not on GPFS. Perhaps GPFS not mounted.') % gpfs_base_dir) LOG.error(msg) raise exception.GPFSException(msg) if self.configuration.gpfs_nfs_server_type not in ['KNFS', 'GNFS']: msg = (_('Invalid gpfs_nfs_server_type value: %s. ' 'Valid values are: "KNFS", "GNFS".') % self.configuration.gpfs_nfs_server_type) LOG.error(msg) raise exception.InvalidParameterValue(err=msg) if self.configuration.gpfs_nfs_server_list is None: msg = (_('Missing value for gpfs_nfs_server_list.')) LOG.error(msg) raise exception.InvalidParameterValue(err=msg) def _update_share_stats(self): """Retrieve stats info from share volume group.""" data = dict( share_backend_name=self.backend_name, vendor_name='IBM', storage_protocol='NFS', reserved_percentage=self.configuration.reserved_share_percentage) free, capacity = self._get_available_capacity( self.configuration.gpfs_mount_point_base) data['total_capacity_gb'] = math.ceil(capacity / units.Gi) data['free_capacity_gb'] = math.ceil(free / units.Gi) super(GPFSShareDriver, self)._update_share_stats(data) def _get_helper(self, share): if share['share_proto'] == 'NFS': return self._helpers[self.configuration.gpfs_nfs_server_type] else: msg = (_('Share protocol %s not supported by GPFS driver.') % share['share_proto']) LOG.error(msg) raise exception.InvalidShare(reason=msg) def _get_share_path(self, share): """Returns share path on storage provider.""" return os.path.join(self.configuration.gpfs_mount_point_base, share['name']) def _get_snapshot_path(self, snapshot): """Returns share path on storage provider.""" snapshot_dir = ".snapshots" return os.path.join(self.configuration.gpfs_mount_point_base, snapshot["share_name"], snapshot_dir, snapshot["name"]) @six.add_metaclass(abc.ABCMeta) class NASHelperBase(object): """Interface to work with share.""" def __init__(self, execute, config_object): self.configuration = config_object self._execute = execute def create_export(self, local_path): """Construct location of new export.""" return ':'.join([self.configuration.gpfs_share_export_ip, local_path]) @abc.abstractmethod def remove_export(self, local_path, share): """Remove export.""" @abc.abstractmethod def allow_access(self, local_path, share, access_type, access): """Allow access to the host.""" @abc.abstractmethod def deny_access(self, local_path, share, access_type, access, force=False): """Deny access to the host.""" class KNFSHelper(NASHelperBase): """Wrapper for Kernel NFS Commands.""" def __init__(self, execute, config_object): super(KNFSHelper, self).__init__(execute, config_object) self._execute = execute try: self._execute('exportfs', check_exit_code=True, run_as_root=True) except exception.ProcessExecutionError as e: msg = (_('NFS server not found. 
Error: %s.') % e) LOG.error(msg) raise exception.GPFSException(msg) def _publish_access(self, *cmd): for server in self.configuration.gpfs_nfs_server_list: localserver_iplist = socket.gethostbyname_ex( socket.gethostname())[2] run_local = True if server not in localserver_iplist: sshlogin = self.configuration.gpfs_ssh_login remote_login = sshlogin + '@' + server cmd = ['ssh', remote_login] + list(cmd) run_local = False try: utils.execute(*cmd, run_as_root=run_local, check_exit_code=True) except exception.ProcessExecutionError: raise def _get_export_options(self, share): """Set various export attributes for share.""" metadata = share.get('share_metadata') options = None if metadata: for item in metadata: if item['key'] == 'export_options': options = item['value'] else: msg = (_('Unknown metadata key %s.') % item['key']) LOG.error(msg) raise exception.InvalidInput(reason=msg) if not options: options = self.configuration.knfs_export_options return options def remove_export(self, local_path, share): """Remove export.""" def allow_access(self, local_path, share, access_type, access): """Allow access to one or more vm instances.""" if access_type != 'ip': raise exception.InvalidShareAccess('Only ip access type ' 'supported.') # check if present in export try: out, __ = self._execute('exportfs', run_as_root=True) except exception.ProcessExecutionError as e: msg = (_('Failed to check exports on the systems. ' ' Error: %s.') % e) LOG.error(msg) raise exception.GPFSException(msg) out = re.search(re.escape(local_path) + '[\s\n]*' + re.escape(access), out) if out is not None: raise exception.ShareAccessExists(access_type=access_type, access=access) export_opts = self._get_export_options(share) cmd = ['exportfs', '-o', export_opts, ':'.join([access, local_path])] try: self._publish_access(*cmd) except exception.ProcessExecutionError as e: msg = (_('Failed to allow access for share %(sharename)s. ' 'Error: %(excmsg)s.') % {'sharename': share['name'], 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) def deny_access(self, local_path, share, access_type, access, force=False): """Remove access for one or more vm instances.""" cmd = ['exportfs', '-u', ':'.join([access, local_path])] try: self._publish_access(*cmd) except exception.ProcessExecutionError as e: msg = (_('Failed to deny access for share %(sharename)s. 
' 'Error: %(excmsg)s.') % {'sharename': share['name'], 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) class GNFSHelper(NASHelperBase): """Wrapper for Ganesha NFS Commands.""" def __init__(self, execute, config_object): super(GNFSHelper, self).__init__(execute, config_object) self.default_export_options = dict() for m in AVPATTERN.finditer( self.configuration.ganesha_nfs_export_options ): self.default_export_options[m.group('attr')] = m.group('val') def _get_export_options(self, share): """Set various export attributes for share.""" # load default options first - any options passed as share metadata # will take precedence options = copy.copy(self.default_export_options) metadata = share.get('share_metadata') for item in metadata: attr = item['key'] if attr in ganesha_utils.valid_flags(): options[attr] = item['value'] else: LOG.error(_LE('Invalid metadata %(attr)s for share ' '%(share)s.'), {'attr': attr, 'share': share['name']}) return options @utils.synchronized("ganesha-process-req", external=True) def _ganesha_process_request(self, req_type, local_path, share, access_type=None, access=None, force=False): cfgpath = self.configuration.ganesha_config_path gservice = self.configuration.ganesha_service_name gservers = self.configuration.gpfs_nfs_server_list sshlogin = self.configuration.gpfs_ssh_login sshkey = self.configuration.gpfs_ssh_private_key pre_lines, exports = ganesha_utils.parse_ganesha_config(cfgpath) reload_needed = True if (req_type == "allow_access"): export_opts = self._get_export_options(share) # add the new share if it's not already defined if not ganesha_utils.export_exists(exports, local_path): # Add a brand new export definition new_id = ganesha_utils.get_next_id(exports) export = ganesha_utils.get_export_template() export['fsal'] = '"GPFS"' export['export_id'] = new_id export['tag'] = '"fs%s"' % new_id export['path'] = '"%s"' % local_path export['pseudo'] = '"%s"' % local_path export['rw_access'] = ( '"%s"' % ganesha_utils.format_access_list(access) ) for key in export_opts: export[key] = export_opts[key] exports[new_id] = export LOG.info(_LI('Add %(share)s with access from %(access)s'), {'share': share['name'], 'access': access}) else: # Update existing access with new/extended access information export = ganesha_utils.get_export_by_path(exports, local_path) initial_access = export['rw_access'].strip('"') merged_access = ','.join([access, initial_access]) updated_access = ganesha_utils.format_access_list( merged_access ) if initial_access != updated_access: LOG.info(_LI('Update %(share)s with access from ' '%(access)s'), {'share': share['name'], 'access': access}) export['rw_access'] = '"%s"' % updated_access else: LOG.info(_LI('Do not update %(share)s, access from ' '%(access)s already defined'), {'share': share['name'], 'access': access}) reload_needed = False elif (req_type == "deny_access"): export = ganesha_utils.get_export_by_path(exports, local_path) initial_access = export['rw_access'].strip('"') updated_access = ganesha_utils.format_access_list( initial_access, deny_access=access ) if initial_access != updated_access: LOG.info(_LI('Update %(share)s removing access from ' '%(access)s'), {'share': share['name'], 'access': access}) export['rw_access'] = '"%s"' % updated_access else: LOG.info(_LI('Do not update %(share)s, access from %(access)s ' 'already removed'), {'share': share['name'], 'access': access}) reload_needed = False elif (req_type == "remove_export"): export = ganesha_utils.get_export_by_path(exports, local_path) if export: 
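# An export already exists for this path: drop its definition so the
# republished Ganesha config no longer references the share.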
exports.pop(export['export_id']) LOG.info(_LI('Remove export for %s'), share['name']) else: LOG.info(_LI('Export for %s is not defined in Ganesha ' 'config.'), share['name']) reload_needed = False if reload_needed: # publish config to all servers and reload or restart ganesha_utils.publish_ganesha_config(gservers, sshlogin, sshkey, cfgpath, pre_lines, exports) ganesha_utils.reload_ganesha_config(gservers, sshlogin, gservice) def remove_export(self, local_path, share): """Remove export.""" self._ganesha_process_request("remove_export", local_path, share) def allow_access(self, local_path, share, access_type, access): """Allow access to the host.""" # TODO(nileshb): add support for read only, metadata, and other # access types if access_type != 'ip': raise exception.InvalidShareAccess('Only ip access type ' 'supported.') self._ganesha_process_request("allow_access", local_path, share, access_type, access) def deny_access(self, local_path, share, access_type, access, force=False): """Deny access to the host.""" self._ganesha_process_request("deny_access", local_path, share, access_type, access, force) manila-2.0.0/manila/share/drivers/netapp/0000775000567000056710000000000012701407265021427 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/netapp/options.py0000664000567000056710000001327412701407107023476 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Contains configuration options for NetApp drivers. Common place to hold configuration options for all NetApp drivers. Options need to be grouped into granular units to be able to be reused by different modules and classes. This does not restrict declaring options in individual modules. If options are not re usable then can be declared in individual modules. It is recommended to Keep options at a single place to ensure re usability and better management of configuration options. """ from oslo_config import cfg netapp_proxy_opts = [ cfg.StrOpt('netapp_storage_family', default='ontap_cluster', help=('The storage family type used on the storage system; ' 'valid values include ontap_cluster for using ' 'clustered Data ONTAP.')), ] netapp_connection_opts = [ cfg.StrOpt('netapp_server_hostname', deprecated_name='netapp_nas_server_hostname', help='The hostname (or IP address) for the storage system.'), cfg.PortOpt('netapp_server_port', help=('The TCP port to use for communication with the storage ' 'system or proxy server. If not specified, Data ONTAP ' 'drivers will use 80 for HTTP and 443 for HTTPS.')), ] netapp_transport_opts = [ cfg.StrOpt('netapp_transport_type', deprecated_name='netapp_nas_transport_type', default='http', help=('The transport protocol used when communicating with ' 'the storage system or proxy server. 
Valid values are ' 'http or https.')), ] netapp_basicauth_opts = [ cfg.StrOpt('netapp_login', deprecated_name='netapp_nas_login', help=('Administrative user account name used to access the ' 'storage system.')), cfg.StrOpt('netapp_password', deprecated_name='netapp_nas_password', help=('Password for the administrative user account ' 'specified in the netapp_login option.'), secret=True), ] netapp_provisioning_opts = [ cfg.StrOpt('netapp_volume_name_template', deprecated_name='netapp_nas_volume_name_template', help='NetApp volume name template.', default='share_%(share_id)s'), cfg.StrOpt('netapp_vserver_name_template', default='os_%s', help='Name template to use for new Vserver.'), cfg.StrOpt('netapp_port_name_search_pattern', default='(.*)', help='Pattern for overriding the selection of network ports ' 'on which to create Vserver LIFs.'), cfg.StrOpt('netapp_lif_name_template', default='os_%(net_allocation_id)s', help='Logical interface (LIF) name template'), cfg.StrOpt('netapp_aggregate_name_search_pattern', default='(.*)', help='Pattern for searching available aggregates ' 'for provisioning.'), cfg.StrOpt('netapp_root_volume_aggregate', help='Name of aggregate to create Vserver root volumes on. ' 'This option only applies when the option ' 'driver_handles_share_servers is set to True.'), cfg.StrOpt('netapp_root_volume', deprecated_name='netapp_root_volume_name', default='root', help='Root volume name.'), cfg.IntOpt('netapp_volume_snapshot_reserve_percent', min=0, max=90, default=5, help='The percentage of share space set aside as reserve for ' 'snapshot usage; valid values range from 0 to 90.'), ] netapp_cluster_opts = [ cfg.StrOpt('netapp_vserver', help=('This option specifies the Storage Virtual Machine ' '(i.e. Vserver) name on the storage cluster on which ' 'provisioning of file storage shares should occur. This ' 'option should only be specified when the option ' 'driver_handles_share_servers is set to False (i.e. the ' 'driver is managing shares on a single pre-configured ' 'Vserver).')), ] netapp_support_opts = [ cfg.StrOpt('netapp_trace_flags', help=('Comma-separated list of options that control which ' 'trace info is written to the debug logs. Values ' 'include method and api.')), ] netapp_replication_opts = [ cfg.IntOpt('netapp_snapmirror_quiesce_timeout', min=0, default=3600, # One Hour help='The maximum time in seconds to wait for existing ' 'snapmirror transfers to complete before aborting when ' 'promoting a replica.'), ] CONF = cfg.CONF CONF.register_opts(netapp_proxy_opts) CONF.register_opts(netapp_connection_opts) CONF.register_opts(netapp_transport_opts) CONF.register_opts(netapp_basicauth_opts) CONF.register_opts(netapp_provisioning_opts) CONF.register_opts(netapp_support_opts) CONF.register_opts(netapp_replication_opts) manila-2.0.0/manila/share/drivers/netapp/utils.py0000664000567000056710000001664412701407107023147 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Bob Callaway. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Utilities for NetApp drivers.""" import collections import decimal import platform from oslo_concurrency import processutils as putils from oslo_log import log import six from manila import exception from manila.i18n import _, _LI, _LW from manila import version LOG = log.getLogger(__name__) VALID_TRACE_FLAGS = ['method', 'api'] TRACE_METHOD = False TRACE_API = False def validate_driver_instantiation(**kwargs): """Checks if a driver is instantiated other than by the unified driver. Helps check direct instantiation of netapp drivers. Call this function in every netapp block driver constructor. """ if kwargs and kwargs.get('netapp_mode') == 'proxy': return LOG.warning(_LW('Please use NetAppDriver in the configuration file ' 'to load the driver instead of directly specifying ' 'the driver module name.')) def check_flags(required_flags, configuration): """Ensure that the flags we care about are set.""" for flag in required_flags: if getattr(configuration, flag, None) is None: msg = _('Configuration value %s is not set.') % flag raise exception.InvalidInput(reason=msg) def round_down(value, precision): """Round a number downward using a specified level of precision. Example: round_down(float(total_space_in_bytes) / units.Gi, '0.01') """ return float(decimal.Decimal(six.text_type(value)).quantize( decimal.Decimal(precision), rounding=decimal.ROUND_DOWN)) def setup_tracing(trace_flags_string): global TRACE_METHOD global TRACE_API TRACE_METHOD = False TRACE_API = False if trace_flags_string: flags = trace_flags_string.split(',') flags = [flag.strip() for flag in flags] for invalid_flag in list(set(flags) - set(VALID_TRACE_FLAGS)): LOG.warning(_LW('Invalid trace flag: %s') % invalid_flag) TRACE_METHOD = 'method' in flags TRACE_API = 'api' in flags def trace(f): def trace_wrapper(self, *args, **kwargs): if TRACE_METHOD: LOG.debug('Entering method %s', f.__name__) result = f(self, *args, **kwargs) if TRACE_METHOD: LOG.debug('Leaving method %s', f.__name__) return result return trace_wrapper def convert_to_list(value): if value is None: return [] elif isinstance(value, six.string_types): return [value] elif isinstance(value, collections.Iterable): return list(value) else: return [value] class OpenStackInfo(object): """OS/distribution, release, and version. NetApp uses these fields as content for EMS log entry. """ PACKAGE_NAME = 'python-manila' def __init__(self): self._version = 'unknown version' self._release = 'unknown release' self._vendor = 'unknown vendor' self._platform = 'unknown platform' def _update_version_from_version_string(self): try: self._version = version.version_info.version_string() except Exception: pass def _update_release_from_release_string(self): try: self._release = version.version_info.release_string() except Exception: pass def _update_platform(self): try: self._platform = platform.platform() except Exception: pass @staticmethod def _get_version_info_version(): return version.version_info.version @staticmethod def _get_version_info_release(): return version.version_info.release def _update_info_from_version_info(self): try: ver = self._get_version_info_version() if ver: self._version = ver except Exception: pass try: rel = self._get_version_info_release() if rel: self._release = rel except Exception: pass # RDO, RHEL-OSP, Mirantis on Redhat, SUSE. 
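# Note: the rpm query below prints "version<tab>release<tab>vendor"; the
# output is split on whitespace, so the vendor string may span several
# tokens and is re-joined from the remaining fields.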
def _update_info_from_rpm(self): LOG.debug('Trying rpm command.') try: out, err = putils.execute("rpm", "-q", "--queryformat", "'%{version}\t%{release}\t%{vendor}'", self.PACKAGE_NAME) if not out: LOG.info(_LI('No rpm info found for %(pkg)s package.') % { 'pkg': self.PACKAGE_NAME}) return False parts = out.split() self._version = parts[0] self._release = parts[1] self._vendor = ' '.join(parts[2::]) return True except Exception as e: LOG.info(_LI('Could not run rpm command: %(msg)s.') % { 'msg': e}) return False # Ubuntu, Mirantis on Ubuntu. def _update_info_from_dpkg(self): LOG.debug('Trying dpkg-query command.') try: _vendor = None out, err = putils.execute("dpkg-query", "-W", "-f='${Version}'", self.PACKAGE_NAME) if not out: LOG.info(_LI( 'No dpkg-query info found for %(pkg)s package.') % { 'pkg': self.PACKAGE_NAME}) return False # Debian format: [epoch:]upstream_version[-debian_revision] deb_version = out # In case epoch or revision is missing, copy entire string. _release = deb_version if ':' in deb_version: deb_epoch, upstream_version = deb_version.split(':') _release = upstream_version if '-' in deb_version: deb_revision = deb_version.split('-')[1] _vendor = deb_revision self._release = _release if _vendor: self._vendor = _vendor return True except Exception as e: LOG.info(_LI('Could not run dpkg-query command: %(msg)s.') % { 'msg': e}) return False def _update_openstack_info(self): self._update_version_from_version_string() self._update_release_from_release_string() self._update_platform() # Some distributions override with more meaningful information. self._update_info_from_version_info() # See if we have still more targeted info from rpm or apt. found_package = self._update_info_from_rpm() if not found_package: self._update_info_from_dpkg() def info(self): self._update_openstack_info() return '%(version)s|%(release)s|%(vendor)s|%(platform)s' % { 'version': self._version, 'release': self._release, 'vendor': self._vendor, 'platform': self._platform} manila-2.0.0/manila/share/drivers/netapp/__init__.py0000664000567000056710000000000012701407107023521 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/netapp/common.py0000664000567000056710000001116412701407107023267 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unified driver for NetApp storage systems. Supports multiple storage systems of different families and driver modes. """ from oslo_log import log from oslo_utils import importutils from manila import exception from manila.i18n import _, _LI from manila.share import driver from manila.share.drivers.netapp import options from manila.share.drivers.netapp import utils as na_utils LOG = log.getLogger(__name__) MULTI_SVM = 'multi_svm' SINGLE_SVM = 'single_svm' DATAONTAP_CMODE_PATH = 'manila.share.drivers.netapp.dataontap.cluster_mode' # Add new drivers here, no other code changes required. 
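# A new storage family would be registered here, for example (hypothetical
# module paths):
#     'some_new_family': {
#         MULTI_SVM: 'path.to.some_new_multi_svm.Driver',
#         SINGLE_SVM: 'path.to.some_new_single_svm.Driver',
#     },
# and optionally given a default mode in NETAPP_UNIFIED_DRIVER_DEFAULT_MODE.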
NETAPP_UNIFIED_DRIVER_REGISTRY = { 'ontap_cluster': { MULTI_SVM: DATAONTAP_CMODE_PATH + '.drv_multi_svm.NetAppCmodeMultiSvmShareDriver', SINGLE_SVM: DATAONTAP_CMODE_PATH + '.drv_single_svm.NetAppCmodeSingleSvmShareDriver', }, } NETAPP_UNIFIED_DRIVER_DEFAULT_MODE = { 'ontap_cluster': MULTI_SVM, } class NetAppDriver(object): """"NetApp unified share storage driver. Acts as a factory to create NetApp storage drivers based on the storage family and driver mode configured. """ REQUIRED_FLAGS = ['netapp_storage_family', 'driver_handles_share_servers'] def __new__(cls, *args, **kwargs): config = kwargs.get('configuration', None) if not config: raise exception.InvalidInput( reason=_('Required configuration not found.')) config.append_config_values(driver.share_opts) config.append_config_values(options.netapp_proxy_opts) na_utils.check_flags(NetAppDriver.REQUIRED_FLAGS, config) app_version = na_utils.OpenStackInfo().info() LOG.info(_LI('OpenStack OS Version Info: %s'), app_version) kwargs['app_version'] = app_version driver_mode = NetAppDriver._get_driver_mode( config.netapp_storage_family, config.driver_handles_share_servers) return NetAppDriver._create_driver(config.netapp_storage_family, driver_mode, *args, **kwargs) @staticmethod def _get_driver_mode(storage_family, driver_handles_share_servers): if driver_handles_share_servers is None: driver_mode = NETAPP_UNIFIED_DRIVER_DEFAULT_MODE.get( storage_family.lower()) if driver_mode: LOG.debug('Default driver mode %s selected.', driver_mode) else: raise exception.InvalidInput( reason=_('Driver mode was not specified and a default ' 'value could not be determined from the ' 'specified storage family.')) elif driver_handles_share_servers: driver_mode = MULTI_SVM else: driver_mode = SINGLE_SVM return driver_mode @staticmethod def _create_driver(storage_family, driver_mode, *args, **kwargs): """"Creates an appropriate driver based on family and mode.""" storage_family = storage_family.lower() fmt = {'storage_family': storage_family, 'driver_mode': driver_mode} LOG.info(_LI('Requested unified config: %(storage_family)s and ' '%(driver_mode)s.') % fmt) family_meta = NETAPP_UNIFIED_DRIVER_REGISTRY.get(storage_family) if family_meta is None: raise exception.InvalidInput( reason=_('Storage family %s is not supported.') % storage_family) driver_loc = family_meta.get(driver_mode) if driver_loc is None: raise exception.InvalidInput( reason=_('Driver mode %(driver_mode)s is not supported ' 'for storage family %(storage_family)s.') % fmt) kwargs['netapp_mode'] = 'proxy' driver = importutils.import_object(driver_loc, *args, **kwargs) LOG.info(_LI('NetApp driver of family %(storage_family)s and mode ' '%(driver_mode)s loaded.') % fmt) return driver manila-2.0.0/manila/share/drivers/netapp/dataontap/0000775000567000056710000000000012701407265023402 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/netapp/dataontap/protocols/0000775000567000056710000000000012701407265025426 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/netapp/dataontap/protocols/nfs_cmode.py0000664000567000056710000001677612701407107027751 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NetApp cDOT NFS protocol helper class. """ import uuid import netaddr from oslo_log import log import six from manila.common import constants from manila import exception from manila.i18n import _, _LI from manila.share.drivers.netapp.dataontap.protocols import base from manila.share.drivers.netapp import utils as na_utils LOG = log.getLogger(__name__) class NetAppCmodeNFSHelper(base.NetAppBaseHelper): """NetApp cDOT NFS protocol helper class.""" @na_utils.trace def create_share(self, share, share_name): """Creates NFS share.""" self._client.clear_nfs_export_policy_for_volume(share_name) self._ensure_export_policy(share, share_name) export_path = self._client.get_volume_junction_path(share_name) # Return a callback that may be used for generating export paths # for this share. return (lambda export_address, export_path=export_path: ':'.join([export_address, export_path])) @na_utils.trace @base.access_rules_synchronized def delete_share(self, share, share_name): """Deletes NFS share.""" LOG.debug('Deleting NFS export policy for share %s', share['id']) export_policy_name = self._get_export_policy_name(share) self._client.clear_nfs_export_policy_for_volume(share_name) self._client.soft_delete_nfs_export_policy(export_policy_name) @na_utils.trace @base.access_rules_synchronized def update_access(self, share, share_name, rules): """Replaces the list of access rules known to the backend storage.""" # Ensure rules are valid for rule in rules: self._validate_access_rule(rule) # Sort rules by ascending network size new_rules = {rule['access_to']: rule['access_level'] for rule in rules} addresses = self._get_sorted_access_rule_addresses(new_rules) # Ensure current export policy has the name we expect self._ensure_export_policy(share, share_name) export_policy_name = self._get_export_policy_name(share) # Make temp policy names so this non-atomic workflow remains resilient # across process interruptions. 
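# The sequence is: build a new policy under a temporary name, populate it
# with the sorted rules, rename the in-force policy aside, point the volume
# at the new policy, soft-delete the old policy, and finally rename the new
# policy to its permanent name.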
temp_new_export_policy_name = self._get_temp_export_policy_name() temp_old_export_policy_name = self._get_temp_export_policy_name() # Create new export policy self._client.create_nfs_export_policy(temp_new_export_policy_name) # Add new rules to new policy for address in addresses: self._client.add_nfs_export_rule( temp_new_export_policy_name, address, self._is_readonly(new_rules[address])) # Rename policy currently in force LOG.info(_LI('Renaming NFS export policy for share %(share)s to ' '%(policy)s.') % {'share': share_name, 'policy': temp_old_export_policy_name}) self._client.rename_nfs_export_policy(export_policy_name, temp_old_export_policy_name) # Switch share to the new policy LOG.info(_LI('Setting NFS export policy for share %(share)s to ' '%(policy)s.') % {'share': share_name, 'policy': temp_new_export_policy_name}) self._client.set_nfs_export_policy_for_volume( share_name, temp_new_export_policy_name) # Delete old policy self._client.soft_delete_nfs_export_policy(temp_old_export_policy_name) # Rename new policy to its final name LOG.info(_LI('Renaming NFS export policy for share %(share)s to ' '%(policy)s.') % {'share': share_name, 'policy': export_policy_name}) self._client.rename_nfs_export_policy(temp_new_export_policy_name, export_policy_name) @na_utils.trace def _validate_access_rule(self, rule): """Checks whether access rule type and level are valid.""" if rule['access_type'] != 'ip': msg = _("Clustered Data ONTAP supports only 'ip' type for share " "access rules with NFS protocol.") raise exception.InvalidShareAccess(reason=msg) if rule['access_level'] not in constants.ACCESS_LEVELS: raise exception.InvalidShareAccessLevel(level=rule['access_level']) @na_utils.trace def _get_sorted_access_rule_addresses(self, rules): """Given a dict of access rules, sort by increasing network size.""" networks = sorted([self._get_network_object_from_rule(rule) for rule in rules], reverse=True) return [six.text_type(network) for network in networks] def _get_network_object_from_rule(self, rule): """Get most appropriate netaddr object for address or network rule.""" try: return netaddr.IPAddress(rule) except ValueError: return netaddr.IPNetwork(rule) @na_utils.trace def get_target(self, share): """Returns ID of target OnTap device based on export location.""" return self._get_export_location(share)[0] @na_utils.trace def get_share_name_for_share(self, share): """Returns the flexvol name that hosts a share.""" _, volume_junction_path = self._get_export_location(share) volume = self._client.get_volume_at_junction_path(volume_junction_path) return volume.get('name') if volume else None @staticmethod def _get_export_location(share): """Returns IP address and export location of an NFS share.""" export_location = share['export_location'] or ':' return export_location.rsplit(':', 1) @staticmethod def _get_temp_export_policy_name(): """Builds export policy name for an NFS share.""" return 'temp_' + six.text_type(uuid.uuid1()).replace('-', '_') @staticmethod def _get_export_policy_name(share): """Builds export policy name for an NFS share.""" return 'policy_' + share['id'].replace('-', '_') @na_utils.trace def _ensure_export_policy(self, share, share_name): """Ensures a flexvol/share has an export policy. This method ensures a flexvol has an export policy with a name containing the share ID. For legacy reasons, this may not always be the case. 
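Three cases are handled: the volume's policy already has the expected name (nothing to do), the volume still uses the 'default' policy (a new policy is created and assigned), or a differently named policy exists (it is renamed to the expected name).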
""" expected_export_policy = self._get_export_policy_name(share) actual_export_policy = self._client.get_nfs_export_policy_for_volume( share_name) if actual_export_policy == expected_export_policy: return elif actual_export_policy == 'default': self._client.create_nfs_export_policy(expected_export_policy) self._client.set_nfs_export_policy_for_volume( share_name, expected_export_policy) else: self._client.rename_nfs_export_policy(actual_export_policy, expected_export_policy) manila-2.0.0/manila/share/drivers/netapp/dataontap/protocols/cifs_cmode.py0000664000567000056710000001462312701407112030070 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NetApp cDOT CIFS protocol helper class. """ import re from oslo_log import log from manila.common import constants from manila import exception from manila.i18n import _ from manila.share.drivers.netapp.dataontap.protocols import base from manila.share.drivers.netapp import utils as na_utils LOG = log.getLogger(__name__) class NetAppCmodeCIFSHelper(base.NetAppBaseHelper): """NetApp cDOT CIFS protocol helper class.""" @na_utils.trace def create_share(self, share, share_name): """Creates CIFS share on Data ONTAP Vserver.""" self._client.create_cifs_share(share_name) self._client.remove_cifs_share_access(share_name, 'Everyone') # Return a callback that may be used for generating export paths # for this share. 
return (lambda export_address, share_name=share_name: r'\\%s\%s' % (export_address, share_name)) @na_utils.trace def delete_share(self, share, share_name): """Deletes CIFS share on Data ONTAP Vserver.""" host_ip, share_name = self._get_export_location(share) self._client.remove_cifs_share(share_name) @na_utils.trace @base.access_rules_synchronized def update_access(self, share, share_name, rules): """Replaces the list of access rules known to the backend storage.""" # Ensure rules are valid for rule in rules: self._validate_access_rule(rule) new_rules = {rule['access_to']: rule['access_level'] for rule in rules} # Get rules from share existing_rules = self._get_access_rules(share, share_name) # Update rules in an order that will prevent transient disruptions self._handle_added_rules(share_name, existing_rules, new_rules) self._handle_ro_to_rw_rules(share_name, existing_rules, new_rules) self._handle_rw_to_ro_rules(share_name, existing_rules, new_rules) self._handle_deleted_rules(share_name, existing_rules, new_rules) @na_utils.trace def _validate_access_rule(self, rule): """Checks whether access rule type and level are valid.""" if rule['access_type'] != 'user': msg = _("Clustered Data ONTAP supports only 'user' type for " "share access rules with CIFS protocol.") raise exception.InvalidShareAccess(reason=msg) if rule['access_level'] not in constants.ACCESS_LEVELS: raise exception.InvalidShareAccessLevel(level=rule['access_level']) @na_utils.trace def _handle_added_rules(self, share_name, existing_rules, new_rules): """Updates access rules added between two rule sets.""" added_rules = { user_or_group: permission for user_or_group, permission in new_rules.items() if user_or_group not in existing_rules } for user_or_group, permission in added_rules.items(): self._client.add_cifs_share_access( share_name, user_or_group, self._is_readonly(permission)) @na_utils.trace def _handle_ro_to_rw_rules(self, share_name, existing_rules, new_rules): """Updates access rules modified (RO-->RW) between two rule sets.""" modified_rules = { user_or_group: permission for user_or_group, permission in new_rules.items() if (user_or_group in existing_rules and permission == constants.ACCESS_LEVEL_RW and existing_rules[user_or_group] != 'full_control') } for user_or_group, permission in modified_rules.items(): self._client.modify_cifs_share_access( share_name, user_or_group, self._is_readonly(permission)) @na_utils.trace def _handle_rw_to_ro_rules(self, share_name, existing_rules, new_rules): """Updates access rules modified (RW-->RO) between two rule sets.""" modified_rules = { user_or_group: permission for user_or_group, permission in new_rules.items() if (user_or_group in existing_rules and permission == constants.ACCESS_LEVEL_RO and existing_rules[user_or_group] != 'read') } for user_or_group, permission in modified_rules.items(): self._client.modify_cifs_share_access( share_name, user_or_group, self._is_readonly(permission)) @na_utils.trace def _handle_deleted_rules(self, share_name, existing_rules, new_rules): """Removes access rules deleted between two rule sets.""" deleted_rules = { user_or_group: permission for user_or_group, permission in existing_rules.items() if user_or_group not in new_rules } for user_or_group, permission in deleted_rules.items(): self._client.remove_cifs_share_access(share_name, user_or_group) @na_utils.trace def _get_access_rules(self, share, share_name): """Returns the list of access rules known to the backend storage.""" return self._client.get_cifs_share_access(share_name) 
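# Worked example for the rule-diff handlers above (hypothetical rules):
# existing = {'alice': 'full_control'} and new = {'alice': 'ro', 'bob': 'rw'}
# results in 'bob' being added read/write, 'alice' being modified to
# read-only, and nothing being deleted.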
@na_utils.trace def get_target(self, share): """Returns OnTap target IP based on share export location.""" return self._get_export_location(share)[0] @na_utils.trace def get_share_name_for_share(self, share): """Returns the flexvol name that hosts a share.""" _, share_name = self._get_export_location(share) return share_name @staticmethod def _get_export_location(share): """Returns host ip and share name for a given CIFS share.""" export_location = share['export_location'] or '\\\\\\' regex = r'^(?:\\\\|//)(?P.*)(?:\\|/)(?P.*)$' match = re.match(regex, export_location) if match: return match.group('host_ip'), match.group('share_name') else: return '', '' manila-2.0.0/manila/share/drivers/netapp/dataontap/protocols/__init__.py0000664000567000056710000000000012701407107027520 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/netapp/dataontap/protocols/base.py0000664000567000056710000000425612701407107026714 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Abstract base class for NetApp NAS protocol helper classes. """ import abc import six from manila.common import constants from manila import utils def access_rules_synchronized(f): """Decorator for synchronizing share access rule modification methods.""" def wrapped_func(self, *args, **kwargs): # The first argument is always a share, which has an ID key = "share-access-%s" % args[0]['id'] @utils.synchronized(key) def source_func(self, *args, **kwargs): return f(self, *args, **kwargs) return source_func(self, *args, **kwargs) return wrapped_func @six.add_metaclass(abc.ABCMeta) class NetAppBaseHelper(object): """Interface for protocol-specific NAS drivers.""" def __init__(self): self._client = None def set_client(self, client): self._client = client def _is_readonly(self, access_level): """Returns whether an access rule specifies read-only access.""" return access_level == constants.ACCESS_LEVEL_RO @abc.abstractmethod def create_share(self, share, share_name): """Creates NAS share.""" @abc.abstractmethod def delete_share(self, share, share_name): """Deletes NAS share.""" @abc.abstractmethod def update_access(self, share, share_name, rules): """Replaces the list of access rules known to the backend storage.""" @abc.abstractmethod def get_target(self, share): """Returns host where the share located.""" @abc.abstractmethod def get_share_name_for_share(self, share): """Returns the flexvol name that hosts a share.""" manila-2.0.0/manila/share/drivers/netapp/dataontap/cluster_mode/0000775000567000056710000000000012701407265026067 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/netapp/dataontap/cluster_mode/lib_single_svm.py0000664000567000056710000001220012701407107031423 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NetApp Data ONTAP cDOT single-SVM storage driver library. This library extends the abstract base library and completes the single-SVM functionality needed by the cDOT single-SVM Manila driver. This library variant uses a single Data ONTAP storage virtual machine (i.e. 'vserver') as defined in manila.conf to provision shares. """ import re from oslo_log import log from manila import exception from manila.i18n import _, _LI from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base from manila.share.drivers.netapp import utils as na_utils LOG = log.getLogger(__name__) class NetAppCmodeSingleSVMFileStorageLibrary( lib_base.NetAppCmodeFileStorageLibrary): def __init__(self, driver_name, **kwargs): super(NetAppCmodeSingleSVMFileStorageLibrary, self).__init__( driver_name, **kwargs) self._vserver = self.configuration.netapp_vserver @na_utils.trace def check_for_setup_error(self): # Ensure vserver is specified in configuration. if not self._vserver: msg = _('Vserver must be specified in the configuration ' 'when the driver is not managing share servers.') raise exception.InvalidInput(reason=msg) # Ensure vserver exists. if not self._client.vserver_exists(self._vserver): raise exception.VserverNotFound(vserver=self._vserver) # If we have vserver credentials, ensure the vserver they connect # to matches the vserver specified in the configuration. if not self._have_cluster_creds: if self._vserver not in self._client.list_vservers(): msg = _('Vserver specified in the configuration does not ' 'match supplied credentials.') raise exception.InvalidInput(reason=msg) # Ensure one or more aggregates are available to the vserver. if not self._find_matching_aggregates(): msg = _('No aggregates are available to Vserver %s for ' 'provisioning shares. 
Ensure that one or more aggregates ' 'are assigned to the Vserver and that the configuration ' 'option netapp_aggregate_name_search_pattern is set ' 'correctly.') % self._vserver raise exception.NetAppException(msg) msg = _LI('Using Vserver %(vserver)s for backend %(backend)s with ' '%(creds)s credentials.') msg_args = {'vserver': self._vserver, 'backend': self._backend_name} msg_args['creds'] = ('cluster' if self._have_cluster_creds else 'Vserver') LOG.info(msg % msg_args) super(NetAppCmodeSingleSVMFileStorageLibrary, self).\ check_for_setup_error() @na_utils.trace def _get_vserver(self, share_server=None): if share_server is not None: msg = _('Share server must not be passed to the driver ' 'when the driver is not managing share servers.') raise exception.InvalidParameterValue(err=msg) if not self._vserver: msg = _('Vserver not specified in configuration.') raise exception.InvalidInput(reason=msg) if not self._client.vserver_exists(self._vserver): raise exception.VserverNotFound(vserver=self._vserver) vserver_client = self._get_api_client(self._vserver) return self._vserver, vserver_client @na_utils.trace def _handle_housekeeping_tasks(self): """Handle various cleanup activities.""" vserver_client = self._get_api_client(vserver=self._vserver) vserver_client.prune_deleted_nfs_export_policies() vserver_client.prune_deleted_snapshots() super(NetAppCmodeSingleSVMFileStorageLibrary, self).\ _handle_housekeeping_tasks() @na_utils.trace def _find_matching_aggregates(self): """Find all aggregates match pattern.""" vserver_client = self._get_api_client(vserver=self._vserver) aggregate_names = vserver_client.list_vserver_aggregates() pattern = self.configuration.netapp_aggregate_name_search_pattern return [aggr_name for aggr_name in aggregate_names if re.match(pattern, aggr_name)] @na_utils.trace def get_network_allocations_number(self): """Get number of network interfaces to be created.""" return 0 @na_utils.trace def get_admin_network_allocations_number(self): """Get number of network allocations for creating admin LIFs.""" return 0 manila-2.0.0/manila/share/drivers/netapp/dataontap/cluster_mode/__init__.py0000664000567000056710000000000012701407107030161 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/netapp/dataontap/cluster_mode/drv_multi_svm.py0000664000567000056710000001417612701407107031337 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NetApp Data ONTAP cDOT multi-SVM storage driver. This driver requires a Data ONTAP (Cluster-mode) storage system with installed CIFS and/or NFS licenses, as well as a FlexClone license. This driver manages share servers, meaning it creates Data ONTAP storage virtual machines (i.e. 'vservers') for each share network for provisioning shares. This driver supports NFS & CIFS protocols. 
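A minimal back-end stanza for this driver might look like the following (illustrative section name and values; the netapp_* options are defined in manila.share.drivers.netapp.options):

    [cmode_multi_svm]
    share_driver = manila.share.drivers.netapp.common.NetAppDriver
    driver_handles_share_servers = True
    netapp_storage_family = ontap_cluster
    netapp_server_hostname = cluster-mgmt.example.com
    netapp_login = admin
    netapp_password = secret
    netapp_root_volume_aggregate = aggr1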
""" from manila.share import driver from manila.share.drivers.netapp.dataontap.cluster_mode import lib_multi_svm class NetAppCmodeMultiSvmShareDriver(driver.ShareDriver): """NetApp Cluster-mode multi-SVM share driver.""" DRIVER_NAME = 'NetApp_Cluster_MultiSVM' def __init__(self, *args, **kwargs): super(NetAppCmodeMultiSvmShareDriver, self).__init__( True, *args, **kwargs) self.library = lib_multi_svm.NetAppCmodeMultiSVMFileStorageLibrary( self.DRIVER_NAME, **kwargs) def do_setup(self, context): self.library.do_setup(context) def check_for_setup_error(self): self.library.check_for_setup_error() def get_pool(self, share): return self.library.get_pool(share) def create_share(self, context, share, **kwargs): return self.library.create_share(context, share, **kwargs) def create_share_from_snapshot(self, context, share, snapshot, **kwargs): return self.library.create_share_from_snapshot(context, share, snapshot, **kwargs) def create_snapshot(self, context, snapshot, **kwargs): self.library.create_snapshot(context, snapshot, **kwargs) def delete_share(self, context, share, **kwargs): self.library.delete_share(context, share, **kwargs) def delete_snapshot(self, context, snapshot, **kwargs): self.library.delete_snapshot(context, snapshot, **kwargs) def extend_share(self, share, new_size, **kwargs): self.library.extend_share(share, new_size, **kwargs) def shrink_share(self, share, new_size, **kwargs): self.library.shrink_share(share, new_size, **kwargs) def create_consistency_group(self, context, cg_dict, **kwargs): return self.library.create_consistency_group(context, cg_dict, **kwargs) def create_consistency_group_from_cgsnapshot(self, context, cg_dict, cgsnapshot_dict, **kwargs): return self.library.create_consistency_group_from_cgsnapshot( context, cg_dict, cgsnapshot_dict, **kwargs) def delete_consistency_group(self, context, cg_dict, **kwargs): return self.library.delete_consistency_group(context, cg_dict, **kwargs) def create_cgsnapshot(self, context, snap_dict, **kwargs): return self.library.create_cgsnapshot(context, snap_dict, **kwargs) def delete_cgsnapshot(self, context, snap_dict, **kwargs): return self.library.delete_cgsnapshot(context, snap_dict, **kwargs) def ensure_share(self, context, share, **kwargs): pass def manage_existing(self, share, driver_options): raise NotImplementedError def unmanage(self, share): raise NotImplementedError def update_access(self, context, share, access_rules, add_rules, delete_rules, **kwargs): self.library.update_access(context, share, access_rules, add_rules, delete_rules, **kwargs) def _update_share_stats(self, data=None): data = self.library.get_share_stats() super(NetAppCmodeMultiSvmShareDriver, self)._update_share_stats( data=data) def get_share_server_pools(self, share_server): return self.library.get_share_server_pools(share_server) def get_network_allocations_number(self): return self.library.get_network_allocations_number() def get_admin_network_allocations_number(self): return self.library.get_admin_network_allocations_number( self.admin_network_api) def _setup_server(self, network_info, metadata=None): return self.library.setup_server(network_info, metadata) def _teardown_server(self, server_details, **kwargs): self.library.teardown_server(server_details, **kwargs) def create_replica(self, context, replica_list, replica, access_rules, replica_snapshots, **kwargs): raise NotImplementedError() def delete_replica(self, context, replica_list, replica_snapshots, replica, **kwargs): raise NotImplementedError() def promote_replica(self, context, 
replica_list, replica, access_rules, share_server=None): raise NotImplementedError() def update_replica_state(self, context, replica_list, replica, access_rules, replica_snapshots, share_server=None): raise NotImplementedError() def create_replicated_snapshot(self, context, replica_list, replica_snapshots, share_server=None): raise NotImplementedError() def delete_replicated_snapshot(self, context, replica_list, replica_snapshots, share_server=None): raise NotImplementedError() def update_replicated_snapshot(self, context, replica_list, share_replica, replica_snapshots, replica_snapshot, share_server=None): raise NotImplementedError() manila-2.0.0/manila/share/drivers/netapp/dataontap/cluster_mode/drv_single_svm.py0000664000567000056710000001642712701407107031467 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NetApp Data ONTAP cDOT single-SVM storage driver. This driver requires a Data ONTAP (Cluster-mode) storage system with installed CIFS and/or NFS licenses, as well as a FlexClone license. This driver does not manage share servers, meaning it uses a single Data ONTAP storage virtual machine (i.e. 'vserver') as defined in manila.conf to provision shares. This driver supports NFS & CIFS protocols. 
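Unlike the multi-SVM variant, a back end using this driver must set driver_handles_share_servers to False and name the pre-configured Vserver via the netapp_vserver option; the single-SVM library's check_for_setup_error() rejects configurations where the Vserver is missing or does not exist.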
""" from manila.share import driver from manila.share.drivers.netapp.dataontap.cluster_mode import lib_single_svm class NetAppCmodeSingleSvmShareDriver(driver.ShareDriver): """NetApp Cluster-mode single-SVM share driver.""" DRIVER_NAME = 'NetApp_Cluster_SingleSVM' def __init__(self, *args, **kwargs): super(NetAppCmodeSingleSvmShareDriver, self).__init__( False, *args, **kwargs) self.library = lib_single_svm.NetAppCmodeSingleSVMFileStorageLibrary( self.DRIVER_NAME, **kwargs) def do_setup(self, context): self.library.do_setup(context) def check_for_setup_error(self): self.library.check_for_setup_error() def get_pool(self, share): return self.library.get_pool(share) def create_share(self, context, share, **kwargs): return self.library.create_share(context, share, **kwargs) def create_share_from_snapshot(self, context, share, snapshot, **kwargs): return self.library.create_share_from_snapshot(context, share, snapshot, **kwargs) def create_snapshot(self, context, snapshot, **kwargs): return self.library.create_snapshot(context, snapshot, **kwargs) def delete_share(self, context, share, **kwargs): self.library.delete_share(context, share, **kwargs) def delete_snapshot(self, context, snapshot, **kwargs): self.library.delete_snapshot(context, snapshot, **kwargs) def extend_share(self, share, new_size, **kwargs): self.library.extend_share(share, new_size, **kwargs) def shrink_share(self, share, new_size, **kwargs): self.library.shrink_share(share, new_size, **kwargs) def create_consistency_group(self, context, cg_dict, **kwargs): return self.library.create_consistency_group(context, cg_dict, **kwargs) def create_consistency_group_from_cgsnapshot(self, context, cg_dict, cgsnapshot_dict, **kwargs): return self.library.create_consistency_group_from_cgsnapshot( context, cg_dict, cgsnapshot_dict, **kwargs) def delete_consistency_group(self, context, cg_dict, **kwargs): return self.library.delete_consistency_group(context, cg_dict, **kwargs) def create_cgsnapshot(self, context, snap_dict, **kwargs): return self.library.create_cgsnapshot(context, snap_dict, **kwargs) def delete_cgsnapshot(self, context, snap_dict, **kwargs): return self.library.delete_cgsnapshot(context, snap_dict, **kwargs) def ensure_share(self, context, share, **kwargs): pass def manage_existing(self, share, driver_options): return self.library.manage_existing(share, driver_options) def unmanage(self, share): self.library.unmanage(share) def update_access(self, context, share, access_rules, add_rules, delete_rules, **kwargs): self.library.update_access(context, share, access_rules, add_rules, delete_rules, **kwargs) def _update_share_stats(self, data=None): data = self.library.get_share_stats() super(NetAppCmodeSingleSvmShareDriver, self)._update_share_stats( data=data) def get_share_server_pools(self, share_server): return self.library.get_share_server_pools(share_server) def get_network_allocations_number(self): return self.library.get_network_allocations_number() def get_admin_network_allocations_number(self): return self.library.get_admin_network_allocations_number() def _setup_server(self, network_info, metadata=None): return self.library.setup_server(network_info, metadata) def _teardown_server(self, server_details, **kwargs): self.library.teardown_server(server_details, **kwargs) def create_replica(self, context, replica_list, replica, access_rules, replica_snapshots, **kwargs): return self.library.create_replica(context, replica_list, replica, access_rules, replica_snapshots, **kwargs) def delete_replica(self, context, 
replica_list, replica_snapshots, replica, **kwargs): self.library.delete_replica(context, replica_list, replica, replica_snapshots, **kwargs) def promote_replica(self, context, replica_list, replica, access_rules, share_server=None): return self.library.promote_replica(context, replica_list, replica, access_rules, share_server=share_server) def update_replica_state(self, context, replica_list, replica, access_rules, replica_snapshots, share_server=None): return self.library.update_replica_state(context, replica_list, replica, access_rules, replica_snapshots, share_server=share_server) def create_replicated_snapshot(self, context, replica_list, replica_snapshots, share_server=None): return self.library.create_replicated_snapshot( context, replica_list, replica_snapshots, share_server=share_server) def delete_replicated_snapshot(self, context, replica_list, replica_snapshots, share_server=None): return self.library.delete_replicated_snapshot( context, replica_list, replica_snapshots, share_server=share_server) def update_replicated_snapshot(self, context, replica_list, share_replica, replica_snapshots, replica_snapshot, share_server=None): return self.library.update_replicated_snapshot( replica_list, share_replica, replica_snapshots, replica_snapshot, share_server=share_server) manila-2.0.0/manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py0000664000567000056710000017506112701407107030206 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NetApp Data ONTAP cDOT base storage driver library. This library is the abstract base for subclasses that complete the single-SVM or multi-SVM functionality needed by the cDOT Manila drivers. """ import copy import math import socket from oslo_config import cfg from oslo_log import log from oslo_service import loopingcall from oslo_utils import timeutils from oslo_utils import units import six from manila.common import constants from manila import exception from manila.i18n import _, _LE, _LI, _LW from manila.share.drivers.netapp.dataontap.client import api as netapp_api from manila.share.drivers.netapp.dataontap.client import client_cmode from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion from manila.share.drivers.netapp.dataontap.protocols import cifs_cmode from manila.share.drivers.netapp.dataontap.protocols import nfs_cmode from manila.share.drivers.netapp import options as na_opts from manila.share.drivers.netapp import utils as na_utils from manila.share import share_types from manila.share import utils as share_utils LOG = log.getLogger(__name__) CONF = cfg.CONF class NetAppCmodeFileStorageLibrary(object): AUTOSUPPORT_INTERVAL_SECONDS = 3600 # hourly SSC_UPDATE_INTERVAL_SECONDS = 3600 # hourly HOUSEKEEPING_INTERVAL_SECONDS = 600 # ten minutes SUPPORTED_PROTOCOLS = ('nfs', 'cifs') # Maps NetApp qualified extra specs keys to corresponding backend API # client library argument keywords. 
When we expose more backend # capabilities here, we will add them to this map. BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP = { 'netapp:thin_provisioned': 'thin_provisioned', 'netapp:dedup': 'dedup_enabled', 'netapp:compression': 'compression_enabled', } STRING_QUALIFIED_EXTRA_SPECS_MAP = { 'netapp:snapshot_policy': 'snapshot_policy', 'netapp:language': 'language', 'netapp:max_files': 'max_files', } # Maps standard extra spec keys to legacy NetApp keys STANDARD_BOOLEAN_EXTRA_SPECS_MAP = { 'thin_provisioning': 'netapp:thin_provisioned', 'dedupe': 'netapp:dedup', 'compression': 'netapp:compression', } def __init__(self, driver_name, **kwargs): na_utils.validate_driver_instantiation(**kwargs) self.driver_name = driver_name self.private_storage = kwargs['private_storage'] self.configuration = kwargs['configuration'] self.configuration.append_config_values(na_opts.netapp_connection_opts) self.configuration.append_config_values(na_opts.netapp_basicauth_opts) self.configuration.append_config_values(na_opts.netapp_transport_opts) self.configuration.append_config_values(na_opts.netapp_support_opts) self.configuration.append_config_values(na_opts.netapp_cluster_opts) self.configuration.append_config_values( na_opts.netapp_provisioning_opts) self.configuration.append_config_values( na_opts.netapp_replication_opts) self._licenses = [] self._client = None self._clients = {} self._ssc_stats = {} self._have_cluster_creds = None self._app_version = kwargs.get('app_version', 'unknown') na_utils.setup_tracing(self.configuration.netapp_trace_flags) self._backend_name = self.configuration.safe_get( 'share_backend_name') or driver_name @na_utils.trace def do_setup(self, context): self._client = self._get_api_client() self._have_cluster_creds = self._client.check_for_cluster_credentials() @na_utils.trace def check_for_setup_error(self): self._licenses = self._get_licenses() self._start_periodic_tasks() def _get_vserver(self, share_server=None): raise NotImplementedError() @na_utils.trace def _get_api_client(self, vserver=None): # Use cached value to prevent calls to system-get-ontapi-version. client = self._clients.get(vserver) if not client: client = client_cmode.NetAppCmodeClient( transport_type=self.configuration.netapp_transport_type, username=self.configuration.netapp_login, password=self.configuration.netapp_password, hostname=self.configuration.netapp_server_hostname, port=self.configuration.netapp_server_port, vserver=vserver, trace=na_utils.TRACE_API) self._clients[vserver] = client return client @na_utils.trace def _get_licenses(self): if not self._have_cluster_creds: LOG.debug('License info not available without cluster credentials') return [] self._licenses = self._client.get_licenses() log_data = { 'backend': self._backend_name, 'licenses': ', '.join(self._licenses), } LOG.info(_LI('Available licenses on %(backend)s ' 'are %(licenses)s.'), log_data) if 'nfs' not in self._licenses and 'cifs' not in self._licenses: msg = _LE('Neither NFS nor CIFS is licensed on %(backend)s') msg_args = {'backend': self._backend_name} LOG.error(msg % msg_args) return self._licenses @na_utils.trace def _start_periodic_tasks(self): # Run the task once in the current thread so prevent a race with # the first invocation of get_share_stats. 
self._update_ssc_info() # Start the task that updates the slow-changing storage service catalog ssc_periodic_task = loopingcall.FixedIntervalLoopingCall( self._update_ssc_info) ssc_periodic_task.start(interval=self.SSC_UPDATE_INTERVAL_SECONDS, initial_delay=self.SSC_UPDATE_INTERVAL_SECONDS) # Start the task that logs autosupport (EMS) data to the controller ems_periodic_task = loopingcall.FixedIntervalLoopingCall( self._handle_ems_logging) ems_periodic_task.start(interval=self.AUTOSUPPORT_INTERVAL_SECONDS, initial_delay=0) # Start the task that runs other housekeeping tasks, such as deletion # of previously soft-deleted storage artifacts. housekeeping_periodic_task = loopingcall.FixedIntervalLoopingCall( self._handle_housekeeping_tasks) housekeeping_periodic_task.start( interval=self.HOUSEKEEPING_INTERVAL_SECONDS, initial_delay=0) def _get_backend_share_name(self, share_id): """Get share name according to share name template.""" return self.configuration.netapp_volume_name_template % { 'share_id': share_id.replace('-', '_')} def _get_backend_snapshot_name(self, snapshot_id): """Get snapshot name according to snapshot name template.""" return 'share_snapshot_' + snapshot_id.replace('-', '_') def _get_backend_cg_snapshot_name(self, snapshot_id): """Get snapshot name according to snapshot name template.""" return 'share_cg_snapshot_' + snapshot_id.replace('-', '_') @na_utils.trace def _get_aggregate_space(self): aggregates = self._find_matching_aggregates() if self._have_cluster_creds: return self._client.get_cluster_aggregate_capacities(aggregates) else: return self._client.get_vserver_aggregate_capacities(aggregates) @na_utils.trace def _get_aggregate_node(self, aggregate_name): """Get home node for the specified aggregate, or None.""" if self._have_cluster_creds: return self._client.get_node_for_aggregate(aggregate_name) else: return None @na_utils.trace def get_share_stats(self): """Retrieve stats info from Data ONTAP backend.""" data = { 'share_backend_name': self._backend_name, 'driver_name': self.driver_name, 'vendor_name': 'NetApp', 'driver_version': '1.0', 'netapp_storage_family': 'ontap_cluster', 'storage_protocol': 'NFS_CIFS', 'total_capacity_gb': 0.0, 'free_capacity_gb': 0.0, 'consistency_group_support': 'host', 'pools': self._get_pools(), } if (self.configuration.replication_domain and not self.configuration.driver_handles_share_servers): data['replication_type'] = 'dr' data['replication_domain'] = self.configuration.replication_domain return data @na_utils.trace def get_share_server_pools(self, share_server): """Return list of pools related to a particular share server. Note that the multi-SVM cDOT driver assigns all available pools to each Vserver, so there is no need to filter the pools any further by share_server. :param share_server: ShareServer class instance. 
""" return self._get_pools() @na_utils.trace def _get_pools(self): """Retrieve list of pools available to this backend.""" pools = [] aggr_space = self._get_aggregate_space() for aggr_name in sorted(aggr_space.keys()): total_capacity_gb = na_utils.round_down(float( aggr_space[aggr_name].get('total', 0)) / units.Gi, '0.01') free_capacity_gb = na_utils.round_down(float( aggr_space[aggr_name].get('available', 0)) / units.Gi, '0.01') allocated_capacity_gb = na_utils.round_down(float( aggr_space[aggr_name].get('used', 0)) / units.Gi, '0.01') if total_capacity_gb == 0.0: total_capacity_gb = 'unknown' pool = { 'pool_name': aggr_name, 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb, 'allocated_capacity_gb': allocated_capacity_gb, 'qos': 'False', 'reserved_percentage': 0, 'dedupe': [True, False], 'compression': [True, False], 'thin_provisioning': [True, False], } # Add storage service catalog data. pool_ssc_stats = self._ssc_stats.get(aggr_name) if pool_ssc_stats: pool.update(pool_ssc_stats) pools.append(pool) return pools @na_utils.trace def _handle_ems_logging(self): """Build and send an EMS log message.""" self._client.send_ems_log_message(self._build_ems_log_message()) @na_utils.trace def _build_ems_log_message(self): """Construct EMS Autosupport log message.""" ems_log = { 'computer-name': socket.getfqdn() or 'Manila_node', 'event-id': '0', 'event-source': 'Manila driver %s' % self.driver_name, 'app-version': self._app_version, 'category': 'provisioning', 'event-description': 'OpenStack Manila connected to cluster node', 'log-level': '6', 'auto-support': 'false', } return ems_log @na_utils.trace def _handle_housekeeping_tasks(self): """Handle various cleanup activities.""" def _find_matching_aggregates(self): """Find all aggregates match pattern.""" raise NotImplementedError() @na_utils.trace def _get_helper(self, share): """Returns driver which implements share protocol.""" share_protocol = share['share_proto'].lower() if share_protocol not in self.SUPPORTED_PROTOCOLS: err_msg = _("Invalid NAS protocol supplied: %s.") % share_protocol raise exception.NetAppException(err_msg) self._check_license_for_protocol(share_protocol) if share_protocol == 'nfs': return nfs_cmode.NetAppCmodeNFSHelper() elif share_protocol == 'cifs': return cifs_cmode.NetAppCmodeCIFSHelper() @na_utils.trace def _check_license_for_protocol(self, share_protocol): """Validates protocol license if cluster APIs are accessible.""" if not self._have_cluster_creds: return if share_protocol.lower() not in self._licenses: current_licenses = self._get_licenses() if share_protocol.lower() not in current_licenses: msg_args = { 'protocol': share_protocol, 'host': self.configuration.netapp_server_hostname } msg = _('The protocol %(protocol)s is not licensed on ' 'controller %(host)s') % msg_args LOG.error(msg) raise exception.NetAppException(msg) @na_utils.trace def get_pool(self, share): pool = share_utils.extract_host(share['host'], level='pool') if pool: return pool share_name = self._get_backend_share_name(share['id']) return self._client.get_aggregate_for_volume(share_name) @na_utils.trace def create_share(self, context, share, share_server): """Creates new share.""" vserver, vserver_client = self._get_vserver(share_server=share_server) self._allocate_container(share, vserver_client) return self._create_export(share, share_server, vserver, vserver_client) @na_utils.trace def create_share_from_snapshot(self, context, share, snapshot, share_server=None): """Creates new share from snapshot.""" vserver, 
vserver_client = self._get_vserver(share_server=share_server) self._allocate_container_from_snapshot(share, snapshot, vserver_client) return self._create_export(share, share_server, vserver, vserver_client) @na_utils.trace def _allocate_container(self, share, vserver_client, replica=False): """Create new share on aggregate.""" share_name = self._get_backend_share_name(share['id']) # Get Data ONTAP aggregate name as pool name. pool_name = share_utils.extract_host(share['host'], level='pool') if pool_name is None: msg = _("Pool is not available in the share host field.") raise exception.InvalidHost(reason=msg) extra_specs = share_types.get_extra_specs_from_share(share) extra_specs = self._remap_standard_boolean_extra_specs(extra_specs) self._check_extra_specs_validity(share, extra_specs) provisioning_options = self._get_provisioning_options(extra_specs) if replica: # If this volume is intended to be a replication destination, # create it as the 'data-protection' type provisioning_options['volume_type'] = 'dp' LOG.debug('Creating share %(share)s on pool %(pool)s with ' 'provisioning options %(options)s', {'share': share_name, 'pool': pool_name, 'options': provisioning_options}) vserver_client.create_volume( pool_name, share_name, share['size'], snapshot_reserve=self.configuration. netapp_volume_snapshot_reserve_percent, **provisioning_options) @na_utils.trace def _remap_standard_boolean_extra_specs(self, extra_specs): """Replace standard boolean extra specs with NetApp-specific ones.""" specs = copy.deepcopy(extra_specs) for (key, netapp_key) in self.STANDARD_BOOLEAN_EXTRA_SPECS_MAP.items(): if key in specs: bool_value = share_types.parse_boolean_extra_spec(key, specs[key]) specs[netapp_key] = 'true' if bool_value else 'false' del specs[key] return specs @na_utils.trace def _check_extra_specs_validity(self, share, extra_specs): """Check if the extra_specs have valid values.""" self._check_boolean_extra_specs_validity( share, extra_specs, list(self.BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP)) self._check_string_extra_specs_validity(share, extra_specs) @na_utils.trace def _check_string_extra_specs_validity(self, share, extra_specs): """Check if the string_extra_specs have valid values.""" if 'netapp:max_files' in extra_specs: self._check_if_max_files_is_valid(share, extra_specs['netapp:max_files']) @na_utils.trace def _check_if_max_files_is_valid(self, share, value): """Check if max_files has a valid value.""" if int(value) < 0: args = {'value': value, 'key': 'netapp:max_files', 'type_id': share['share_type_id'], 'share_id': share['id']} msg = _('Invalid value "%(value)s" for extra_spec "%(key)s" ' 'in share_type %(type_id)s for share %(share_id)s.') raise exception.NetAppException(msg % args) @na_utils.trace def _check_boolean_extra_specs_validity(self, share, specs, keys_of_interest): # cDOT compression requires deduplication. 
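# --- Illustrative aside (editor's sketch, not driver code) --------------------
# The boolean extra-spec check that follows rejects share types that enable
# compression while explicitly disabling deduplication, because cDOT
# compression depends on dedup. A condensed, standalone version of that rule;
# the helper name is hypothetical.
def compression_requires_dedup(specs):
    """Return True if the netapp:dedup / netapp:compression combo is valid."""
    dedup = (specs.get('netapp:dedup') or '').lower()
    compression = (specs.get('netapp:compression') or '').lower()
    return not (compression == 'true' and dedup == 'false')

# This combination is accepted, while the second one is the case the driver
# rejects with exception.Invalid.
assert compression_requires_dedup({'netapp:dedup': 'true',
                                   'netapp:compression': 'true'})
assert not compression_requires_dedup({'netapp:dedup': 'false',
                                       'netapp:compression': 'true'})
# -----------------------------------------------------------------------------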
dedup = specs.get('netapp:dedup', None) compression = specs.get('netapp:compression', None) if dedup is not None and compression is not None: if dedup.lower() == 'false' and compression.lower() == 'true': spec = {'netapp:dedup': dedup, 'netapp:compression': compression} type_id = share['share_type_id'] share_id = share['id'] args = {'type_id': type_id, 'share_id': share_id, 'spec': spec} msg = _('Invalid combination of extra_specs in share_type ' '%(type_id)s for share %(share_id)s: %(spec)s: ' 'deduplication must be enabled in order for ' 'compression to be enabled.') raise exception.Invalid(msg % args) """Check if the boolean_extra_specs have valid values.""" # Extra spec values must be (ignoring case) 'true' or 'false'. for key in keys_of_interest: value = specs.get(key) if value is not None and value.lower() not in ['true', 'false']: type_id = share['share_type_id'] share_id = share['id'] arg_map = {'value': value, 'key': key, 'type_id': type_id, 'share_id': share_id} msg = _('Invalid value "%(value)s" for extra_spec "%(key)s" ' 'in share_type %(type_id)s for share %(share_id)s.') raise exception.Invalid(msg % arg_map) @na_utils.trace def _get_boolean_provisioning_options(self, specs, boolean_specs_map): """Given extra specs, return corresponding client library kwargs. Build a full set of client library provisioning kwargs, filling in a default value if an explicit value has not been supplied via a corresponding extra spec. Boolean extra spec values are "true" or "false", with missing specs treated as "false". Provisioning kwarg values are True or False. """ # Extract the extra spec keys of concern and their corresponding # kwarg keys as lists. keys_of_interest = list(boolean_specs_map) provisioning_args = [boolean_specs_map[key] for key in keys_of_interest] # Set missing spec values to 'false' for key in keys_of_interest: if key not in specs: specs[key] = 'false' # Build a list of Boolean provisioning arguments from the string # equivalents in the spec values. provisioning_values = [specs[key].lower() == 'true' for key in keys_of_interest] # Combine the list of provisioning args and the list of provisioning # values into a dictionary suitable for use as kwargs when invoking # provisioning methods from the client API library. return dict(zip(provisioning_args, provisioning_values)) @na_utils.trace def _get_string_provisioning_options(self, specs, string_specs_map): """Given extra specs, return corresponding client library kwargs. Build a full set of client library provisioning kwargs, filling in a default value if an explicit value has not been supplied via a corresponding extra spec. """ # Extract the extra spec keys of concern and their corresponding # kwarg keys as lists. keys_of_interest = list(string_specs_map) provisioning_args = [string_specs_map[key] for key in keys_of_interest] # Set missing spec values to 'false' for key in keys_of_interest: if key not in specs: specs[key] = None provisioning_values = [specs[key] for key in keys_of_interest] # Combine the list of provisioning args and the list of provisioning # values into a dictionary suitable for use as kwargs when invoking # provisioning methods from the client API library. 
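# --- Illustrative aside (editor's sketch, not driver code) --------------------
# Both provisioning helpers end with dict(zip(args, values)): extra-spec keys
# are translated to client-library keyword names and paired with the parsed
# values. A worked example using the boolean map defined earlier in this
# class; the sample spec values are hypothetical.
boolean_map = {
    'netapp:thin_provisioned': 'thin_provisioned',
    'netapp:dedup': 'dedup_enabled',
    'netapp:compression': 'compression_enabled',
}
specs = {'netapp:thin_provisioned': 'true'}  # missing keys default to 'false'
keys = list(boolean_map)
args = [boolean_map[key] for key in keys]
values = [specs.get(key, 'false').lower() == 'true' for key in keys]
print(dict(zip(args, values)))
# -> {'thin_provisioned': True, 'dedup_enabled': False,
#     'compression_enabled': False}
# -----------------------------------------------------------------------------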
return dict(zip(provisioning_args, provisioning_values)) @na_utils.trace def _get_provisioning_options(self, specs): """Return a merged result of string and binary provisioning options.""" boolean_args = self._get_boolean_provisioning_options( specs, self.BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP) string_args = self._get_string_provisioning_options( specs, self.STRING_QUALIFIED_EXTRA_SPECS_MAP) result = boolean_args.copy() result.update(string_args) return result @na_utils.trace def _check_aggregate_extra_specs_validity(self, aggregate_name, specs): for specs_key in ('netapp_disk_type', 'netapp_raid_type'): aggr_value = self._ssc_stats.get(aggregate_name, {}).get(specs_key) specs_value = specs.get(specs_key) if aggr_value and specs_value and aggr_value != specs_value: msg = _('Invalid value "%(value)s" for extra_spec "%(key)s" ' 'in aggregate %(aggr)s.') msg_args = { 'value': specs_value, 'key': specs_key, 'aggr': aggregate_name } raise exception.NetAppException(msg % msg_args) @na_utils.trace def _allocate_container_from_snapshot( self, share, snapshot, vserver_client, snapshot_name_func=_get_backend_snapshot_name): """Clones existing share.""" share_name = self._get_backend_share_name(share['id']) parent_share_name = self._get_backend_share_name(snapshot['share_id']) if snapshot.get('provider_location') is None: parent_snapshot_name = snapshot_name_func(self, snapshot['id']) else: parent_snapshot_name = snapshot['provider_location'] LOG.debug('Creating share from snapshot %s', snapshot['id']) vserver_client.create_volume_clone(share_name, parent_share_name, parent_snapshot_name) @na_utils.trace def _share_exists(self, share_name, vserver_client): return vserver_client.volume_exists(share_name) @na_utils.trace def delete_share(self, context, share, share_server=None): """Deletes share.""" try: vserver, vserver_client = self._get_vserver( share_server=share_server) except (exception.InvalidInput, exception.VserverNotSpecified, exception.VserverNotFound) as error: LOG.warning(_LW("Could not determine share server for share being " "deleted: %(share)s. Deletion of share record " "will proceed anyway. 
Error: %(error)s"), {'share': share['id'], 'error': error}) return share_name = self._get_backend_share_name(share['id']) if self._share_exists(share_name, vserver_client): self._remove_export(share, vserver_client) self._deallocate_container(share_name, vserver_client) else: LOG.info(_LI("Share %s does not exist."), share['id']) @na_utils.trace def _deallocate_container(self, share_name, vserver_client): """Free share space.""" vserver_client.unmount_volume(share_name, force=True) vserver_client.offline_volume(share_name) vserver_client.delete_volume(share_name) @na_utils.trace def _create_export(self, share, share_server, vserver, vserver_client): """Creates NAS storage.""" helper = self._get_helper(share) helper.set_client(vserver_client) share_name = self._get_backend_share_name(share['id']) interfaces = vserver_client.get_network_interfaces( protocols=[share['share_proto']]) if not interfaces: msg = _('Cannot find network interfaces for Vserver %(vserver)s ' 'and protocol %(proto)s.') msg_args = {'vserver': vserver, 'proto': share['share_proto']} raise exception.NetAppException(msg % msg_args) # Get LIF addresses with metadata export_addresses = self._get_export_addresses_with_metadata( share, share_server, interfaces) # Create the share and get a callback for generating export locations callback = helper.create_share(share, share_name) # Generate export locations using addresses, metadata and callback export_locations = [ { 'path': callback(export_address), 'is_admin_only': metadata.pop('is_admin_only', False), 'metadata': metadata, } for export_address, metadata in copy.deepcopy(export_addresses).items() ] # Sort the export locations to report preferred paths first export_locations = self._sort_export_locations_by_preferred_paths( export_locations) return export_locations @na_utils.trace def _get_export_addresses_with_metadata(self, share, share_server, interfaces): """Return interface addresses with locality and other metadata.""" # Get home node so we can identify preferred paths aggregate_name = share_utils.extract_host(share['host'], level='pool') home_node = self._get_aggregate_node(aggregate_name) # Get admin LIF addresses so we can identify admin export locations admin_addresses = self._get_admin_addresses_for_share_server( share_server) addresses = {} for interface in interfaces: address = interface['address'] is_admin_only = address in admin_addresses if home_node: preferred = interface.get('home-node') == home_node else: preferred = None addresses[address] = { 'is_admin_only': is_admin_only, 'preferred': preferred, } return addresses @na_utils.trace def _get_admin_addresses_for_share_server(self, share_server): if not share_server: return [] admin_addresses = [] for network_allocation in share_server.get('network_allocations'): if network_allocation['label'] == 'admin': admin_addresses.append(network_allocation['ip_address']) return admin_addresses @na_utils.trace def _sort_export_locations_by_preferred_paths(self, export_locations): """Sort the export locations to report preferred paths first.""" sort_key = lambda location: location.get( 'metadata', {}).get('preferred') is not True return sorted(export_locations, key=sort_key) @na_utils.trace def _remove_export(self, share, vserver_client): """Deletes NAS storage.""" helper = self._get_helper(share) helper.set_client(vserver_client) share_name = self._get_backend_share_name(share['id']) target = helper.get_target(share) # Share may be in error state, so there's no share and target. 
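# --- Illustrative aside (editor's sketch, not driver code) --------------------
# _sort_export_locations_by_preferred_paths above relies on sorted() being
# stable and on the key "preferred is not True", so locations flagged as
# preferred sort ahead of both False and missing metadata. A standalone
# demonstration with hypothetical addresses:
locations = [
    {'path': '10.0.0.2:/share_a', 'metadata': {'preferred': False}},
    {'path': '10.0.0.3:/share_a', 'metadata': {}},
    {'path': '10.0.0.1:/share_a', 'metadata': {'preferred': True}},
]
sort_key = lambda location: location.get(
    'metadata', {}).get('preferred') is not True
print([location['path'] for location in sorted(locations, key=sort_key)])
# -> ['10.0.0.1:/share_a', '10.0.0.2:/share_a', '10.0.0.3:/share_a']
# -----------------------------------------------------------------------------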
if target: helper.delete_share(share, share_name) @na_utils.trace def create_snapshot(self, context, snapshot, share_server=None): """Creates a snapshot of a share.""" vserver, vserver_client = self._get_vserver(share_server=share_server) share_name = self._get_backend_share_name(snapshot['share_id']) snapshot_name = self._get_backend_snapshot_name(snapshot['id']) LOG.debug('Creating snapshot %s', snapshot_name) vserver_client.create_snapshot(share_name, snapshot_name) return {'provider_location': snapshot_name} @na_utils.trace def delete_snapshot(self, context, snapshot, share_server=None, snapshot_name=None): """Deletes a snapshot of a share.""" try: vserver, vserver_client = self._get_vserver( share_server=share_server) except (exception.InvalidInput, exception.VserverNotSpecified, exception.VserverNotFound) as error: LOG.warning(_LW("Could not determine share server for snapshot " "being deleted: %(snap)s. Deletion of snapshot " "record will proceed anyway. Error: %(error)s"), {'snap': snapshot['id'], 'error': error}) return share_name = self._get_backend_share_name(snapshot['share_id']) snapshot_name = (snapshot.get('provider_location') or snapshot_name or self._get_backend_snapshot_name(snapshot['id'])) try: self._delete_snapshot(vserver_client, share_name, snapshot_name) except exception.SnapshotResourceNotFound: msg = _LI("Snapshot %(snap)s does not exist on share %(share)s.") msg_args = {'snap': snapshot_name, 'share': share_name} LOG.info(msg, msg_args) def _delete_snapshot(self, vserver_client, share_name, snapshot_name): """Deletes a backend snapshot, handling busy snapshots as needed.""" backend_snapshot = vserver_client.get_snapshot(share_name, snapshot_name) LOG.debug('Deleting snapshot %(snap)s for share %(share)s.', {'snap': snapshot_name, 'share': share_name}) if not backend_snapshot['busy']: vserver_client.delete_snapshot(share_name, snapshot_name) elif backend_snapshot['owners'] == {'volume clone'}: # Snapshots are locked by clone(s), so split clone and soft delete snapshot_children = vserver_client.get_clone_children_for_snapshot( share_name, snapshot_name) for snapshot_child in snapshot_children: vserver_client.split_volume_clone(snapshot_child['name']) vserver_client.soft_delete_snapshot(share_name, snapshot_name) else: raise exception.ShareSnapshotIsBusy(snapshot_name=snapshot_name) @na_utils.trace def manage_existing(self, share, driver_options): vserver, vserver_client = self._get_vserver(share_server=None) share_size = self._manage_container(share, vserver_client) export_locations = self._create_export(share, None, vserver, vserver_client) return {'size': share_size, 'export_locations': export_locations} @na_utils.trace def unmanage(self, share): pass @na_utils.trace def _manage_container(self, share, vserver_client): """Bring existing volume under management as a share.""" protocol_helper = self._get_helper(share) protocol_helper.set_client(vserver_client) volume_name = protocol_helper.get_share_name_for_share(share) if not volume_name: msg = _('Volume could not be determined from export location ' '%(export)s.') msg_args = {'export': share['export_location']} raise exception.ManageInvalidShare(reason=msg % msg_args) share_name = self._get_backend_share_name(share['id']) aggregate_name = share_utils.extract_host(share['host'], level='pool') # Get existing volume info volume = vserver_client.get_volume_to_manage(aggregate_name, volume_name) if not volume: msg = _('Volume %(volume)s not found on aggregate %(aggr)s.') msg_args = {'volume': volume_name, 'aggr': 
aggregate_name} raise exception.ManageInvalidShare(reason=msg % msg_args) # Ensure volume is manageable self._validate_volume_for_manage(volume, vserver_client) # Validate extra specs extra_specs = share_types.get_extra_specs_from_share(share) try: self._check_extra_specs_validity(share, extra_specs) self._check_aggregate_extra_specs_validity(aggregate_name, extra_specs) except exception.ManilaException as ex: raise exception.ManageExistingShareTypeMismatch( reason=six.text_type(ex)) provisioning_options = self._get_provisioning_options(extra_specs) debug_args = { 'share': share_name, 'aggr': aggregate_name, 'options': provisioning_options } LOG.debug('Managing share %(share)s on aggregate %(aggr)s with ' 'provisioning options %(options)s', debug_args) # Rename & remount volume on new path vserver_client.unmount_volume(volume_name) vserver_client.set_volume_name(volume_name, share_name) vserver_client.mount_volume(share_name) # Modify volume to match extra specs vserver_client.manage_volume(aggregate_name, share_name, **provisioning_options) # Save original volume info to private storage original_data = { 'original_name': volume['name'], 'original_junction_path': volume['junction-path'] } self.private_storage.update(share['id'], original_data) # When calculating the size, round up to the next GB. return int(math.ceil(float(volume['size']) / units.Gi)) @na_utils.trace def _validate_volume_for_manage(self, volume, vserver_client): """Ensure volume is a candidate for becoming a share.""" # Check volume info, extra specs validity if volume['type'] != 'rw' or volume['style'] != 'flex': msg = _('Volume %(volume)s must be a read-write flexible volume.') msg_args = {'volume': volume['name']} raise exception.ManageInvalidShare(reason=msg % msg_args) if vserver_client.volume_has_luns(volume['name']): msg = _('Volume %(volume)s must not contain LUNs.') msg_args = {'volume': volume['name']} raise exception.ManageInvalidShare(reason=msg % msg_args) if vserver_client.volume_has_junctioned_volumes(volume['name']): msg = _('Volume %(volume)s must not have junctioned volumes.') msg_args = {'volume': volume['name']} raise exception.ManageInvalidShare(reason=msg % msg_args) @na_utils.trace def create_consistency_group(self, context, cg_dict, share_server=None): """Creates a consistency group. cDOT has no persistent CG object, so apart from validating the share_server info is passed correctly, this method has nothing to do. 
""" vserver, vserver_client = self._get_vserver(share_server=share_server) @na_utils.trace def create_consistency_group_from_cgsnapshot( self, context, cg_dict, cgsnapshot_dict, share_server=None): """Creates a consistency group from an existing CG snapshot.""" vserver, vserver_client = self._get_vserver(share_server=share_server) # Ensure there is something to do if not cgsnapshot_dict['cgsnapshot_members']: return None, None clone_list = self._collate_cg_snapshot_info(cg_dict, cgsnapshot_dict) share_update_list = [] LOG.debug('Creating consistency group from CG snapshot %s.', cgsnapshot_dict['id']) for clone in clone_list: self._allocate_container_from_snapshot( clone['share'], clone['snapshot'], vserver_client, NetAppCmodeFileStorageLibrary._get_backend_cg_snapshot_name) export_locations = self._create_export(clone['share'], share_server, vserver, vserver_client) share_update_list.append({ 'id': clone['share']['id'], 'export_locations': export_locations, }) return None, share_update_list def _collate_cg_snapshot_info(self, cg_dict, cgsnapshot_dict): """Collate the data for a clone of a CG snapshot. Given two data structures, a CG snapshot (cgsnapshot_dict) and a new CG to be cloned from the snapshot (cg_dict), match up both structures into a list of dicts (share & snapshot) suitable for use by existing driver methods that clone individual share snapshots. """ clone_list = list() for share in cg_dict['shares']: clone_info = {'share': share} for cgsnapshot_member in cgsnapshot_dict['cgsnapshot_members']: if (share['source_cgsnapshot_member_id'] == cgsnapshot_member['id']): clone_info['snapshot'] = { 'share_id': cgsnapshot_member['share_id'], 'id': cgsnapshot_member['cgsnapshot_id'] } break else: msg = _("Invalid data supplied for creating consistency group " "from CG snapshot %s.") % cgsnapshot_dict['id'] raise exception.InvalidConsistencyGroup(reason=msg) clone_list.append(clone_info) return clone_list @na_utils.trace def delete_consistency_group(self, context, cg_dict, share_server=None): """Deletes a consistency group. cDOT has no persistent CG object, so apart from validating the share_server info is passed correctly, this method has nothing to do. """ try: vserver, vserver_client = self._get_vserver( share_server=share_server) except (exception.InvalidInput, exception.VserverNotSpecified, exception.VserverNotFound) as error: LOG.warning(_LW("Could not determine share server for consistency " "group being deleted: %(cg)s. Deletion of CG " "record will proceed anyway. Error: %(error)s"), {'cg': cg_dict['id'], 'error': error}) @na_utils.trace def create_cgsnapshot(self, context, snap_dict, share_server=None): """Creates a consistency group snapshot.""" vserver, vserver_client = self._get_vserver(share_server=share_server) share_names = [self._get_backend_share_name(member['share_id']) for member in snap_dict.get('cgsnapshot_members', [])] snapshot_name = self._get_backend_cg_snapshot_name(snap_dict['id']) if share_names: LOG.debug('Creating CG snapshot %s.', snapshot_name) vserver_client.create_cg_snapshot(share_names, snapshot_name) return None, None @na_utils.trace def delete_cgsnapshot(self, context, snap_dict, share_server=None): """Deletes a consistency group snapshot.""" try: vserver, vserver_client = self._get_vserver( share_server=share_server) except (exception.InvalidInput, exception.VserverNotSpecified, exception.VserverNotFound) as error: LOG.warning(_LW("Could not determine share server for CG snapshot " "being deleted: %(snap)s. 
Deletion of CG snapshot " "record will proceed anyway. Error: %(error)s"), {'snap': snap_dict['id'], 'error': error}) return None, None share_names = [self._get_backend_share_name(member['share_id']) for member in snap_dict.get('cgsnapshot_members', [])] snapshot_name = self._get_backend_cg_snapshot_name(snap_dict['id']) for share_name in share_names: try: self._delete_snapshot( vserver_client, share_name, snapshot_name) except exception.SnapshotResourceNotFound: msg = _LI("Snapshot %(snap)s does not exist on share " "%(share)s.") msg_args = {'snap': snapshot_name, 'share': share_name} LOG.info(msg, msg_args) continue return None, None @na_utils.trace def extend_share(self, share, new_size, share_server=None): """Extends size of existing share.""" vserver, vserver_client = self._get_vserver(share_server=share_server) share_name = self._get_backend_share_name(share['id']) LOG.debug('Extending share %(name)s to %(size)s GB.', {'name': share_name, 'size': new_size}) vserver_client.set_volume_size(share_name, new_size) @na_utils.trace def shrink_share(self, share, new_size, share_server=None): """Shrinks size of existing share.""" vserver, vserver_client = self._get_vserver(share_server=share_server) share_name = self._get_backend_share_name(share['id']) LOG.debug('Shrinking share %(name)s to %(size)s GB.', {'name': share_name, 'size': new_size}) vserver_client.set_volume_size(share_name, new_size) @na_utils.trace def update_access(self, context, share, access_rules, add_rules, delete_rules, share_server=None): """Updates access rules for a share.""" # NOTE(ameade): We do not need to add export rules to a non-active # replica as it will fail. replica_state = share.get('replica_state') if (replica_state is not None and replica_state != constants.REPLICA_STATE_ACTIVE): return try: vserver, vserver_client = self._get_vserver( share_server=share_server) except (exception.InvalidInput, exception.VserverNotSpecified, exception.VserverNotFound) as error: LOG.warning(_LW("Could not determine share server for share " "%(share)s during access rules update. " "Error: %(error)s"), {'share': share['id'], 'error': error}) return share_name = self._get_backend_share_name(share['id']) if self._share_exists(share_name, vserver_client): helper = self._get_helper(share) helper.set_client(vserver_client) helper.update_access(share, share_name, access_rules) else: raise exception.ShareResourceNotFound(share_id=share['id']) def setup_server(self, network_info, metadata=None): raise NotImplementedError() def teardown_server(self, server_details, security_services=None): raise NotImplementedError() def get_network_allocations_number(self): """Get number of network interfaces to be created.""" raise NotImplementedError() @na_utils.trace def _update_ssc_info(self): """Periodically runs to update Storage Service Catalog data. The self._ssc_stats attribute is updated with the following format. { : {: }} """ LOG.info(_LI("Updating storage service catalog information for " "backend '%s'"), self._backend_name) # Work on a copy and update the ssc data atomically before returning. ssc_stats = copy.deepcopy(self._ssc_stats) aggregate_names = self._find_matching_aggregates() # Initialize entries for each aggregate. 
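# --- Illustrative aside (editor's sketch, not driver code) --------------------
# _update_ssc_info works on a deep copy and only reassigns self._ssc_stats at
# the end, so concurrent readers such as _get_pools always see a complete
# snapshot rather than a half-updated dict. A minimal standalone sketch of the
# same copy-then-swap pattern; the class and callable names are hypothetical.
import copy

class SscCache(object):
    def __init__(self):
        self.stats = {}

    def update(self, aggregate_names, fetch_info):
        new_stats = copy.deepcopy(self.stats)
        for name in aggregate_names:
            new_stats.setdefault(name, {})
            new_stats[name].update(fetch_info(name))
        # Single attribute assignment: readers see old or new, never partial.
        self.stats = new_stats

cache = SscCache()
cache.update(['aggr1'], lambda name: {'netapp_raid_type': 'raid_dp'})
print(cache.stats)  # -> {'aggr1': {'netapp_raid_type': 'raid_dp'}}
# -----------------------------------------------------------------------------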
for aggregate_name in aggregate_names: if aggregate_name not in ssc_stats: ssc_stats[aggregate_name] = {} if aggregate_names: self._update_ssc_aggr_info(aggregate_names, ssc_stats) self._ssc_stats = ssc_stats @na_utils.trace def _update_ssc_aggr_info(self, aggregate_names, ssc_stats): """Updates the given SSC dictionary with new disk type information. :param aggregate_names: The aggregates this driver cares about :param ssc_stats: The dictionary to update """ if not self._have_cluster_creds: return raid_types = self._client.get_aggregate_raid_types(aggregate_names) for aggregate_name, raid_type in raid_types.items(): ssc_stats[aggregate_name]['netapp_raid_type'] = raid_type disk_types = self._client.get_aggregate_disk_types(aggregate_names) for aggregate_name, disk_type in disk_types.items(): ssc_stats[aggregate_name]['netapp_disk_type'] = disk_type def _find_active_replica(self, replica_list): # NOTE(ameade): Find current active replica. There can only be one # active replica (SnapMirror source volume) at a time in cDOT. for r in replica_list: if r['replica_state'] == constants.REPLICA_STATE_ACTIVE: return r def create_replica(self, context, replica_list, new_replica, access_rules, share_snapshots, share_server=None): """Creates the new replica on this backend and sets up SnapMirror.""" active_replica = self._find_active_replica(replica_list) dm_session = data_motion.DataMotionSession() # 1. Create the destination share dest_backend = share_utils.extract_host(new_replica['host'], level='backend_name') vserver = (dm_session.get_vserver_from_share(new_replica) or self.configuration.netapp_vserver) vserver_client = data_motion.get_client_for_backend( dest_backend, vserver_name=vserver) self._allocate_container(new_replica, vserver_client, replica=True) # 2. Setup SnapMirror dm_session.create_snapmirror(active_replica, new_replica) model_update = { 'export_locations': [], 'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC, 'access_rules_status': constants.STATUS_ACTIVE, } return model_update def delete_replica(self, context, replica_list, replica, share_snapshots, share_server=None): """Removes the replica on this backend and destroys SnapMirror.""" dm_session = data_motion.DataMotionSession() # 1. Remove SnapMirror dest_backend = share_utils.extract_host(replica['host'], level='backend_name') vserver = (dm_session.get_vserver_from_share(replica) or self.configuration.netapp_vserver) # Ensure that all potential snapmirror relationships and their metadata # involving the replica are destroyed. for other_replica in replica_list: dm_session.delete_snapmirror(other_replica, replica) dm_session.delete_snapmirror(replica, other_replica) # 2. 
Delete share vserver_client = data_motion.get_client_for_backend( dest_backend, vserver_name=vserver) share_name = self._get_backend_share_name(replica['id']) if self._share_exists(share_name, vserver_client): self._deallocate_container(share_name, vserver_client) def update_replica_state(self, context, replica_list, replica, access_rules, share_snapshots, share_server=None): """Returns the status of the given replica on this backend.""" active_replica = self._find_active_replica(replica_list) share_name = self._get_backend_share_name(replica['id']) vserver, vserver_client = self._get_vserver(share_server=share_server) if not vserver_client.volume_exists(share_name): msg = _("Volume %(share_name)s does not exist on vserver " "%(vserver)s.") msg_args = {'share_name': share_name, 'vserver': vserver} raise exception.ShareResourceNotFound(msg % msg_args) dm_session = data_motion.DataMotionSession() try: snapmirrors = dm_session.get_snapmirrors(active_replica, replica) except netapp_api.NaApiError: LOG.exception(_LE("Could not get snapmirrors for replica %s."), replica['id']) return constants.STATUS_ERROR if not snapmirrors: if replica['status'] != constants.STATUS_CREATING: try: dm_session.create_snapmirror(active_replica, replica) except netapp_api.NaApiError: LOG.exception(_LE("Could not create snapmirror for " "replica %s."), replica['id']) return constants.STATUS_ERROR return constants.REPLICA_STATE_OUT_OF_SYNC snapmirror = snapmirrors[0] if (snapmirror.get('mirror-state') != 'snapmirrored' and snapmirror.get('relationship-status') == 'transferring'): return constants.REPLICA_STATE_OUT_OF_SYNC if snapmirror.get('mirror-state') != 'snapmirrored': try: vserver_client.resume_snapmirror(snapmirror['source-vserver'], snapmirror['source-volume'], vserver, share_name) vserver_client.resync_snapmirror(snapmirror['source-vserver'], snapmirror['source-volume'], vserver, share_name) return constants.REPLICA_STATE_OUT_OF_SYNC except netapp_api.NaApiError: LOG.exception(_LE("Could not resync snapmirror.")) return constants.STATUS_ERROR last_update_timestamp = float( snapmirror.get('last-transfer-end-timestamp', 0)) # TODO(ameade): Have a configurable RPO for replicas, for now it is # one hour. if (last_update_timestamp and (timeutils.is_older_than( timeutils.iso8601_from_timestamp(last_update_timestamp), 3600))): return constants.REPLICA_STATE_OUT_OF_SYNC # Check all snapshots exist snapshots = [snap['share_replica_snapshot'] for snap in share_snapshots] for snap in snapshots: snapshot_name = snap.get('provider_location') if not vserver_client.snapshot_exists(snapshot_name, share_name): return constants.REPLICA_STATE_OUT_OF_SYNC return constants.REPLICA_STATE_IN_SYNC def promote_replica(self, context, replica_list, replica, access_rules, share_server=None): """Switch SnapMirror relationships and allow r/w ops on replica. Creates a DataMotion session and switches the direction of the SnapMirror relationship between the currently 'active' instance ( SnapMirror source volume) and the replica. Also attempts setting up SnapMirror relationships between the other replicas and the new SnapMirror source volume ('active' instance). 
:param context: Request Context :param replica_list: List of replicas, including the 'active' instance :param replica: Replica to promote to SnapMirror source :param access_rules: Access rules to apply to the replica :param share_server: ShareServer class instance of replica :return: Updated replica_list """ orig_active_replica = self._find_active_replica(replica_list) dm_session = data_motion.DataMotionSession() new_replica_list = [] # Setup the new active replica try: new_active_replica = ( self._convert_destination_replica_to_independent( context, dm_session, orig_active_replica, replica, access_rules, share_server=share_server)) except exception.StorageCommunicationException: LOG.exception(_LE("Could not communicate with the backend " "for replica %s during promotion."), replica['id']) new_active_replica = copy.deepcopy(replica) new_active_replica['replica_state'] = ( constants.STATUS_ERROR) new_active_replica['status'] = constants.STATUS_ERROR return [new_active_replica] new_replica_list.append(new_active_replica) # Change the source replica for all destinations to the new # active replica. for r in replica_list: if r['id'] != replica['id']: r = self._safe_change_replica_source(dm_session, r, orig_active_replica, replica, replica_list) new_replica_list.append(r) return new_replica_list def _convert_destination_replica_to_independent( self, context, dm_session, orig_active_replica, replica, access_rules, share_server=None): """Breaks SnapMirror and allows r/w ops on the destination replica. For promotion, the existing SnapMirror relationship must be broken and access rules have to be granted to the broken off replica to use it as an independent share. :param context: Request Context :param dm_session: Data motion object for SnapMirror operations :param orig_active_replica: Original SnapMirror source :param replica: Replica to promote to SnapMirror source :param access_rules: Access rules to apply to the replica :param share_server: ShareServer class instance of replica :return: Updated replica """ vserver, vserver_client = self._get_vserver(share_server=share_server) share_name = self._get_backend_share_name(replica['id']) try: # 1. Start an update to try to get a last minute transfer before we # quiesce and break dm_session.update_snapmirror(orig_active_replica, replica) except exception.StorageCommunicationException: # Ignore any errors since the current source replica may be # unreachable pass # 2. Break SnapMirror dm_session.break_snapmirror(orig_active_replica, replica) # 3. Setup access rules new_active_replica = copy.deepcopy(replica) helper = self._get_helper(replica) helper.set_client(vserver_client) try: helper.update_access(replica, share_name, access_rules) except Exception: new_active_replica['access_rules_status'] = ( constants.STATUS_OUT_OF_SYNC) else: new_active_replica['access_rules_status'] = constants.STATUS_ACTIVE new_active_replica['export_locations'] = self._create_export( new_active_replica, share_server, vserver, vserver_client) new_active_replica['replica_state'] = constants.REPLICA_STATE_ACTIVE return new_active_replica def _safe_change_replica_source(self, dm_session, replica, orig_source_replica, new_source_replica, replica_list): """Attempts to change the SnapMirror source to new source. If the attempt fails, 'replica_state' is set to 'error'. 
:param dm_session: Data motion object for SnapMirror operations :param replica: Replica that requires a change of source :param orig_source_replica: Original SnapMirror source volume :param new_source_replica: New SnapMirror source volume :return: Updated replica """ try: dm_session.change_snapmirror_source(replica, orig_source_replica, new_source_replica, replica_list) except exception.StorageCommunicationException: replica['status'] = constants.STATUS_ERROR replica['replica_state'] = constants.STATUS_ERROR replica['export_locations'] = [] msg = _LE("Failed to change replica (%s) to a SnapMirror " "destination. Replica backend is unreachable.") LOG.exception(msg, replica['id']) return replica except netapp_api.NaApiError: replica['replica_state'] = constants.STATUS_ERROR replica['export_locations'] = [] msg = _LE("Failed to change replica (%s) to a SnapMirror " "destination.") LOG.exception(msg, replica['id']) return replica replica['replica_state'] = constants.REPLICA_STATE_OUT_OF_SYNC replica['export_locations'] = [] return replica def create_replicated_snapshot(self, context, replica_list, snapshot_instances, share_server=None): active_replica = self._find_active_replica(replica_list) active_snapshot = [x for x in snapshot_instances if x['share_id'] == active_replica['id']][0] snapshot_name = self._get_backend_snapshot_name(active_snapshot['id']) self.create_snapshot(context, active_snapshot, share_server=share_server) active_snapshot['status'] = constants.STATUS_AVAILABLE active_snapshot['provider_location'] = snapshot_name snapshots = [active_snapshot] instances = zip(sorted(replica_list, key=lambda x: x['id']), sorted(snapshot_instances, key=lambda x: x['share_id'])) for replica, snapshot in instances: if snapshot['id'] != active_snapshot['id']: snapshot['provider_location'] = snapshot_name snapshots.append(snapshot) dm_session = data_motion.DataMotionSession() if replica.get('host'): try: dm_session.update_snapmirror(active_replica, replica) except netapp_api.NaApiError as e: if e.code != netapp_api.EOBJECTNOTFOUND: raise return snapshots def delete_replicated_snapshot(self, context, replica_list, snapshot_instances, share_server=None): active_replica = self._find_active_replica(replica_list) active_snapshot = [x for x in snapshot_instances if x['share_id'] == active_replica['id']][0] self.delete_snapshot(context, active_snapshot, share_server=share_server, snapshot_name=active_snapshot['provider_location'] ) active_snapshot['status'] = constants.STATUS_DELETED instances = zip(sorted(replica_list, key=lambda x: x['id']), sorted(snapshot_instances, key=lambda x: x['share_id'])) for replica, snapshot in instances: if snapshot['id'] != active_snapshot['id']: dm_session = data_motion.DataMotionSession() if replica.get('host'): try: dm_session.update_snapmirror(active_replica, replica) except netapp_api.NaApiError as e: if e.code != netapp_api.EOBJECTNOTFOUND: raise return [active_snapshot] def update_replicated_snapshot(self, replica_list, share_replica, snapshot_instances, snapshot_instance, share_server=None): active_replica = self._find_active_replica(replica_list) vserver, vserver_client = self._get_vserver(share_server=share_server) share_name = self._get_backend_share_name( snapshot_instance['share_id']) snapshot_name = snapshot_instance.get('provider_location') # NOTE(ameade): If there is no provider location, # then grab from active snapshot instance if snapshot_name is None: active_snapshot = [x for x in snapshot_instances if x['share_id'] == active_replica['id']][0] 
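# --- Illustrative aside (editor's sketch, not driver code) --------------------
# create_replicated_snapshot and delete_replicated_snapshot above pair each
# replica with its snapshot instance by zipping the two lists after sorting
# replicas by 'id' and snapshot instances by 'share_id' (a snapshot instance's
# share_id is the id of its replica). A standalone demonstration with
# hypothetical identifiers:
replicas = [{'id': 'replica-b'}, {'id': 'replica-a'}]
snapshots = [{'id': 'snap-1', 'share_id': 'replica-a'},
             {'id': 'snap-2', 'share_id': 'replica-b'}]
pairs = zip(sorted(replicas, key=lambda x: x['id']),
            sorted(snapshots, key=lambda x: x['share_id']))
print([(replica['id'], snapshot['id']) for replica, snapshot in pairs])
# -> [('replica-a', 'snap-1'), ('replica-b', 'snap-2')]
# -----------------------------------------------------------------------------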
snapshot_name = active_snapshot.get('provider_location') if not snapshot_name: return try: snapshot_exists = vserver_client.snapshot_exists(snapshot_name, share_name) except exception.SnapshotUnavailable: # The volume must still be offline return if (snapshot_exists and snapshot_instance['status'] == constants.STATUS_CREATING): return { 'status': constants.STATUS_AVAILABLE, 'provider_location': snapshot_name, } elif (not snapshot_exists and snapshot_instance['status'] == constants.STATUS_DELETING): raise exception.SnapshotResourceNotFound( name=snapshot_instance.get('provider_location')) dm_session = data_motion.DataMotionSession() try: dm_session.update_snapmirror(active_replica, share_replica) except netapp_api.NaApiError as e: if e.code != netapp_api.EOBJECTNOTFOUND: raise manila-2.0.0/manila/share/drivers/netapp/dataontap/cluster_mode/data_motion.py0000664000567000056710000004037012701407107030736 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Alex Meade. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NetApp Data ONTAP data motion library. This library handles transferring data from a source to a destination. Its responsibility is to handle this as efficiently as possible given the location of the data's source and destination. This includes cloning, SnapMirror, and copy-offload as improvements to brute force data transfer. 
""" from oslo_config import cfg from oslo_log import log from oslo_utils import excutils from manila import exception from manila.i18n import _LE, _LI, _LW from manila.share import configuration from manila.share import driver from manila.share.drivers.netapp.dataontap.client import api as netapp_api from manila.share.drivers.netapp.dataontap.client import client_cmode from manila.share.drivers.netapp import options as na_opts from manila.share.drivers.netapp import utils as na_utils from manila.share import utils as share_utils from manila import utils LOG = log.getLogger(__name__) CONF = cfg.CONF def get_backend_configuration(backend_name): for section in CONF.list_all_sections(): config = configuration.Configuration(driver.share_opts, config_group=section) config.append_config_values(na_opts.netapp_cluster_opts) config.append_config_values(na_opts.netapp_connection_opts) config.append_config_values(na_opts.netapp_basicauth_opts) config.append_config_values(na_opts.netapp_transport_opts) config.append_config_values(na_opts.netapp_support_opts) config.append_config_values(na_opts.netapp_provisioning_opts) config.append_config_values(na_opts.netapp_replication_opts) if (config.share_backend_name and config.share_backend_name.lower() == backend_name.lower()): return config msg = _LW("Could not find backend %s in configuration.") LOG.warning(msg % backend_name) def get_client_for_backend(backend_name, vserver_name=None): config = get_backend_configuration(backend_name) client = client_cmode.NetAppCmodeClient( transport_type=config.netapp_transport_type, username=config.netapp_login, password=config.netapp_password, hostname=config.netapp_server_hostname, port=config.netapp_server_port, vserver=vserver_name or config.netapp_vserver, trace=na_utils.TRACE_API) return client class DataMotionSession(object): def _get_backend_volume_name(self, config, share_obj): """Return the calculated backend name of the share. Uses the netapp_volume_name_template configuration value for the backend to calculate the volume name on the array for the share. """ volume_name = config.netapp_volume_name_template % { 'share_id': share_obj['id'].replace('-', '_')} return volume_name def get_vserver_from_share(self, share_obj): share_server = share_obj.get('share_server') if share_server: backend_details = share_server.get('backend_details') if backend_details: return backend_details.get('vserver_name') def get_backend_info_for_share(self, share_obj): backend_name = share_utils.extract_host( share_obj['host'], level='backend_name') config = get_backend_configuration(backend_name) vserver = (self.get_vserver_from_share(share_obj) or config.netapp_vserver) volume_name = self._get_backend_volume_name( config, share_obj) return volume_name, vserver, backend_name def get_snapmirrors(self, source_share_obj, dest_share_obj): dest_volume_name, dest_vserver, dest_backend = ( self.get_backend_info_for_share(dest_share_obj)) dest_client = get_client_for_backend(dest_backend, vserver_name=dest_vserver) src_volume_name, src_vserver, __ = self.get_backend_info_for_share( source_share_obj) snapmirrors = dest_client.get_snapmirrors( src_vserver, src_volume_name, dest_vserver, dest_volume_name, desired_attributes=['relationship-status', 'mirror-state', 'source-vserver', 'source-volume', 'last-transfer-end-timestamp']) return snapmirrors def create_snapmirror(self, source_share_obj, dest_share_obj): """Sets up a SnapMirror relationship between two volumes. 1. Create SnapMirror relationship 2. 
Initialize data transfer asynchronously """ dest_volume_name, dest_vserver, dest_backend = ( self.get_backend_info_for_share(dest_share_obj)) dest_client = get_client_for_backend(dest_backend, vserver_name=dest_vserver) src_volume_name, src_vserver, __ = self.get_backend_info_for_share( source_share_obj) # 1. Create SnapMirror relationship # TODO(ameade): Change the schedule from hourly to a config value dest_client.create_snapmirror(src_vserver, src_volume_name, dest_vserver, dest_volume_name, schedule='hourly') # 2. Initialize async transfer of the initial data dest_client.initialize_snapmirror(src_vserver, src_volume_name, dest_vserver, dest_volume_name) def delete_snapmirror(self, source_share_obj, dest_share_obj, release=True): """Ensures all information about a SnapMirror relationship is removed. 1. Abort snapmirror 2. Delete the snapmirror 3. Release snapmirror to cleanup snapmirror metadata and snapshots """ dest_volume_name, dest_vserver, dest_backend = ( self.get_backend_info_for_share(dest_share_obj)) dest_client = get_client_for_backend(dest_backend, vserver_name=dest_vserver) src_volume_name, src_vserver, src_backend = ( self.get_backend_info_for_share(source_share_obj)) # 1. Abort any ongoing transfers try: dest_client.abort_snapmirror(src_vserver, src_volume_name, dest_vserver, dest_volume_name, clear_checkpoint=False) except netapp_api.NaApiError as e: # Snapmirror is already deleted pass # 2. Delete SnapMirror Relationship and cleanup destination snapshots try: dest_client.delete_snapmirror(src_vserver, src_volume_name, dest_vserver, dest_volume_name) except netapp_api.NaApiError as e: with excutils.save_and_reraise_exception() as exc_context: if (e.code == netapp_api.EOBJECTNOTFOUND or e.code == netapp_api.ESOURCE_IS_DIFFERENT or "(entry doesn't exist)" in e.message): LOG.info(_LI('No snapmirror relationship to delete')) exc_context.reraise = False if release: # If the source is unreachable, do not perform the release try: src_client = get_client_for_backend(src_backend, vserver_name=src_vserver) except Exception: src_client = None # 3. Cleanup SnapMirror relationship on source try: if src_client: src_client.release_snapmirror(src_vserver, src_volume_name, dest_vserver, dest_volume_name) except netapp_api.NaApiError as e: with excutils.save_and_reraise_exception() as exc_context: if (e.code == netapp_api.EOBJECTNOTFOUND or e.code == netapp_api.ESOURCE_IS_DIFFERENT or "(entry doesn't exist)" in e.message): # Handle the case where the snapmirror is already # cleaned up exc_context.reraise = False def update_snapmirror(self, source_share_obj, dest_share_obj): """Schedule a snapmirror update to happen on the backend.""" dest_volume_name, dest_vserver, dest_backend = ( self.get_backend_info_for_share(dest_share_obj)) dest_client = get_client_for_backend(dest_backend, vserver_name=dest_vserver) src_volume_name, src_vserver, __ = self.get_backend_info_for_share( source_share_obj) # Update SnapMirror dest_client.update_snapmirror(src_vserver, src_volume_name, dest_vserver, dest_volume_name) def quiesce_then_abort(self, source_share_obj, dest_share_obj): dest_volume_name, dest_vserver, dest_backend = ( self.get_backend_info_for_share(dest_share_obj)) dest_client = get_client_for_backend(dest_backend, vserver_name=dest_vserver) src_volume_name, src_vserver, __ = self.get_backend_info_for_share( source_share_obj) # 1. 
Attempt to quiesce, then abort dest_client.quiesce_snapmirror(src_vserver, src_volume_name, dest_vserver, dest_volume_name) config = get_backend_configuration(share_utils.extract_host( source_share_obj['host'], level='backend_name')) retries = config.netapp_snapmirror_quiesce_timeout / 5 @utils.retry(exception.ReplicationException, interval=5, retries=retries, backoff_rate=1) def wait_for_quiesced(): snapmirror = dest_client.get_snapmirrors( src_vserver, src_volume_name, dest_vserver, dest_volume_name, desired_attributes=['relationship-status', 'mirror-state'] )[0] if snapmirror.get('relationship-status') != 'quiesced': raise exception.ReplicationException( reason=_LE("Snapmirror relationship is not quiesced.")) try: wait_for_quiesced() except exception.ReplicationException: dest_client.abort_snapmirror(src_vserver, src_volume_name, dest_vserver, dest_volume_name, clear_checkpoint=False) def break_snapmirror(self, source_share_obj, dest_share_obj): """Breaks SnapMirror relationship. 1. Quiesce any ongoing snapmirror transfers 2. Wait until snapmirror finishes transfers and enters quiesced state 3. Break snapmirror 4. Mount the destination volume so it is exported as a share """ dest_volume_name, dest_vserver, dest_backend = ( self.get_backend_info_for_share(dest_share_obj)) dest_client = get_client_for_backend(dest_backend, vserver_name=dest_vserver) src_volume_name, src_vserver, __ = self.get_backend_info_for_share( source_share_obj) # 1. Attempt to quiesce, then abort self.quiesce_then_abort(source_share_obj, dest_share_obj) # 2. Break SnapMirror dest_client.break_snapmirror(src_vserver, src_volume_name, dest_vserver, dest_volume_name) # 3. Mount the destination volume and create a junction path dest_client.mount_volume(dest_volume_name) def resync_snapmirror(self, source_share_obj, dest_share_obj): """Resync SnapMirror relationship. """ dest_volume_name, dest_vserver, dest_backend = ( self.get_backend_info_for_share(dest_share_obj)) dest_client = get_client_for_backend(dest_backend, vserver_name=dest_vserver) src_volume_name, src_vserver, __ = self.get_backend_info_for_share( source_share_obj) dest_client.resync_snapmirror(src_vserver, src_volume_name, dest_vserver, dest_volume_name) def resume_snapmirror(self, source_share_obj, dest_share_obj): """Resume SnapMirror relationship from a quiesced state.""" dest_volume_name, dest_vserver, dest_backend = ( self.get_backend_info_for_share(dest_share_obj)) dest_client = get_client_for_backend(dest_backend, vserver_name=dest_vserver) src_volume_name, src_vserver, __ = self.get_backend_info_for_share( source_share_obj) dest_client.resume_snapmirror(src_vserver, src_volume_name, dest_vserver, dest_volume_name) def change_snapmirror_source(self, replica, orig_source_replica, new_source_replica, replica_list): """Creates SnapMirror relationship from the new source to destination. 1. Delete all snapmirrors involving the replica, but maintain snapmirror metadata and snapshots for efficiency 2. Ensure a new source -> replica snapmirror exists 3. Resync new source -> replica snapmirror relationship """ replica_volume_name, replica_vserver, replica_backend = ( self.get_backend_info_for_share(replica)) replica_client = get_client_for_backend(replica_backend, vserver_name=replica_vserver) new_src_volume_name, new_src_vserver, __ = ( self.get_backend_info_for_share(new_source_replica)) # 1. 
delete for other_replica in replica_list: if other_replica['id'] == replica['id']: continue # We need to delete ALL snapmirror relationships # involving this replica but do not remove snapmirror metadata # so that the new snapmirror relationship is efficient. self.delete_snapmirror(other_replica, replica, release=False) self.delete_snapmirror(replica, other_replica, release=False) # 2. create # TODO(ameade): Update the schedule if needed. replica_client.create_snapmirror(new_src_vserver, new_src_volume_name, replica_vserver, replica_volume_name, schedule='hourly') # 3. resync replica_client.resync_snapmirror(new_src_vserver, new_src_volume_name, replica_vserver, replica_volume_name) manila-2.0.0/manila/share/drivers/netapp/dataontap/cluster_mode/lib_multi_svm.py0000664000567000056710000003144312701407107031306 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NetApp Data ONTAP cDOT multi-SVM storage driver library. This library extends the abstract base library and completes the multi-SVM functionality needed by the cDOT multi-SVM Manila driver. This library variant creates Data ONTAP storage virtual machines (i.e. 'vservers') as needed to provision shares. """ import re from oslo_log import log from oslo_utils import excutils from manila import exception from manila.i18n import _, _LE, _LW, _LI from manila.share.drivers.netapp.dataontap.client import client_cmode from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base from manila.share.drivers.netapp import utils as na_utils from manila import utils LOG = log.getLogger(__name__) SUPPORTED_NETWORK_TYPES = (None, 'flat', 'vlan') SEGMENTED_NETWORK_TYPES = ('vlan',) class NetAppCmodeMultiSVMFileStorageLibrary( lib_base.NetAppCmodeFileStorageLibrary): @na_utils.trace def check_for_setup_error(self): if self._have_cluster_creds: if self.configuration.netapp_vserver: msg = _LW('Vserver is specified in the configuration. This is ' 'ignored when the driver is managing share servers.') LOG.warning(msg) else: # only have vserver creds, which is an error in multi_svm mode msg = _('Cluster credentials must be specified in the ' 'configuration when the driver is managing share servers.') raise exception.InvalidInput(reason=msg) # Ensure one or more aggregates are available. if not self._find_matching_aggregates(): msg = _('No aggregates are available for provisioning shares. ' 'Ensure that the configuration option ' 'netapp_aggregate_name_search_pattern is set correctly.') raise exception.NetAppException(msg) super(NetAppCmodeMultiSVMFileStorageLibrary, self).\ check_for_setup_error() @na_utils.trace def _get_vserver(self, share_server=None): if not share_server: msg = _('Share server not provided') raise exception.InvalidInput(reason=msg) backend_details = share_server.get('backend_details') vserver = backend_details.get( 'vserver_name') if backend_details else None if not vserver: msg = _('Vserver name is absent in backend details. 
Please ' 'check whether Vserver was created properly.') raise exception.VserverNotSpecified(msg) if not self._client.vserver_exists(vserver): raise exception.VserverNotFound(vserver=vserver) vserver_client = self._get_api_client(vserver) return vserver, vserver_client @na_utils.trace def _handle_housekeeping_tasks(self): """Handle various cleanup activities.""" self._client.prune_deleted_nfs_export_policies() self._client.prune_deleted_snapshots() super(NetAppCmodeMultiSVMFileStorageLibrary, self).\ _handle_housekeeping_tasks() @na_utils.trace def _find_matching_aggregates(self): """Find all aggregates match pattern.""" aggregate_names = self._client.list_aggregates() pattern = self.configuration.netapp_aggregate_name_search_pattern return [aggr_name for aggr_name in aggregate_names if re.match(pattern, aggr_name)] @na_utils.trace def setup_server(self, network_info, metadata=None): """Creates and configures new Vserver.""" LOG.debug('Creating server %s', network_info['server_id']) self._validate_network_type(network_info) vserver_name = self._get_vserver_name(network_info['server_id']) server_details = {'vserver_name': vserver_name} try: self._create_vserver(vserver_name, network_info) except Exception as e: e.detail_data = {'server_details': server_details} raise return server_details @na_utils.trace def _validate_network_type(self, network_info): """Raises exception if the segmentation type is incorrect.""" if network_info['network_type'] not in SUPPORTED_NETWORK_TYPES: msg = _('The specified network type %s is unsupported by the ' 'NetApp clustered Data ONTAP driver') raise exception.NetworkBadConfigurationException( reason=msg % network_info['network_type']) @na_utils.trace def _get_vserver_name(self, server_id): return self.configuration.netapp_vserver_name_template % server_id @na_utils.trace def _create_vserver(self, vserver_name, network_info): """Creates Vserver with given parameters if it doesn't exist.""" if self._client.vserver_exists(vserver_name): msg = _('Vserver %s already exists.') raise exception.NetAppException(msg % vserver_name) ipspace_name = self._create_ipspace(network_info) LOG.debug('Vserver %s does not exist, creating.', vserver_name) self._client.create_vserver( vserver_name, self.configuration.netapp_root_volume_aggregate, self.configuration.netapp_root_volume, self._find_matching_aggregates(), ipspace_name) vserver_client = self._get_api_client(vserver=vserver_name) security_services = None try: self._create_vserver_lifs(vserver_name, vserver_client, network_info, ipspace_name) self._create_vserver_admin_lif(vserver_name, vserver_client, network_info, ipspace_name) vserver_client.enable_nfs() security_services = network_info.get('security_services') if security_services: self._client.setup_security_services(security_services, vserver_client, vserver_name) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed to configure Vserver.")) self._delete_vserver(vserver_name, security_services=security_services) def _get_valid_ipspace_name(self, network_id): """Get IPspace name according to network id.""" return 'ipspace_' + network_id.replace('-', '_') @na_utils.trace def _create_ipspace(self, network_info): """If supported, create an IPspace for a new Vserver.""" if not self._client.features.IPSPACES: return None if (network_info['network_allocations'][0]['network_type'] not in SEGMENTED_NETWORK_TYPES): return client_cmode.DEFAULT_IPSPACE # NOTE(cknight): Neutron needs cDOT IP spaces because it can provide # overlapping IP address 
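# NOTE: Illustrative sketch (not part of the driver) of how
# _find_matching_aggregates() above filters aggregate names with re.match()
# against netapp_aggregate_name_search_pattern. The pattern and aggregate
# names in the example are made up.
import re


def find_matching_aggregates_sketch(aggregate_names, pattern):
    """Return only the names matching ``pattern`` (anchored at the start
    of the string, per re.match semantics)."""
    return [name for name in aggregate_names if re.match(pattern, name)]


# Example:
#   find_matching_aggregates_sketch(
#       ['manila_aggr_1', 'manila_aggr_2', 'root_aggr'], '^manila_.*')
#   -> ['manila_aggr_1', 'manila_aggr_2']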
ranges for different subnets. That is not # believed to be an issue for any of Manila's other network plugins. ipspace_id = network_info.get('neutron_subnet_id') if not ipspace_id: return client_cmode.DEFAULT_IPSPACE ipspace_name = self._get_valid_ipspace_name(ipspace_id) if not self._client.ipspace_exists(ipspace_name): self._client.create_ipspace(ipspace_name) return ipspace_name @na_utils.trace def _create_vserver_lifs(self, vserver_name, vserver_client, network_info, ipspace_name): """Create Vserver data logical interfaces (LIFs).""" nodes = self._client.list_cluster_nodes() node_network_info = zip(nodes, network_info['network_allocations']) for node_name, network_allocation in node_network_info: lif_name = self._get_lif_name(node_name, network_allocation) self._create_lif(vserver_client, vserver_name, ipspace_name, node_name, lif_name, network_allocation) @na_utils.trace def _create_vserver_admin_lif(self, vserver_name, vserver_client, network_info, ipspace_name): """Create Vserver admin LIF, if defined.""" network_allocations = network_info.get('admin_network_allocations') if not network_allocations: LOG.info(_LI('No admin network defined for Vserver %s.') % vserver_name) return node_name = self._client.list_cluster_nodes()[0] network_allocation = network_allocations[0] lif_name = self._get_lif_name(node_name, network_allocation) self._create_lif(vserver_client, vserver_name, ipspace_name, node_name, lif_name, network_allocation) @na_utils.trace def _get_node_data_port(self, node): port_names = self._client.list_node_data_ports(node) pattern = self.configuration.netapp_port_name_search_pattern matched_port_names = [port_name for port_name in port_names if re.match(pattern, port_name)] if not matched_port_names: raise exception.NetAppException( _('Could not find eligible network ports on node %s on which ' 'to create Vserver LIFs.') % node) return matched_port_names[0] def _get_lif_name(self, node_name, network_allocation): """Get LIF name based on template from manila.conf file.""" lif_name_args = { 'node': node_name, 'net_allocation_id': network_allocation['id'], } return self.configuration.netapp_lif_name_template % lif_name_args @na_utils.trace def _create_lif(self, vserver_client, vserver_name, ipspace_name, node_name, lif_name, network_allocation): """Creates LIF for Vserver.""" port = self._get_node_data_port(node_name) ip_address = network_allocation['ip_address'] netmask = utils.cidr_to_netmask(network_allocation['cidr']) vlan = network_allocation['segmentation_id'] if not vserver_client.network_interface_exists( vserver_name, node_name, port, ip_address, netmask, vlan): self._client.create_network_interface( ip_address, netmask, vlan, node_name, port, vserver_name, lif_name, ipspace_name) @na_utils.trace def get_network_allocations_number(self): """Get number of network interfaces to be created.""" return len(self._client.list_cluster_nodes()) @na_utils.trace def get_admin_network_allocations_number(self, admin_network_api): """Get number of network allocations for creating admin LIFs.""" return 1 if admin_network_api else 0 @na_utils.trace def teardown_server(self, server_details, security_services=None): """Teardown share server.""" vserver = server_details.get( 'vserver_name') if server_details else None if not vserver: LOG.warning(_LW("Vserver not specified for share server being " "deleted. 
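# NOTE: Illustrative sketch (not part of the driver) of two naming rules
# used above: IPspace names are derived from the Neutron subnet ID by
# replacing dashes with underscores, and LIF names come from expanding
# netapp_lif_name_template with the node name and network allocation ID.
# The template string below is an assumed example, not necessarily the
# shipped default.
def ipspace_name_sketch(neutron_subnet_id):
    return 'ipspace_' + neutron_subnet_id.replace('-', '_')


def lif_name_sketch(node_name, allocation_id,
                    template='os_%(net_allocation_id)s'):
    # '%' formatting with a dict silently ignores unused keys such as 'node'.
    return template % {'node': node_name,
                       'net_allocation_id': allocation_id}


# Example: ipspace_name_sketch('1234-abcd') -> 'ipspace_1234_abcd'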
Deletion of share server record will " "proceed anyway.")) return elif not self._client.vserver_exists(vserver): LOG.warning(_LW("Could not find Vserver for share server being " "deleted: %s. Deletion of share server " "record will proceed anyway."), vserver) return self._delete_vserver(vserver, security_services=security_services) @na_utils.trace def _delete_vserver(self, vserver, security_services=None): """Delete a Vserver plus IPspace and security services as needed.""" ipspace_name = self._client.get_vserver_ipspace(vserver) vserver_client = self._get_api_client(vserver=vserver) self._client.delete_vserver(vserver, vserver_client, security_services=security_services) if ipspace_name and not self._client.ipspace_has_data_vservers( ipspace_name): self._client.delete_ipspace(ipspace_name) manila-2.0.0/manila/share/drivers/netapp/dataontap/__init__.py0000664000567000056710000000000012701407107025474 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/netapp/dataontap/client/0000775000567000056710000000000012701407265024660 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/netapp/dataontap/client/client_cmode.py0000664000567000056710000032416312701407107027663 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2015 Clinton Knight. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import hashlib import time from oslo_log import log from oslo_utils import strutils from oslo_utils import units import six from manila import exception from manila.i18n import _, _LE, _LW from manila.share.drivers.netapp.dataontap.client import api as netapp_api from manila.share.drivers.netapp.dataontap.client import client_base from manila.share.drivers.netapp import utils as na_utils LOG = log.getLogger(__name__) DELETED_PREFIX = 'deleted_manila_' DEFAULT_IPSPACE = 'Default' DEFAULT_BROADCAST_DOMAIN = 'OpenStack' DEFAULT_MAX_PAGE_LENGTH = 50 class NetAppCmodeClient(client_base.NetAppBaseClient): def __init__(self, **kwargs): super(NetAppCmodeClient, self).__init__(**kwargs) self.vserver = kwargs.get('vserver') self.connection.set_vserver(self.vserver) # Default values to run first api. 
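# NOTE: Illustrative sketch (not part of the client) of the ONTAPI version
# bootstrap performed in __init__ below: start from a conservative default
# version so the first call can succeed, ask the controller which version
# it actually speaks, then switch to it. ``connection`` here is a
# hypothetical object with set_api_version()/get_version() methods, not the
# real NetApp connection class.
def negotiate_api_version_sketch(connection, default_version=(1, 15)):
    connection.set_api_version(*default_version)
    major, minor = connection.get_version()
    connection.set_api_version(major, minor)
    return major, minor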
self.connection.set_api_version(1, 15) (major, minor) = self.get_ontapi_version(cached=False) self.connection.set_api_version(major, minor) self._init_features() def _init_features(self): """Initialize cDOT feature support map.""" super(NetAppCmodeClient, self)._init_features() ontapi_version = self.get_ontapi_version(cached=True) ontapi_1_20 = ontapi_version >= (1, 20) ontapi_1_30 = ontapi_version >= (1, 30) self.features.add_feature('SNAPMIRROR_V2', supported=ontapi_1_20) self.features.add_feature('BROADCAST_DOMAINS', supported=ontapi_1_30) self.features.add_feature('IPSPACES', supported=ontapi_1_30) self.features.add_feature('SUBNETS', supported=ontapi_1_30) self.features.add_feature('CLUSTER_PEER_POLICY', supported=ontapi_1_30) def _invoke_vserver_api(self, na_element, vserver): server = copy.copy(self.connection) server.set_vserver(vserver) result = server.invoke_successfully(na_element, True) return result def _has_records(self, api_result_element): if (not api_result_element.get_child_content('num-records') or api_result_element.get_child_content('num-records') == '0'): return False else: return True def _get_record_count(self, api_result_element): try: return int(api_result_element.get_child_content('num-records')) except TypeError: msg = _('Missing record count for NetApp iterator API invocation.') raise exception.NetAppException(msg) def set_vserver(self, vserver): self.vserver = vserver self.connection.set_vserver(vserver) def send_iter_request(self, api_name, api_args=None, max_page_length=DEFAULT_MAX_PAGE_LENGTH): """Invoke an iterator-style getter API.""" if not api_args: api_args = {} api_args['max-records'] = max_page_length # Get first page result = self.send_request(api_name, api_args) # Most commonly, we can just return here if there is no more data next_tag = result.get_child_content('next-tag') if not next_tag: return result # Ensure pagination data is valid and prepare to store remaining pages num_records = self._get_record_count(result) attributes_list = result.get_child_by_name('attributes-list') if not attributes_list: msg = _('Missing attributes list for API %s.') % api_name raise exception.NetAppException(msg) # Get remaining pages, saving data into first page while next_tag is not None: next_api_args = copy.deepcopy(api_args) next_api_args['tag'] = next_tag next_result = self.send_request(api_name, next_api_args) next_attributes_list = next_result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') for record in next_attributes_list.get_children(): attributes_list.add_child_elem(record) num_records += self._get_record_count(next_result) next_tag = next_result.get_child_content('next-tag') result.get_child_by_name('num-records').set_content( six.text_type(num_records)) result.get_child_by_name('next-tag').set_content('') return result @na_utils.trace def create_vserver(self, vserver_name, root_volume_aggregate_name, root_volume_name, aggregate_names, ipspace_name): """Creates new vserver and assigns aggregates.""" create_args = { 'vserver-name': vserver_name, 'root-volume-security-style': 'unix', 'root-volume-aggregate': root_volume_aggregate_name, 'root-volume': root_volume_name, 'name-server-switch': { 'nsswitch': 'file', }, } if ipspace_name: if not self.features.IPSPACES: msg = 'IPSpaces are not supported on this backend.' 
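# NOTE: Illustrative, standalone sketch (not part of the client) of the
# pagination pattern implemented by send_iter_request() above: request
# pages of at most ``page_size`` records and keep following the
# continuation tag until the backend stops returning one. ``fetch_page`` is
# a hypothetical callable returning (records, next_tag); the real code
# merges XML elements rather than Python lists.
def iterate_all_records_sketch(fetch_page, page_size=50):
    records = []
    tag = None
    while True:
        page, tag = fetch_page(tag=tag, max_records=page_size)
        records.extend(page)
        if not tag:
            break
    return records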
raise exception.NetAppException(msg) else: create_args['ipspace'] = ipspace_name self.send_request('vserver-create', create_args) aggr_list = [{'aggr-name': aggr_name} for aggr_name in aggregate_names] modify_args = { 'aggr-list': aggr_list, 'vserver-name': vserver_name, } self.send_request('vserver-modify', modify_args) @na_utils.trace def vserver_exists(self, vserver_name): """Checks if Vserver exists.""" LOG.debug('Checking if Vserver %s exists', vserver_name) api_args = { 'query': { 'vserver-info': { 'vserver-name': vserver_name, }, }, 'desired-attributes': { 'vserver-info': { 'vserver-name': None, }, }, } result = self.send_iter_request('vserver-get-iter', api_args) return self._has_records(result) @na_utils.trace def get_vserver_root_volume_name(self, vserver_name): """Get the root volume name of the vserver.""" api_args = { 'query': { 'vserver-info': { 'vserver-name': vserver_name, }, }, 'desired-attributes': { 'vserver-info': { 'root-volume': None, }, }, } vserver_info = self.send_iter_request('vserver-get-iter', api_args) try: root_volume_name = vserver_info.get_child_by_name( 'attributes-list').get_child_by_name( 'vserver-info').get_child_content('root-volume') except AttributeError: msg = _('Could not determine root volume name ' 'for Vserver %s.') % vserver_name raise exception.NetAppException(msg) return root_volume_name @na_utils.trace def get_vserver_ipspace(self, vserver_name): """Get the IPspace of the vserver, or None if not supported.""" if not self.features.IPSPACES: return None api_args = { 'query': { 'vserver-info': { 'vserver-name': vserver_name, }, }, 'desired-attributes': { 'vserver-info': { 'ipspace': None, }, }, } vserver_info = self.send_iter_request('vserver-get-iter', api_args) try: ipspace = vserver_info.get_child_by_name( 'attributes-list').get_child_by_name( 'vserver-info').get_child_content('ipspace') except AttributeError: msg = _('Could not determine IPspace for Vserver %s.') raise exception.NetAppException(msg % vserver_name) return ipspace @na_utils.trace def ipspace_has_data_vservers(self, ipspace_name): """Check whether an IPspace has any data Vservers assigned to it.""" if not self.features.IPSPACES: return False api_args = { 'query': { 'vserver-info': { 'ipspace': ipspace_name, 'vserver-type': 'data' }, }, 'desired-attributes': { 'vserver-info': { 'vserver-name': None, }, }, } result = self.send_iter_request('vserver-get-iter', api_args) return self._has_records(result) @na_utils.trace def list_vservers(self, vserver_type='data'): """Get the names of vservers present, optionally filtered by type.""" query = { 'vserver-info': { 'vserver-type': vserver_type, } } if vserver_type else None api_args = { 'desired-attributes': { 'vserver-info': { 'vserver-name': None, }, }, } if query: api_args['query'] = query result = self.send_iter_request('vserver-get-iter', api_args) vserver_info_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') return [vserver_info.get_child_content('vserver-name') for vserver_info in vserver_info_list.get_children()] @na_utils.trace def get_vserver_volume_count(self): """Get the number of volumes present on a cluster or vserver. Call this on a vserver client to see how many volumes exist on that vserver. 
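# NOTE: Illustrative sketch (not part of the client) of the request shape
# used by the *-get-iter calls above: a 'query' block narrows the results
# and a 'desired-attributes' block limits which fields the backend returns.
# The helper below only builds the dictionary; it does not talk to ONTAP.
def build_vserver_query_sketch(vserver_name, wanted_field='vserver-name'):
    return {
        'query': {
            'vserver-info': {
                'vserver-name': vserver_name,
            },
        },
        'desired-attributes': {
            'vserver-info': {
                wanted_field: None,
            },
        },
    }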
""" api_args = { 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'name': None, }, }, }, } volumes_data = self.send_iter_request('volume-get-iter', api_args) return self._get_record_count(volumes_data) @na_utils.trace def delete_vserver(self, vserver_name, vserver_client, security_services=None): """Delete Vserver. Checks if Vserver exists and does not have active shares. Offlines and destroys root volumes. Deletes Vserver. """ if not self.vserver_exists(vserver_name): LOG.error(_LE("Vserver %s does not exist."), vserver_name) return root_volume_name = self.get_vserver_root_volume_name(vserver_name) volumes_count = vserver_client.get_vserver_volume_count() if volumes_count == 1: try: vserver_client.offline_volume(root_volume_name) except netapp_api.NaApiError as e: if e.code == netapp_api.EVOLUMEOFFLINE: LOG.error(_LE("Volume %s is already offline."), root_volume_name) else: raise e vserver_client.delete_volume(root_volume_name) elif volumes_count > 1: msg = _("Cannot delete Vserver. Vserver %s has shares.") raise exception.NetAppException(msg % vserver_name) if security_services: self._terminate_vserver_services(vserver_name, vserver_client, security_services) self.send_request('vserver-destroy', {'vserver-name': vserver_name}) @na_utils.trace def _terminate_vserver_services(self, vserver_name, vserver_client, security_services): for service in security_services: if service['type'] == 'active_directory': api_args = { 'admin-password': service['password'], 'admin-username': service['user'], } try: vserver_client.send_request('cifs-server-delete', api_args) except netapp_api.NaApiError as e: if e.code == netapp_api.EOBJECTNOTFOUND: LOG.error(_LE('CIFS server does not exist for ' 'Vserver %s.'), vserver_name) else: vserver_client.send_request('cifs-server-delete') @na_utils.trace def list_cluster_nodes(self): """Get all available cluster nodes.""" api_args = { 'desired-attributes': { 'node-details-info': { 'node': None, }, }, } result = self.send_iter_request('system-node-get-iter', api_args) nodes_info_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') return [node_info.get_child_content('node') for node_info in nodes_info_list.get_children()] @na_utils.trace def list_node_data_ports(self, node): ports = self.get_node_data_ports(node) return [port.get('port') for port in ports] @na_utils.trace def get_node_data_ports(self, node): """Get applicable data ports on the node.""" api_args = { 'query': { 'net-port-info': { 'node': node, 'link-status': 'up', 'port-type': 'physical|if_group', 'role': 'data', }, }, 'desired-attributes': { 'net-port-info': { 'port': None, 'node': None, 'operational-speed': None, 'ifgrp-port': None, }, }, } result = self.send_iter_request('net-port-get-iter', api_args) net_port_info_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') ports = [] for port_info in net_port_info_list.get_children(): # Skip physical ports that are part of interface groups. 
if port_info.get_child_content('ifgrp-port'): continue port = { 'node': port_info.get_child_content('node'), 'port': port_info.get_child_content('port'), 'speed': port_info.get_child_content('operational-speed'), } ports.append(port) return self._sort_data_ports_by_speed(ports) @na_utils.trace def _sort_data_ports_by_speed(self, ports): def sort_key(port): value = port.get('speed') if not (value and isinstance(value, six.string_types)): return 0 elif value.isdigit(): return int(value) elif value == 'auto': return 3 elif value == 'undef': return 2 else: return 1 return sorted(ports, key=sort_key, reverse=True) @na_utils.trace def list_aggregates(self): """Get names of all aggregates.""" try: api_args = { 'desired-attributes': { 'aggr-attributes': { 'aggregate-name': None, }, }, } result = self.send_iter_request('aggr-get-iter', api_args) aggr_list = result.get_child_by_name( 'attributes-list').get_children() except AttributeError: msg = _("Could not list aggregates.") raise exception.NetAppException(msg) return [aggr.get_child_content('aggregate-name') for aggr in aggr_list] @na_utils.trace def list_vserver_aggregates(self): """Returns a list of aggregates available to a vserver. This must be called against a Vserver LIF. """ return list(self.get_vserver_aggregate_capacities().keys()) @na_utils.trace def create_network_interface(self, ip, netmask, vlan, node, port, vserver_name, lif_name, ipspace_name): """Creates LIF on VLAN port.""" home_port_name = port if vlan: self._create_vlan(node, port, vlan) home_port_name = '%(port)s-%(tag)s' % {'port': port, 'tag': vlan} if self.features.BROADCAST_DOMAINS: self._ensure_broadcast_domain_for_port(node, home_port_name, ipspace=ipspace_name) LOG.debug('Creating LIF %(lif)s for Vserver %(vserver)s ', {'lif': lif_name, 'vserver': vserver_name}) api_args = { 'address': ip, 'administrative-status': 'up', 'data-protocols': [ {'data-protocol': 'nfs'}, {'data-protocol': 'cifs'}, ], 'home-node': node, 'home-port': home_port_name, 'netmask': netmask, 'interface-name': lif_name, 'role': 'data', 'vserver': vserver_name, } self.send_request('net-interface-create', api_args) @na_utils.trace def _create_vlan(self, node, port, vlan): try: api_args = { 'vlan-info': { 'parent-interface': port, 'node': node, 'vlanid': vlan, }, } self.send_request('net-vlan-create', api_args) except netapp_api.NaApiError as e: if e.code == netapp_api.EDUPLICATEENTRY: LOG.debug('VLAN %(vlan)s already exists on port %(port)s', {'vlan': vlan, 'port': port}) else: msg = _('Failed to create VLAN %(vlan)s on ' 'port %(port)s. %(err_msg)s') msg_args = {'vlan': vlan, 'port': port, 'err_msg': e.message} raise exception.NetAppException(msg % msg_args) @na_utils.trace def _ensure_broadcast_domain_for_port(self, node, port, domain=DEFAULT_BROADCAST_DOMAIN, ipspace=DEFAULT_IPSPACE): """Ensure a port is in a broadcast domain. Create one if necessary. If the IPspace:domain pair match for the given port, which commonly happens in multi-node clusters, then there isn't anything to do. Otherwise, we can assume the IPspace is correct and extant by this point, so the remaining task is to remove the port from any domain it is already in, create the desired domain if it doesn't exist, and add the port to the desired domain. """ port_info = self._get_broadcast_domain_for_port(node, port) # Port already in desired ipspace and broadcast domain. if (port_info['ipspace'] == ipspace and port_info['broadcast-domain'] == domain): return # If in another broadcast domain, remove port from it. 
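# NOTE: Illustrative, standalone sketch (not part of the client) of the
# ordering used by _sort_data_ports_by_speed() above: large numeric speeds
# sort first, then 'auto', then 'undef', then other strings, then ports
# with no usable speed value. The sample data is made up.
def sort_ports_by_speed_sketch(ports):
    def sort_key(port):
        value = port.get('speed')
        if not isinstance(value, str) or not value:
            return 0
        if value.isdigit():
            return int(value)
        if value == 'auto':
            return 3
        if value == 'undef':
            return 2
        return 1
    return sorted(ports, key=sort_key, reverse=True)


# Example: ports reporting '10000', 'auto' and no speed sort in that order.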
if port_info['broadcast-domain']: self._remove_port_from_broadcast_domain( node, port, port_info['broadcast-domain'], port_info['ipspace']) # If desired broadcast domain doesn't exist, create it. if not self._broadcast_domain_exists(domain, ipspace): self._create_broadcast_domain(domain, ipspace) # Move the port into the broadcast domain where it is needed. self._add_port_to_broadcast_domain(node, port, domain, ipspace) @na_utils.trace def _get_broadcast_domain_for_port(self, node, port): """Get broadcast domain for a specific port.""" api_args = { 'query': { 'net-port-info': { 'node': node, 'port': port, }, }, 'desired-attributes': { 'net-port-info': { 'broadcast-domain': None, 'ipspace': None, }, }, } result = self.send_iter_request('net-port-get-iter', api_args) net_port_info_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') port_info = net_port_info_list.get_children() if not port_info: msg = _('Could not find port %(port)s on node %(node)s.') msg_args = {'port': port, 'node': node} raise exception.NetAppException(msg % msg_args) port = { 'broadcast-domain': port_info[0].get_child_content('broadcast-domain'), 'ipspace': port_info[0].get_child_content('ipspace') } return port @na_utils.trace def _broadcast_domain_exists(self, domain, ipspace): """Check if a broadcast domain exists.""" api_args = { 'query': { 'net-port-broadcast-domain-info': { 'ipspace': ipspace, 'broadcast-domain': domain, }, }, 'desired-attributes': { 'net-port-broadcast-domain-info': None, }, } result = self.send_iter_request('net-port-broadcast-domain-get-iter', api_args) return self._has_records(result) @na_utils.trace def _create_broadcast_domain(self, domain, ipspace, mtu=1500): """Create a broadcast domain.""" api_args = { 'ipspace': ipspace, 'broadcast-domain': domain, 'mtu': mtu, } self.send_request('net-port-broadcast-domain-create', api_args) @na_utils.trace def _delete_broadcast_domain(self, domain, ipspace): """Delete a broadcast domain.""" api_args = { 'ipspace': ipspace, 'broadcast-domain': domain, } self.send_request('net-port-broadcast-domain-destroy', api_args) def _delete_broadcast_domains_for_ipspace(self, ipspace_name): """Deletes all broadcast domains in an IPspace.""" ipspaces = self.get_ipspaces(ipspace_name=ipspace_name) if not ipspaces: return ipspace = ipspaces[0] for broadcast_domain_name in ipspace['broadcast-domains']: self._delete_broadcast_domain(broadcast_domain_name, ipspace_name) @na_utils.trace def _add_port_to_broadcast_domain(self, node, port, domain, ipspace): qualified_port_name = ':'.join([node, port]) try: api_args = { 'ipspace': ipspace, 'broadcast-domain': domain, 'ports': { 'net-qualified-port-name': qualified_port_name, } } self.send_request('net-port-broadcast-domain-add-ports', api_args) except netapp_api.NaApiError as e: if e.code == (netapp_api. E_VIFMGR_PORT_ALREADY_ASSIGNED_TO_BROADCAST_DOMAIN): LOG.debug('Port %(port)s already exists in broadcast domain ' '%(domain)s', {'port': port, 'domain': domain}) else: msg = _('Failed to add port %(port)s to broadcast domain ' '%(domain)s. 
%(err_msg)s') msg_args = { 'port': qualified_port_name, 'domain': domain, 'err_msg': e.message, } raise exception.NetAppException(msg % msg_args) @na_utils.trace def _remove_port_from_broadcast_domain(self, node, port, domain, ipspace): qualified_port_name = ':'.join([node, port]) api_args = { 'ipspace': ipspace, 'broadcast-domain': domain, 'ports': { 'net-qualified-port-name': qualified_port_name, } } self.send_request('net-port-broadcast-domain-remove-ports', api_args) @na_utils.trace def network_interface_exists(self, vserver_name, node, port, ip, netmask, vlan): """Checks if LIF exists.""" home_port_name = (port if not vlan else '%(port)s-%(tag)s' % {'port': port, 'tag': vlan}) api_args = { 'query': { 'net-interface-info': { 'address': ip, 'home-node': node, 'home-port': home_port_name, 'netmask': netmask, 'vserver': vserver_name, }, }, 'desired-attributes': { 'net-interface-info': { 'interface-name': None, }, }, } result = self.send_iter_request('net-interface-get-iter', api_args) return self._has_records(result) @na_utils.trace def list_network_interfaces(self): """Get the names of available LIFs.""" api_args = { 'desired-attributes': { 'net-interface-info': { 'interface-name': None, }, }, } result = self.send_iter_request('net-interface-get-iter', api_args) lif_info_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') return [lif_info.get_child_content('interface-name') for lif_info in lif_info_list.get_children()] @na_utils.trace def get_network_interfaces(self, protocols=None): """Get available LIFs.""" protocols = na_utils.convert_to_list(protocols) protocols = [protocol.lower() for protocol in protocols] api_args = { 'query': { 'net-interface-info': { 'data-protocols': { 'data-protocol': '|'.join(protocols), } } } } if protocols else None result = self.send_iter_request('net-interface-get-iter', api_args) lif_info_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') interfaces = [] for lif_info in lif_info_list.get_children(): lif = { 'address': lif_info.get_child_content('address'), 'home-node': lif_info.get_child_content('home-node'), 'home-port': lif_info.get_child_content('home-port'), 'interface-name': lif_info.get_child_content('interface-name'), 'netmask': lif_info.get_child_content('netmask'), 'role': lif_info.get_child_content('role'), 'vserver': lif_info.get_child_content('vserver'), } interfaces.append(lif) return interfaces @na_utils.trace def delete_network_interface(self, interface_name): """Deletes LIF.""" api_args = {'vserver': None, 'interface-name': interface_name} self.send_request('net-interface-delete', api_args) @na_utils.trace def get_ipspaces(self, ipspace_name=None): """Gets one or more IPSpaces.""" if not self.features.IPSPACES: return [] api_args = {} if ipspace_name: api_args['query'] = { 'net-ipspaces-info': { 'ipspace': ipspace_name, } } result = self.send_iter_request('net-ipspaces-get-iter', api_args) if not self._has_records(result): return [] ipspaces = [] for net_ipspaces_info in result.get_child_by_name( 'attributes-list').get_children(): ipspace = { 'ports': [], 'vservers': [], 'broadcast-domains': [], } ports = net_ipspaces_info.get_child_by_name( 'ports') or netapp_api.NaElement('none') for port in ports.get_children(): ipspace['ports'].append(port.get_content()) vservers = net_ipspaces_info.get_child_by_name( 'vservers') or netapp_api.NaElement('none') for vserver in vservers.get_children(): ipspace['vservers'].append(vserver.get_content()) broadcast_domains = 
net_ipspaces_info.get_child_by_name( 'broadcast-domains') or netapp_api.NaElement('none') for broadcast_domain in broadcast_domains.get_children(): ipspace['broadcast-domains'].append( broadcast_domain.get_content()) ipspace['ipspace'] = net_ipspaces_info.get_child_content('ipspace') ipspace['id'] = net_ipspaces_info.get_child_content('id') ipspace['uuid'] = net_ipspaces_info.get_child_content('uuid') ipspaces.append(ipspace) return ipspaces @na_utils.trace def ipspace_exists(self, ipspace_name): """Checks if IPspace exists.""" if not self.features.IPSPACES: return False api_args = { 'query': { 'net-ipspaces-info': { 'ipspace': ipspace_name, }, }, 'desired-attributes': { 'net-ipspaces-info': { 'ipspace': None, }, }, } result = self.send_iter_request('net-ipspaces-get-iter', api_args) return self._has_records(result) @na_utils.trace def create_ipspace(self, ipspace_name): """Creates an IPspace.""" api_args = {'ipspace': ipspace_name} self.send_request('net-ipspaces-create', api_args) @na_utils.trace def delete_ipspace(self, ipspace_name): """Deletes an IPspace.""" self._delete_broadcast_domains_for_ipspace(ipspace_name) api_args = {'ipspace': ipspace_name} self.send_request('net-ipspaces-destroy', api_args) @na_utils.trace def add_vserver_to_ipspace(self, ipspace_name, vserver_name): """Assigns a vserver to an IPspace.""" api_args = {'ipspace': ipspace_name, 'vserver': vserver_name} self.send_request('net-ipspaces-assign-vserver', api_args) @na_utils.trace def get_node_for_aggregate(self, aggregate_name): """Get home node for the specified aggregate. This API could return None, most notably if it was sent to a Vserver LIF, so the caller must be able to handle that case. """ if not aggregate_name: return None desired_attributes = { 'aggr-attributes': { 'aggregate-name': None, 'aggr-ownership-attributes': { 'home-name': None, }, }, } try: aggrs = self._get_aggregates(aggregate_names=[aggregate_name], desired_attributes=desired_attributes) except netapp_api.NaApiError as e: if e.code == netapp_api.EAPINOTFOUND: return None else: raise e if len(aggrs) < 1: return None aggr_ownership_attrs = aggrs[0].get_child_by_name( 'aggr-ownership-attributes') or netapp_api.NaElement('none') return aggr_ownership_attrs.get_child_content('home-name') @na_utils.trace def get_cluster_aggregate_capacities(self, aggregate_names): """Calculates capacity of one or more aggregates. Returns dictionary of aggregate capacity metrics. 'size-used' is the actual space consumed on the aggregate. 'size-available' is the actual space remaining. 'size-total' is the defined total aggregate size, such that used + available = total. """ if aggregate_names is not None and len(aggregate_names) == 0: return {} desired_attributes = { 'aggr-attributes': { 'aggregate-name': None, 'aggr-space-attributes': { 'size-available': None, 'size-total': None, 'size-used': None, }, }, } aggrs = self._get_aggregates(aggregate_names=aggregate_names, desired_attributes=desired_attributes) aggr_space_dict = dict() for aggr in aggrs: aggr_name = aggr.get_child_content('aggregate-name') aggr_space_attrs = aggr.get_child_by_name('aggr-space-attributes') aggr_space_dict[aggr_name] = { 'available': int(aggr_space_attrs.get_child_content('size-available')), 'total': int(aggr_space_attrs.get_child_content('size-total')), 'used': int(aggr_space_attrs.get_child_content('size-used')), } return aggr_space_dict @na_utils.trace def get_vserver_aggregate_capacities(self, aggregate_names=None): """Calculates capacity of one or more aggregates for a vserver. 
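# NOTE: Illustrative sketch (not part of the client) of the capacity
# figures returned by get_cluster_aggregate_capacities() above, where for
# each aggregate size-used + size-available = size-total. The helper
# derives a utilization ratio from such a record; the numbers in the
# example are made up.
def aggregate_utilization_sketch(capacity):
    """``capacity`` is a dict with integer 'used', 'available', 'total'."""
    total = capacity['total'] or (capacity['used'] + capacity['available'])
    return float(capacity['used']) / total if total else 0.0


# Example:
#   aggregate_utilization_sketch({'used': 40, 'available': 60, 'total': 100})
#   -> 0.4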
Returns dictionary of aggregate capacity metrics. This must be called against a Vserver LIF. """ if aggregate_names is not None and len(aggregate_names) == 0: return {} api_args = { 'desired-attributes': { 'vserver-info': { 'vserver-name': None, 'vserver-aggr-info-list': { 'vserver-aggr-info': { 'aggr-name': None, 'aggr-availsize': None, }, }, }, }, } result = self.send_request('vserver-get', api_args) attributes = result.get_child_by_name('attributes') if not attributes: raise exception.NetAppException('Failed to read Vserver info') vserver_info = attributes.get_child_by_name('vserver-info') vserver_name = vserver_info.get_child_content('vserver-name') vserver_aggr_info_element = vserver_info.get_child_by_name( 'vserver-aggr-info-list') or netapp_api.NaElement('none') vserver_aggr_info_list = vserver_aggr_info_element.get_children() if not vserver_aggr_info_list: LOG.warning(_LW('No aggregates assigned to Vserver %s.'), vserver_name) # Return dict of key-value pair of aggr_name:aggr_size_available. aggr_space_dict = {} for aggr_info in vserver_aggr_info_list: aggr_name = aggr_info.get_child_content('aggr-name') if aggregate_names is None or aggr_name in aggregate_names: aggr_size = int(aggr_info.get_child_content('aggr-availsize')) aggr_space_dict[aggr_name] = {'available': aggr_size} LOG.debug('Found available Vserver aggregates: %s', aggr_space_dict) return aggr_space_dict @na_utils.trace def _get_aggregates(self, aggregate_names=None, desired_attributes=None): query = { 'aggr-attributes': { 'aggregate-name': '|'.join(aggregate_names), } } if aggregate_names else None api_args = {} if query: api_args['query'] = query if desired_attributes: api_args['desired-attributes'] = desired_attributes result = self.send_iter_request('aggr-get-iter', api_args) if not self._has_records(result): return [] else: return result.get_child_by_name('attributes-list').get_children() @na_utils.trace def setup_security_services(self, security_services, vserver_client, vserver_name): api_args = { 'name-mapping-switch': [ {'nmswitch': 'ldap'}, {'nmswitch': 'file'} ], 'name-server-switch': [ {'nsswitch': 'ldap'}, {'nsswitch': 'file'} ], 'vserver-name': vserver_name, } self.send_request('vserver-modify', api_args) for security_service in security_services: if security_service['type'].lower() == 'ldap': vserver_client.configure_ldap(security_service) elif security_service['type'].lower() == 'active_directory': vserver_client.configure_active_directory(security_service, vserver_name) elif security_service['type'].lower() == 'kerberos': self.create_kerberos_realm(security_service) vserver_client.configure_kerberos(security_service, vserver_name) else: msg = _('Unsupported security service type %s for ' 'Data ONTAP driver') raise exception.NetAppException(msg % security_service['type']) @na_utils.trace def enable_nfs(self): """Enables NFS on Vserver.""" self.send_request('nfs-enable') self.send_request('nfs-service-modify', {'is-nfsv40-enabled': 'true'}) api_args = { 'client-match': '0.0.0.0/0', 'policy-name': 'default', 'ro-rule': { 'security-flavor': 'any', }, 'rw-rule': { 'security-flavor': 'never', }, } self.send_request('export-rule-create', api_args) @na_utils.trace def configure_ldap(self, security_service): """Configures LDAP on Vserver.""" config_name = hashlib.md5(six.b(security_service['id'])).hexdigest() api_args = { 'ldap-client-config': config_name, 'servers': { 'ip-address': security_service['server'], }, 'tcp-port': '389', 'schema': 'RFC-2307', 'bind-password': security_service['password'], } 
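# NOTE: Illustrative sketch (not part of the client) of how configure_ldap()
# above derives a deterministic LDAP client-config name: the MD5 hex digest
# of the security service ID, so repeated setup calls for the same service
# reuse one config name.
import hashlib


def ldap_config_name_sketch(security_service_id):
    return hashlib.md5(security_service_id.encode('utf-8')).hexdigest()


# Example: ldap_config_name_sketch('my-service-id') always yields the same
# 32-character hex string for the same ID.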
self.send_request('ldap-client-create', api_args) api_args = {'client-config': config_name, 'client-enabled': 'true'} self.send_request('ldap-config-create', api_args) @na_utils.trace def configure_active_directory(self, security_service, vserver_name): """Configures AD on Vserver.""" self.configure_dns(security_service) # 'cifs-server' is CIFS Server NetBIOS Name, max length is 15. # Should be unique within each domain (data['domain']). cifs_server = (vserver_name[0:7] + '..' + vserver_name[-6:]).upper() api_args = { 'admin-username': security_service['user'], 'admin-password': security_service['password'], 'force-account-overwrite': 'true', 'cifs-server': cifs_server, 'domain': security_service['domain'], } try: LOG.debug("Trying to setup CIFS server with data: %s", api_args) self.send_request('cifs-server-create', api_args) except netapp_api.NaApiError as e: msg = _("Failed to create CIFS server entry. %s") raise exception.NetAppException(msg % e.message) @na_utils.trace def create_kerberos_realm(self, security_service): """Creates Kerberos realm on cluster.""" api_args = { 'admin-server-ip': security_service['server'], 'admin-server-port': '749', 'clock-skew': '5', 'comment': '', 'config-name': security_service['id'], 'kdc-ip': security_service['server'], 'kdc-port': '88', 'kdc-vendor': 'other', 'password-server-ip': security_service['server'], 'password-server-port': '464', 'realm': security_service['domain'].upper(), } try: self.send_request('kerberos-realm-create', api_args) except netapp_api.NaApiError as e: if e.code == netapp_api.EDUPLICATEENTRY: LOG.debug('Kerberos realm config already exists.') else: msg = _('Failed to create Kerberos realm. %s') raise exception.NetAppException(msg % e.message) @na_utils.trace def configure_kerberos(self, security_service, vserver_name): """Configures Kerberos for NFS on Vserver.""" self.configure_dns(security_service) spn = self._get_kerberos_service_principal_name( security_service, vserver_name) lifs = self.list_network_interfaces() if not lifs: msg = _("Cannot set up Kerberos. There are no LIFs configured.") raise exception.NetAppException(msg) for lif_name in lifs: api_args = { 'admin-password': security_service['password'], 'admin-user-name': security_service['user'], 'interface-name': lif_name, 'is-kerberos-enabled': 'true', 'service-principal-name': spn, } self.send_request('kerberos-config-modify', api_args) @na_utils.trace def _get_kerberos_service_principal_name(self, security_service, vserver_name): return 'nfs/' + vserver_name.replace('_', '-') + '.' + \ security_service['domain'] + '@' + \ security_service['domain'].upper() @na_utils.trace def configure_dns(self, security_service): api_args = { 'domains': { 'string': security_service['domain'], }, 'name-servers': { 'ip-address': security_service['dns_ip'], }, 'dns-state': 'enabled', } try: self.send_request('net-dns-create', api_args) except netapp_api.NaApiError as e: if e.code == netapp_api.EDUPLICATEENTRY: LOG.error(_LE("DNS exists for Vserver.")) else: msg = _("Failed to configure DNS. 
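# NOTE: Illustrative sketch (not part of the client) of two name
# derivations used above: the CIFS server NetBIOS name is squeezed into 15
# characters by joining the first 7 and last 6 characters of the Vserver
# name with '..', and the Kerberos SPN is built as
# nfs/<vserver-with-dashes>.<domain>@<DOMAIN>. The sample inputs are made
# up.
def cifs_server_name_sketch(vserver_name):
    return (vserver_name[0:7] + '..' + vserver_name[-6:]).upper()


def kerberos_spn_sketch(vserver_name, domain):
    return ('nfs/' + vserver_name.replace('_', '-') + '.' + domain +
            '@' + domain.upper())


# Example:
#   cifs_server_name_sketch('os_1234567890abcd') -> 'OS_1234..90ABCD'
#   kerberos_spn_sketch('os_abc', 'example.com')
#   -> 'nfs/os-abc.example.com@EXAMPLE.COM'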
%s") raise exception.NetAppException(msg % e.message) @na_utils.trace def create_volume(self, aggregate_name, volume_name, size_gb, thin_provisioned=False, snapshot_policy=None, language=None, dedup_enabled=False, compression_enabled=False, max_files=None, snapshot_reserve=None, volume_type='rw'): """Creates a volume.""" api_args = { 'containing-aggr-name': aggregate_name, 'size': six.text_type(size_gb) + 'g', 'volume': volume_name, 'volume-type': volume_type, } if volume_type != 'dp': api_args['junction-path'] = '/%s' % volume_name if thin_provisioned: api_args['space-reserve'] = 'none' if snapshot_policy is not None: api_args['snapshot-policy'] = snapshot_policy if language is not None: api_args['language-code'] = language if snapshot_reserve is not None: api_args['percentage-snapshot-reserve'] = six.text_type( snapshot_reserve) self.send_request('volume-create', api_args) # cDOT compression requires that deduplication be enabled. if dedup_enabled or compression_enabled: self.enable_dedup(volume_name) if compression_enabled: self.enable_compression(volume_name) if max_files is not None: self.set_volume_max_files(volume_name, max_files) @na_utils.trace def enable_dedup(self, volume_name): """Enable deduplication on volume.""" api_args = {'path': '/vol/%s' % volume_name} self.send_request('sis-enable', api_args) @na_utils.trace def disable_dedup(self, volume_name): """Disable deduplication on volume.""" api_args = {'path': '/vol/%s' % volume_name} self.send_request('sis-disable', api_args) @na_utils.trace def enable_compression(self, volume_name): """Enable compression on volume.""" api_args = { 'path': '/vol/%s' % volume_name, 'enable-compression': 'true' } self.send_request('sis-set-config', api_args) @na_utils.trace def disable_compression(self, volume_name): """Disable compression on volume.""" api_args = { 'path': '/vol/%s' % volume_name, 'enable-compression': 'false' } self.send_request('sis-set-config', api_args) @na_utils.trace def get_volume_efficiency_status(self, volume_name): """Get dedupe & compression status for a volume.""" api_args = { 'query': { 'sis-status-info': { 'path': '/vol/%s' % volume_name, }, }, 'desired-attributes': { 'sis-status-info': { 'state': None, 'is-compression-enabled': None, }, }, } result = self.send_iter_request('sis-get-iter', api_args) attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') sis_status_info = attributes_list.get_child_by_name( 'sis-status-info') or netapp_api.NaElement('none') return { 'dedupe': True if 'enabled' == sis_status_info.get_child_content( 'state') else False, 'compression': True if 'true' == sis_status_info.get_child_content( 'is-compression-enabled') else False, } @na_utils.trace def set_volume_max_files(self, volume_name, max_files): """Set flexvol file limit.""" api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': volume_name, }, }, }, 'attributes': { 'volume-attributes': { 'volume-inode-attributes': { 'files-total': max_files, }, }, }, } self.send_request('volume-modify-iter', api_args) @na_utils.trace def set_volume_size(self, volume_name, size_gb): """Set volume size.""" api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': volume_name, }, }, }, 'attributes': { 'volume-attributes': { 'volume-space-attributes': { 'size': int(size_gb) * units.Gi, }, }, }, } result = self.send_request('volume-modify-iter', api_args) failures = result.get_child_content('num-failed') if failures and int(failures) > 0: failure_list = 
result.get_child_by_name( 'failure-list') or netapp_api.NaElement('none') errors = failure_list.get_children() if errors: raise netapp_api.NaApiError( errors[0].get_child_content('error-code'), errors[0].get_child_content('error-message')) @na_utils.trace def set_volume_name(self, volume_name, new_volume_name): """Set flexvol name.""" api_args = { 'volume': volume_name, 'new-volume-name': new_volume_name, } self.send_request('volume-rename', api_args) @na_utils.trace def manage_volume(self, aggregate_name, volume_name, thin_provisioned=False, snapshot_policy=None, language=None, dedup_enabled=False, compression_enabled=False, max_files=None): """Update volume as needed to bring under management as a share.""" api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'containing-aggregate-name': aggregate_name, 'name': volume_name, }, }, }, 'attributes': { 'volume-attributes': { 'volume-inode-attributes': {}, 'volume-language-attributes': {}, 'volume-snapshot-attributes': {}, 'volume-space-attributes': { 'space-guarantee': ('none' if thin_provisioned else 'volume') }, }, }, } if language: api_args['attributes']['volume-attributes'][ 'volume-language-attributes']['language'] = language if max_files: api_args['attributes']['volume-attributes'][ 'volume-inode-attributes']['files-total'] = max_files if snapshot_policy: api_args['attributes']['volume-attributes'][ 'volume-snapshot-attributes'][ 'snapshot-policy'] = snapshot_policy self.send_request('volume-modify-iter', api_args) # Efficiency options must be handled separately self.update_volume_efficiency_attributes(volume_name, dedup_enabled, compression_enabled) @na_utils.trace def update_volume_efficiency_attributes(self, volume_name, dedup_enabled, compression_enabled): """Update dedupe & compression attributes to match desired values.""" efficiency_status = self.get_volume_efficiency_status(volume_name) if efficiency_status['compression'] != compression_enabled: if compression_enabled: self.enable_compression(volume_name) else: self.disable_compression(volume_name) if efficiency_status['dedupe'] != dedup_enabled: if dedup_enabled: self.enable_dedup(volume_name) else: self.disable_dedup(volume_name) @na_utils.trace def volume_exists(self, volume_name): """Checks if volume exists.""" LOG.debug('Checking if volume %s exists', volume_name) api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': volume_name, }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'name': None, }, }, }, } result = self.send_iter_request('volume-get-iter', api_args) return self._has_records(result) @na_utils.trace def get_aggregate_for_volume(self, volume_name): """Get the name of the aggregate containing a volume.""" api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': volume_name, }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'containing-aggregate-name': None, 'name': None, }, }, }, } result = self.send_iter_request('volume-get-iter', api_args) attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') volume_attributes = attributes_list.get_child_by_name( 'volume-attributes') or netapp_api.NaElement('none') volume_id_attributes = volume_attributes.get_child_by_name( 'volume-id-attributes') or netapp_api.NaElement('none') aggregate = volume_id_attributes.get_child_content( 'containing-aggregate-name') if not aggregate: msg = _('Could not find aggregate for volume %s.') raise 
exception.NetAppException(msg % volume_name) return aggregate @na_utils.trace def volume_has_luns(self, volume_name): """Checks if volume has LUNs.""" LOG.debug('Checking if volume %s has LUNs', volume_name) api_args = { 'query': { 'lun-info': { 'volume': volume_name, }, }, 'desired-attributes': { 'lun-info': { 'path': None, }, }, } result = self.send_iter_request('lun-get-iter', api_args) return self._has_records(result) @na_utils.trace def volume_has_junctioned_volumes(self, volume_name): """Checks if volume has volumes mounted beneath its junction path.""" junction_path = self.get_volume_junction_path(volume_name) if not junction_path: return False api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'junction-path': junction_path + '/*', }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'name': None, }, }, }, } result = self.send_iter_request('volume-get-iter', api_args) return self._has_records(result) @na_utils.trace def get_volume_at_junction_path(self, junction_path): """Returns the volume with the specified junction path, if present.""" if not junction_path: return None api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'junction-path': junction_path, }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'containing-aggregate-name': None, 'junction-path': None, 'name': None, 'type': None, 'style': None, }, 'volume-space-attributes': { 'size': None, } }, }, } result = self.send_iter_request('volume-get-iter', api_args) if not self._has_records(result): return None attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') volume_attributes = attributes_list.get_child_by_name( 'volume-attributes') or netapp_api.NaElement('none') volume_id_attributes = volume_attributes.get_child_by_name( 'volume-id-attributes') or netapp_api.NaElement('none') volume_space_attributes = volume_attributes.get_child_by_name( 'volume-space-attributes') or netapp_api.NaElement('none') volume = { 'aggregate': volume_id_attributes.get_child_content( 'containing-aggregate-name'), 'junction-path': volume_id_attributes.get_child_content( 'junction-path'), 'name': volume_id_attributes.get_child_content('name'), 'type': volume_id_attributes.get_child_content('type'), 'style': volume_id_attributes.get_child_content('style'), 'size': volume_space_attributes.get_child_content('size'), } return volume @na_utils.trace def get_volume_to_manage(self, aggregate_name, volume_name): """Get flexvol to be managed by Manila.""" api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'containing-aggregate-name': aggregate_name, 'name': volume_name, }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'containing-aggregate-name': None, 'junction-path': None, 'name': None, 'type': None, 'style': None, }, 'volume-space-attributes': { 'size': None, } }, }, } result = self.send_iter_request('volume-get-iter', api_args) if not self._has_records(result): return None attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') volume_attributes = attributes_list.get_child_by_name( 'volume-attributes') or netapp_api.NaElement('none') volume_id_attributes = volume_attributes.get_child_by_name( 'volume-id-attributes') or netapp_api.NaElement('none') volume_space_attributes = volume_attributes.get_child_by_name( 'volume-space-attributes') or netapp_api.NaElement('none') volume = { 'aggregate': 
volume_id_attributes.get_child_content( 'containing-aggregate-name'), 'junction-path': volume_id_attributes.get_child_content( 'junction-path'), 'name': volume_id_attributes.get_child_content('name'), 'type': volume_id_attributes.get_child_content('type'), 'style': volume_id_attributes.get_child_content('style'), 'size': volume_space_attributes.get_child_content('size'), } return volume @na_utils.trace def create_volume_clone(self, volume_name, parent_volume_name, parent_snapshot_name=None): """Clones a volume.""" api_args = { 'volume': volume_name, 'parent-volume': parent_volume_name, 'parent-snapshot': parent_snapshot_name, 'junction-path': '/%s' % volume_name, } self.send_request('volume-clone-create', api_args) @na_utils.trace def split_volume_clone(self, volume_name): """Begins splitting a clone from its parent.""" try: api_args = {'volume': volume_name} self.send_request('volume-clone-split-start', api_args) except netapp_api.NaApiError as e: if e.code == netapp_api.EVOL_CLONE_BEING_SPLIT: return raise @na_utils.trace def get_clone_children_for_snapshot(self, volume_name, snapshot_name): """Returns volumes that are keeping a snapshot locked.""" api_args = { 'query': { 'volume-attributes': { 'volume-clone-attributes': { 'volume-clone-parent-attributes': { 'name': volume_name, 'snapshot-name': snapshot_name, }, }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'name': None, }, }, }, } result = self.send_iter_request('volume-get-iter', api_args) if not self._has_records(result): return [] volume_list = [] attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') for volume_attributes in attributes_list.get_children(): volume_id_attributes = volume_attributes.get_child_by_name( 'volume-id-attributes') or netapp_api.NaElement('none') volume_list.append({ 'name': volume_id_attributes.get_child_content('name'), }) return volume_list @na_utils.trace def get_volume_junction_path(self, volume_name, is_style_cifs=False): """Gets a volume junction path.""" api_args = { 'volume': volume_name, 'is-style-cifs': six.text_type(is_style_cifs).lower(), } result = self.send_request('volume-get-volume-path', api_args) return result.get_child_content('junction') @na_utils.trace def mount_volume(self, volume_name, junction_path=None): """Mounts a volume on a junction path.""" api_args = { 'volume-name': volume_name, 'junction-path': (junction_path if junction_path else '/%s' % volume_name) } self.send_request('volume-mount', api_args) @na_utils.trace def offline_volume(self, volume_name): """Offlines a volume.""" try: self.send_request('volume-offline', {'name': volume_name}) except netapp_api.NaApiError as e: if e.code == netapp_api.EVOLUMEOFFLINE: return raise @na_utils.trace def _unmount_volume(self, volume_name, force=False): """Unmounts a volume.""" api_args = { 'volume-name': volume_name, 'force': six.text_type(force).lower(), } try: self.send_request('volume-unmount', api_args) except netapp_api.NaApiError as e: if e.code == netapp_api.EVOL_NOT_MOUNTED: return raise @na_utils.trace def unmount_volume(self, volume_name, force=False, wait_seconds=30): """Unmounts a volume, retrying if a clone split is ongoing. NOTE(cknight): While unlikely to happen in normal operation, any client that tries to delete volumes immediately after creating volume clones is likely to experience failures if cDOT isn't quite ready for the delete. 
The volume unmount is the first operation in the delete path that fails in this case, and there is no proactive check we can use to reliably predict the failure. And there isn't a specific error code from volume-unmount, so we have to check for a generic error code plus certain language in the error code. It's ugly, but it works, and it's better than hard-coding a fixed delay. """ # Do the unmount, handling split-related errors with retries. retry_interval = 3 # seconds for retry in range(int(wait_seconds / retry_interval)): try: self._unmount_volume(volume_name, force=force) LOG.debug('Volume %s unmounted.', volume_name) return except netapp_api.NaApiError as e: if e.code == netapp_api.EAPIERROR and 'job ID' in e.message: msg = _LW('Could not unmount volume %(volume)s due to ' 'ongoing volume operation: %(exception)s') msg_args = {'volume': volume_name, 'exception': e} LOG.warning(msg, msg_args) time.sleep(retry_interval) continue raise msg = _('Failed to unmount volume %(volume)s after ' 'waiting for %(wait_seconds)s seconds.') msg_args = {'volume': volume_name, 'wait_seconds': wait_seconds} LOG.error(msg, msg_args) raise exception.NetAppException(msg % msg_args) @na_utils.trace def delete_volume(self, volume_name): """Deletes a volume.""" self.send_request('volume-destroy', {'name': volume_name}) @na_utils.trace def create_snapshot(self, volume_name, snapshot_name): """Creates a volume snapshot.""" api_args = {'volume': volume_name, 'snapshot': snapshot_name} self.send_request('snapshot-create', api_args) @na_utils.trace def snapshot_exists(self, snapshot_name, volume_name): """Checks if Snapshot exists for a specified volume.""" LOG.debug('Checking if snapshot %(snapshot)s exists for ' 'volume %(volume)s', {'snapshot': snapshot_name, 'volume': volume_name}) """Gets a single snapshot.""" api_args = { 'query': { 'snapshot-info': { 'name': snapshot_name, 'volume': volume_name, }, }, 'desired-attributes': { 'snapshot-info': { 'name': None, 'volume': None, 'busy': None, 'snapshot-owners-list': { 'snapshot-owner': None, } }, }, } result = self.send_request('snapshot-get-iter', api_args) error_record_list = result.get_child_by_name( 'volume-errors') or netapp_api.NaElement('none') errors = error_record_list.get_children() if errors: error = errors[0] error_code = error.get_child_content('errno') error_reason = error.get_child_content('reason') msg = _('Could not read information for snapshot %(name)s. ' 'Code: %(code)s. Reason: %(reason)s') msg_args = { 'name': snapshot_name, 'code': error_code, 'reason': error_reason } if error_code == netapp_api.ESNAPSHOTNOTALLOWED: raise exception.SnapshotUnavailable(msg % msg_args) else: raise exception.NetAppException(msg % msg_args) return self._has_records(result) @na_utils.trace def get_snapshot(self, volume_name, snapshot_name): """Gets a single snapshot.""" api_args = { 'query': { 'snapshot-info': { 'name': snapshot_name, 'volume': volume_name, }, }, 'desired-attributes': { 'snapshot-info': { 'name': None, 'volume': None, 'busy': None, 'snapshot-owners-list': { 'snapshot-owner': None, } }, }, } result = self.send_request('snapshot-get-iter', api_args) error_record_list = result.get_child_by_name( 'volume-errors') or netapp_api.NaElement('none') errors = error_record_list.get_children() if errors: error = errors[0] error_code = error.get_child_content('errno') error_reason = error.get_child_content('reason') msg = _('Could not read information for snapshot %(name)s. ' 'Code: %(code)s. 
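# NOTE: Illustrative, standalone sketch (not part of the client) of the
# bounded retry loop used by unmount_volume() above: retry every
# ``retry_interval`` seconds for up to ``wait_seconds`` while the failure
# looks like a transient clone-split error, re-raise anything else, and
# give up once the budget is spent. ``unmount_once`` and
# ``is_transient_error`` are hypothetical callables.
import time


def unmount_with_retries_sketch(unmount_once, is_transient_error,
                                wait_seconds=30, retry_interval=3):
    for _attempt in range(int(wait_seconds / retry_interval)):
        try:
            unmount_once()
            return
        except Exception as exc:
            if is_transient_error(exc):
                time.sleep(retry_interval)
                continue
            raise
    raise RuntimeError('Failed to unmount volume after %s seconds.'
                       % wait_seconds)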
Reason: %(reason)s') msg_args = { 'name': snapshot_name, 'code': error_code, 'reason': error_reason } if error_code == netapp_api.ESNAPSHOTNOTALLOWED: raise exception.SnapshotUnavailable(msg % msg_args) else: raise exception.NetAppException(msg % msg_args) attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') snapshot_info_list = attributes_list.get_children() if not self._has_records(result): raise exception.SnapshotResourceNotFound(name=snapshot_name) elif len(snapshot_info_list) > 1: msg = _('Could not find unique snapshot %(snap)s on ' 'volume %(vol)s.') msg_args = {'snap': snapshot_name, 'vol': volume_name} raise exception.NetAppException(msg % msg_args) snapshot_info = snapshot_info_list[0] snapshot = { 'name': snapshot_info.get_child_content('name'), 'volume': snapshot_info.get_child_content('volume'), 'busy': strutils.bool_from_string( snapshot_info.get_child_content('busy')), } snapshot_owners_list = snapshot_info.get_child_by_name( 'snapshot-owners-list') or netapp_api.NaElement('none') snapshot_owners = set([ snapshot_owner.get_child_content('owner') for snapshot_owner in snapshot_owners_list.get_children()]) snapshot['owners'] = snapshot_owners return snapshot @na_utils.trace def rename_snapshot(self, volume_name, snapshot_name, new_snapshot_name): api_args = { 'volume': volume_name, 'current-name': snapshot_name, 'new-name': new_snapshot_name } self.send_request('snapshot-rename', api_args) @na_utils.trace def delete_snapshot(self, volume_name, snapshot_name): """Deletes a volume snapshot.""" api_args = {'volume': volume_name, 'snapshot': snapshot_name} self.send_request('snapshot-delete', api_args) @na_utils.trace def soft_delete_snapshot(self, volume_name, snapshot_name): """Deletes a volume snapshot, or renames it if delete fails.""" try: self.delete_snapshot(volume_name, snapshot_name) except netapp_api.NaApiError: self.rename_snapshot(volume_name, snapshot_name, DELETED_PREFIX + snapshot_name) msg = _('Soft-deleted snapshot %(snapshot)s on volume %(volume)s.') msg_args = {'snapshot': snapshot_name, 'volume': volume_name} LOG.info(msg, msg_args) @na_utils.trace def prune_deleted_snapshots(self): """Deletes non-busy snapshots that were previously soft-deleted.""" deleted_snapshots_map = self._get_deleted_snapshots() for vserver in deleted_snapshots_map: client = copy.deepcopy(self) client.set_vserver(vserver) for snapshot in deleted_snapshots_map[vserver]: try: client.delete_snapshot(snapshot['volume'], snapshot['name']) except netapp_api.NaApiError: msg = _('Could not delete snapshot %(snap)s on ' 'volume %(volume)s.') msg_args = { 'snap': snapshot['name'], 'volume': snapshot['volume'], } LOG.exception(msg, msg_args) @na_utils.trace def _get_deleted_snapshots(self): """Returns non-busy, soft-deleted snapshots suitable for reaping.""" api_args = { 'query': { 'snapshot-info': { 'name': DELETED_PREFIX + '*', 'busy': 'false', }, }, 'desired-attributes': { 'snapshot-info': { 'name': None, 'vserver': None, 'volume': None, }, }, } result = self.send_iter_request('snapshot-get-iter', api_args) attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') # Build a map of snapshots, one list of snapshots per vserver snapshot_map = {} for snapshot_info in attributes_list.get_children(): vserver = snapshot_info.get_child_content('vserver') snapshot_list = snapshot_map.get(vserver, []) snapshot_list.append({ 'name': snapshot_info.get_child_content('name'), 'volume': snapshot_info.get_child_content('volume'), 
'vserver': vserver, }) snapshot_map[vserver] = snapshot_list return snapshot_map @na_utils.trace def create_cg_snapshot(self, volume_names, snapshot_name): """Creates a consistency group snapshot of one or more flexvols.""" cg_id = self._start_cg_snapshot(volume_names, snapshot_name) if not cg_id: msg = _('Could not start consistency group snapshot %s.') raise exception.NetAppException(msg % snapshot_name) self._commit_cg_snapshot(cg_id) @na_utils.trace def _start_cg_snapshot(self, volume_names, snapshot_name): api_args = { 'snapshot': snapshot_name, 'timeout': 'relaxed', 'volumes': [ {'volume-name': volume_name} for volume_name in volume_names ], } result = self.send_request('cg-start', api_args) return result.get_child_content('cg-id') @na_utils.trace def _commit_cg_snapshot(self, cg_id): api_args = {'cg-id': cg_id} self.send_request('cg-commit', api_args) @na_utils.trace def create_cifs_share(self, share_name): share_path = '/%s' % share_name api_args = {'path': share_path, 'share-name': share_name} self.send_request('cifs-share-create', api_args) @na_utils.trace def get_cifs_share_access(self, share_name): api_args = { 'query': { 'cifs-share-access-control': { 'share': share_name, }, }, 'desired-attributes': { 'cifs-share-access-control': { 'user-or-group': None, 'permission': None, }, }, } result = self.send_iter_request('cifs-share-access-control-get-iter', api_args) attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') rules = {} for rule in attributes_list.get_children(): user_or_group = rule.get_child_content('user-or-group') permission = rule.get_child_content('permission') rules[user_or_group] = permission return rules @na_utils.trace def add_cifs_share_access(self, share_name, user_name, readonly): api_args = { 'permission': 'read' if readonly else 'full_control', 'share': share_name, 'user-or-group': user_name, } self.send_request('cifs-share-access-control-create', api_args) @na_utils.trace def modify_cifs_share_access(self, share_name, user_name, readonly): api_args = { 'permission': 'read' if readonly else 'full_control', 'share': share_name, 'user-or-group': user_name, } self.send_request('cifs-share-access-control-modify', api_args) @na_utils.trace def remove_cifs_share_access(self, share_name, user_name): api_args = {'user-or-group': user_name, 'share': share_name} self.send_request('cifs-share-access-control-delete', api_args) @na_utils.trace def remove_cifs_share(self, share_name): self.send_request('cifs-share-delete', {'share-name': share_name}) @na_utils.trace def add_nfs_export_rule(self, policy_name, client_match, readonly): rule_indices = self._get_nfs_export_rule_indices(policy_name, client_match) if not rule_indices: self._add_nfs_export_rule(policy_name, client_match, readonly) else: # Update first rule and delete the rest self._update_nfs_export_rule( policy_name, client_match, readonly, rule_indices.pop(0)) self._remove_nfs_export_rules(policy_name, rule_indices) @na_utils.trace def _add_nfs_export_rule(self, policy_name, client_match, readonly): api_args = { 'policy-name': policy_name, 'client-match': client_match, 'ro-rule': { 'security-flavor': 'sys', }, 'rw-rule': { 'security-flavor': 'sys' if not readonly else 'never', }, 'super-user-security': { 'security-flavor': 'sys', }, } self.send_request('export-rule-create', api_args) @na_utils.trace def _update_nfs_export_rule(self, policy_name, client_match, readonly, rule_index): api_args = { 'policy-name': policy_name, 'rule-index': rule_index, 'client-match': 
client_match, 'ro-rule': { 'security-flavor': 'sys' }, 'rw-rule': { 'security-flavor': 'sys' if not readonly else 'never' }, 'super-user-security': { 'security-flavor': 'sys' }, } self.send_request('export-rule-modify', api_args) @na_utils.trace def _get_nfs_export_rule_indices(self, policy_name, client_match): api_args = { 'query': { 'export-rule-info': { 'policy-name': policy_name, 'client-match': client_match, }, }, 'desired-attributes': { 'export-rule-info': { 'vserver-name': None, 'policy-name': None, 'client-match': None, 'rule-index': None, }, }, } result = self.send_iter_request('export-rule-get-iter', api_args) attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') export_rule_info_list = attributes_list.get_children() rule_indices = [int(export_rule_info.get_child_content('rule-index')) for export_rule_info in export_rule_info_list] rule_indices.sort() return [six.text_type(rule_index) for rule_index in rule_indices] @na_utils.trace def remove_nfs_export_rule(self, policy_name, client_match): rule_indices = self._get_nfs_export_rule_indices(policy_name, client_match) self._remove_nfs_export_rules(policy_name, rule_indices) @na_utils.trace def _remove_nfs_export_rules(self, policy_name, rule_indices): for rule_index in rule_indices: api_args = { 'policy-name': policy_name, 'rule-index': rule_index } try: self.send_request('export-rule-destroy', api_args) except netapp_api.NaApiError as e: if e.code != netapp_api.EOBJECTNOTFOUND: raise @na_utils.trace def clear_nfs_export_policy_for_volume(self, volume_name): self.set_nfs_export_policy_for_volume(volume_name, 'default') @na_utils.trace def set_nfs_export_policy_for_volume(self, volume_name, policy_name): api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': volume_name, }, }, }, 'attributes': { 'volume-attributes': { 'volume-export-attributes': { 'policy': policy_name, }, }, }, } self.send_request('volume-modify-iter', api_args) @na_utils.trace def get_nfs_export_policy_for_volume(self, volume_name): """Get the name of the export policy for a volume.""" api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': volume_name, }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-export-attributes': { 'policy': None, }, }, }, } result = self.send_iter_request('volume-get-iter', api_args) attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') volume_attributes = attributes_list.get_child_by_name( 'volume-attributes') or netapp_api.NaElement('none') volume_export_attributes = volume_attributes.get_child_by_name( 'volume-export-attributes') or netapp_api.NaElement('none') export_policy = volume_export_attributes.get_child_content('policy') if not export_policy: msg = _('Could not find export policy for volume %s.') raise exception.NetAppException(msg % volume_name) return export_policy @na_utils.trace def create_nfs_export_policy(self, policy_name): api_args = {'policy-name': policy_name} try: self.send_request('export-policy-create', api_args) except netapp_api.NaApiError as e: if e.code != netapp_api.EDUPLICATEENTRY: raise @na_utils.trace def soft_delete_nfs_export_policy(self, policy_name): try: self.delete_nfs_export_policy(policy_name) except netapp_api.NaApiError: # NOTE(cknight): Policy deletion can fail if called too soon after # removing from a flexvol. So rename for later harvesting. 
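            # The renamed policy (DELETED_PREFIX + policy_name) is reaped
            # later by prune_deleted_nfs_export_policies() once it can be
            # deleted.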
self.rename_nfs_export_policy(policy_name, DELETED_PREFIX + policy_name) @na_utils.trace def delete_nfs_export_policy(self, policy_name): api_args = {'policy-name': policy_name} try: self.send_request('export-policy-destroy', api_args) except netapp_api.NaApiError as e: if e.code == netapp_api.EOBJECTNOTFOUND: return raise @na_utils.trace def rename_nfs_export_policy(self, policy_name, new_policy_name): api_args = { 'policy-name': policy_name, 'new-policy-name': new_policy_name } self.send_request('export-policy-rename', api_args) @na_utils.trace def prune_deleted_nfs_export_policies(self): deleted_policy_map = self._get_deleted_nfs_export_policies() for vserver in deleted_policy_map: client = copy.deepcopy(self) client.set_vserver(vserver) for policy in deleted_policy_map[vserver]: try: client.delete_nfs_export_policy(policy) except netapp_api.NaApiError: LOG.debug('Could not delete export policy %s.' % policy) @na_utils.trace def _get_deleted_nfs_export_policies(self): api_args = { 'query': { 'export-policy-info': { 'policy-name': DELETED_PREFIX + '*', }, }, 'desired-attributes': { 'export-policy-info': { 'policy-name': None, 'vserver': None, }, }, } result = self.send_iter_request('export-policy-get-iter', api_args) attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') policy_map = {} for export_info in attributes_list.get_children(): vserver = export_info.get_child_content('vserver') policies = policy_map.get(vserver, []) policies.append(export_info.get_child_content('policy-name')) policy_map[vserver] = policies return policy_map @na_utils.trace def _get_ems_log_destination_vserver(self): """Returns the best vserver destination for EMS messages.""" major, minor = self.get_ontapi_version(cached=True) if (major > 1) or (major == 1 and minor > 15): # Prefer admin Vserver (requires cluster credentials). admin_vservers = self.list_vservers(vserver_type='admin') if admin_vservers: return admin_vservers[0] # Fall back to data Vserver. data_vservers = self.list_vservers(vserver_type='data') if data_vservers: return data_vservers[0] # If older API version, or no other Vservers found, use node Vserver. node_vservers = self.list_vservers(vserver_type='node') if node_vservers: return node_vservers[0] raise exception.NotFound("No Vserver found to receive EMS messages.") @na_utils.trace def send_ems_log_message(self, message_dict): """Sends a message to the Data ONTAP EMS log.""" node_client = copy.deepcopy(self) node_client.connection.set_timeout(25) try: node_client.set_vserver(self._get_ems_log_destination_vserver()) node_client.send_request('ems-autosupport-log', message_dict) LOG.debug('EMS executed successfully.') except netapp_api.NaApiError as e: LOG.warning(_LW('Failed to invoke EMS. 
%s') % e) @na_utils.trace def get_aggregate_raid_types(self, aggregate_names): """Get the RAID type of one or more aggregates.""" desired_attributes = { 'aggr-attributes': { 'aggregate-name': None, 'aggr-raid-attributes': { 'raid-type': None, }, }, } aggr_list = self._get_aggregates(aggregate_names=aggregate_names, desired_attributes=desired_attributes) aggr_raid_dict = {} for aggr in aggr_list: aggr_name = aggr.get_child_content('aggregate-name') aggr_raid_attrs = aggr.get_child_by_name('aggr-raid-attributes') aggr_raid_dict[aggr_name] = aggr_raid_attrs.get_child_content( 'raid-type') return aggr_raid_dict @na_utils.trace def get_aggregate_disk_types(self, aggregate_names): """Get the disk type of one or more aggregates.""" aggr_disk_type_dict = {} for aggregate_name in aggregate_names: # Only get 1 disk, since apart from hybrid aggregates all disks # must be the same type. api_args = { 'max-records': 1, 'query': { 'storage-disk-info': { 'disk-raid-info': { 'disk-aggregate-info': { 'aggregate-name': aggregate_name, }, }, }, }, 'desired-attributes': { 'storage-disk-info': { 'disk-raid-info': { 'effective-disk-type': None, }, }, }, } result = self.send_request('storage-disk-get-iter', api_args) attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') storage_disk_info_list = attributes_list.get_children() if len(storage_disk_info_list) >= 1: storage_disk_info = storage_disk_info_list[0] disk_raid_info = storage_disk_info.get_child_by_name( 'disk-raid-info') if disk_raid_info: disk_type = disk_raid_info.get_child_content( 'effective-disk-type') if disk_type: aggr_disk_type_dict[aggregate_name] = disk_type return aggr_disk_type_dict @na_utils.trace def check_for_cluster_credentials(self): try: self.list_cluster_nodes() # API succeeded, so definitely a cluster management LIF return True except netapp_api.NaApiError as e: if e.code == netapp_api.EAPINOTFOUND: LOG.debug('Not connected to cluster management LIF.') return False else: raise e @na_utils.trace def create_cluster_peer(self, addresses, username=None, password=None, passphrase=None): """Creates a cluster peer relationship.""" api_args = { 'peer-addresses': [ {'remote-inet-address': address} for address in addresses ], } if username: api_args['user-name'] = username if password: api_args['password'] = password if passphrase: api_args['passphrase'] = passphrase self.send_request('cluster-peer-create', api_args) @na_utils.trace def get_cluster_peers(self, remote_cluster_name=None): """Gets one or more cluster peer relationships.""" api_args = {} if remote_cluster_name: api_args['query'] = { 'cluster-peer-info': { 'remote-cluster-name': remote_cluster_name, } } result = self.send_iter_request('cluster-peer-get-iter', api_args) if not self._has_records(result): return [] cluster_peers = [] for cluster_peer_info in result.get_child_by_name( 'attributes-list').get_children(): cluster_peer = { 'active-addresses': [], 'peer-addresses': [] } active_addresses = cluster_peer_info.get_child_by_name( 'active-addresses') or netapp_api.NaElement('none') for address in active_addresses.get_children(): cluster_peer['active-addresses'].append(address.get_content()) peer_addresses = cluster_peer_info.get_child_by_name( 'peer-addresses') or netapp_api.NaElement('none') for address in peer_addresses.get_children(): cluster_peer['peer-addresses'].append(address.get_content()) cluster_peer['availability'] = cluster_peer_info.get_child_content( 'availability') cluster_peer['cluster-name'] = 
cluster_peer_info.get_child_content( 'cluster-name') cluster_peer['cluster-uuid'] = cluster_peer_info.get_child_content( 'cluster-uuid') cluster_peer['remote-cluster-name'] = ( cluster_peer_info.get_child_content('remote-cluster-name')) cluster_peer['serial-number'] = ( cluster_peer_info.get_child_content('serial-number')) cluster_peer['timeout'] = cluster_peer_info.get_child_content( 'timeout') cluster_peers.append(cluster_peer) return cluster_peers @na_utils.trace def delete_cluster_peer(self, cluster_name): """Deletes a cluster peer relationship.""" api_args = {'cluster-name': cluster_name} self.send_request('cluster-peer-delete', api_args) @na_utils.trace def get_cluster_peer_policy(self): """Gets the cluster peering policy configuration.""" if not self.features.CLUSTER_PEER_POLICY: return {} result = self.send_request('cluster-peer-policy-get') attributes = result.get_child_by_name( 'attributes') or netapp_api.NaElement('none') cluster_peer_policy = attributes.get_child_by_name( 'cluster-peer-policy') or netapp_api.NaElement('none') policy = { 'is-unauthenticated-access-permitted': cluster_peer_policy.get_child_content( 'is-unauthenticated-access-permitted'), 'passphrase-minimum-length': cluster_peer_policy.get_child_content( 'passphrase-minimum-length'), } if policy['is-unauthenticated-access-permitted'] is not None: policy['is-unauthenticated-access-permitted'] = ( strutils.bool_from_string( policy['is-unauthenticated-access-permitted'])) if policy['passphrase-minimum-length'] is not None: policy['passphrase-minimum-length'] = int( policy['passphrase-minimum-length']) return policy @na_utils.trace def set_cluster_peer_policy(self, is_unauthenticated_access_permitted=None, passphrase_minimum_length=None): """Modifies the cluster peering policy configuration.""" if not self.features.CLUSTER_PEER_POLICY: return if (is_unauthenticated_access_permitted is None and passphrase_minimum_length is None): return api_args = {} if is_unauthenticated_access_permitted is not None: api_args['is-unauthenticated-access-permitted'] = ( 'true' if strutils.bool_from_string( is_unauthenticated_access_permitted) else 'false') if passphrase_minimum_length is not None: api_args['passphrase-minlength'] = six.text_type( passphrase_minimum_length) self.send_request('cluster-peer-policy-modify', api_args) @na_utils.trace def create_vserver_peer(self, vserver_name, peer_vserver_name): """Creates a Vserver peer relationship for SnapMirrors.""" api_args = { 'vserver': vserver_name, 'peer-vserver': peer_vserver_name, 'applications': [ {'vserver-peer-application': 'snapmirror'}, ], } self.send_request('vserver-peer-create', api_args) @na_utils.trace def delete_vserver_peer(self, vserver_name, peer_vserver_name): """Deletes a Vserver peer relationship.""" api_args = {'vserver': vserver_name, 'peer-vserver': peer_vserver_name} self.send_request('vserver-peer-delete', api_args) @na_utils.trace def accept_vserver_peer(self, vserver_name, peer_vserver_name): """Accepts a pending Vserver peer relationship.""" api_args = {'vserver': vserver_name, 'peer-vserver': peer_vserver_name} self.send_request('vserver-peer-accept', api_args) @na_utils.trace def get_vserver_peers(self, vserver_name=None, peer_vserver_name=None): """Gets one or more Vserver peer relationships.""" api_args = None if vserver_name or peer_vserver_name: api_args = {'query': {'vserver-peer-info': {}}} if vserver_name: api_args['query']['vserver-peer-info']['vserver'] = ( vserver_name) if peer_vserver_name: 
api_args['query']['vserver-peer-info']['peer-vserver'] = ( peer_vserver_name) result = self.send_iter_request('vserver-peer-get-iter', api_args) if not self._has_records(result): return [] vserver_peers = [] for vserver_peer_info in result.get_child_by_name( 'attributes-list').get_children(): vserver_peer = { 'vserver': vserver_peer_info.get_child_content('vserver'), 'peer-vserver': vserver_peer_info.get_child_content('peer-vserver'), 'peer-state': vserver_peer_info.get_child_content('peer-state'), 'peer-cluster': vserver_peer_info.get_child_content('peer-cluster'), } vserver_peers.append(vserver_peer) return vserver_peers def _ensure_snapmirror_v2(self): """Verify support for SnapMirror control plane v2.""" if not self.features.SNAPMIRROR_V2: msg = _('SnapMirror features require Data ONTAP 8.2 or later.') raise exception.NetAppException(msg) @na_utils.trace def create_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume, schedule=None, policy=None, relationship_type='data_protection'): """Creates a SnapMirror relationship (cDOT 8.2 or later only).""" self._ensure_snapmirror_v2() api_args = { 'source-volume': source_volume, 'source-vserver': source_vserver, 'destination-volume': destination_volume, 'destination-vserver': destination_vserver, 'relationship-type': relationship_type, } if schedule: api_args['schedule'] = schedule if policy: api_args['policy'] = policy try: self.send_request('snapmirror-create', api_args) except netapp_api.NaApiError as e: if e.code != netapp_api.ERELATION_EXISTS: raise @na_utils.trace def initialize_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume, source_snapshot=None, transfer_priority=None): """Initializes a SnapMirror relationship (cDOT 8.2 or later only).""" self._ensure_snapmirror_v2() api_args = { 'source-volume': source_volume, 'source-vserver': source_vserver, 'destination-volume': destination_volume, 'destination-vserver': destination_vserver, } if source_snapshot: api_args['source-snapshot'] = source_snapshot if transfer_priority: api_args['transfer-priority'] = transfer_priority result = self.send_request('snapmirror-initialize', api_args) result_info = {} result_info['operation-id'] = result.get_child_content( 'result-operation-id') result_info['status'] = result.get_child_content('result-status') result_info['jobid'] = result.get_child_content('result-jobid') result_info['error-code'] = result.get_child_content( 'result-error-code') result_info['error-message'] = result.get_child_content( 'result-error-message') return result_info @na_utils.trace def release_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume, relationship_info_only=False): """Removes a SnapMirror relationship on the source endpoint.""" self._ensure_snapmirror_v2() api_args = { 'query': { 'snapmirror-destination-info': { 'source-volume': source_volume, 'source-vserver': source_vserver, 'destination-volume': destination_volume, 'destination-vserver': destination_vserver, 'relationship-info-only': ('true' if relationship_info_only else 'false'), } } } self.send_request('snapmirror-release-iter', api_args) @na_utils.trace def quiesce_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume): """Disables future transfers to a SnapMirror destination.""" self._ensure_snapmirror_v2() api_args = { 'source-volume': source_volume, 'source-vserver': source_vserver, 'destination-volume': destination_volume, 'destination-vserver': destination_vserver, } 
self.send_request('snapmirror-quiesce', api_args) @na_utils.trace def abort_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume, clear_checkpoint=False): """Stops ongoing transfers for a SnapMirror relationship.""" self._ensure_snapmirror_v2() api_args = { 'source-volume': source_volume, 'source-vserver': source_vserver, 'destination-volume': destination_volume, 'destination-vserver': destination_vserver, 'clear-checkpoint': 'true' if clear_checkpoint else 'false', } try: self.send_request('snapmirror-abort', api_args) except netapp_api.NaApiError as e: if e.code != netapp_api.ENOTRANSFER_IN_PROGRESS: raise @na_utils.trace def break_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume): """Breaks a data protection SnapMirror relationship.""" self._ensure_snapmirror_v2() api_args = { 'source-volume': source_volume, 'source-vserver': source_vserver, 'destination-volume': destination_volume, 'destination-vserver': destination_vserver, } self.send_request('snapmirror-break', api_args) @na_utils.trace def modify_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume, schedule=None, policy=None, tries=None, max_transfer_rate=None): """Modifies a SnapMirror relationship.""" self._ensure_snapmirror_v2() api_args = { 'source-volume': source_volume, 'source-vserver': source_vserver, 'destination-volume': destination_volume, 'destination-vserver': destination_vserver, } if schedule: api_args['schedule'] = schedule if policy: api_args['policy'] = policy if tries is not None: api_args['tries'] = tries if max_transfer_rate is not None: api_args['max-transfer-rate'] = max_transfer_rate self.send_request('snapmirror-modify', api_args) @na_utils.trace def delete_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume): """Destroys a SnapMirror relationship.""" self._ensure_snapmirror_v2() api_args = { 'query': { 'snapmirror-info': { 'source-volume': source_volume, 'source-vserver': source_vserver, 'destination-volume': destination_volume, 'destination-vserver': destination_vserver, } } } self.send_request('snapmirror-destroy-iter', api_args) @na_utils.trace def update_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume): """Schedules a snapmirror update.""" self._ensure_snapmirror_v2() api_args = { 'source-volume': source_volume, 'source-vserver': source_vserver, 'destination-volume': destination_volume, 'destination-vserver': destination_vserver, } try: self.send_request('snapmirror-update', api_args) except netapp_api.NaApiError as e: if (e.code != netapp_api.ETRANSFER_IN_PROGRESS and e.code != netapp_api.EANOTHER_OP_ACTIVE): raise @na_utils.trace def resume_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume): """Resume a SnapMirror relationship if it is quiesced.""" self._ensure_snapmirror_v2() api_args = { 'source-volume': source_volume, 'source-vserver': source_vserver, 'destination-volume': destination_volume, 'destination-vserver': destination_vserver, } try: self.send_request('snapmirror-resume', api_args) except netapp_api.NaApiError as e: if e.code != netapp_api.ERELATION_NOT_QUIESCED: raise @na_utils.trace def resync_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume): """Resync a SnapMirror relationship.""" self._ensure_snapmirror_v2() api_args = { 'source-volume': source_volume, 'source-vserver': source_vserver, 'destination-volume': 
destination_volume, 'destination-vserver': destination_vserver, } self.send_request('snapmirror-resync', api_args) @na_utils.trace def _get_snapmirrors(self, source_vserver=None, source_volume=None, destination_vserver=None, destination_volume=None, desired_attributes=None): query = None if (source_vserver or source_volume or destination_vserver or destination_volume): query = {'snapmirror-info': {}} if source_volume: query['snapmirror-info']['source-volume'] = source_volume if destination_volume: query['snapmirror-info']['destination-volume'] = ( destination_volume) if source_vserver: query['snapmirror-info']['source-vserver'] = source_vserver if destination_vserver: query['snapmirror-info']['destination-vserver'] = ( destination_vserver) api_args = {} if query: api_args['query'] = query if desired_attributes: api_args['desired-attributes'] = desired_attributes result = self.send_iter_request('snapmirror-get-iter', api_args) if not self._has_records(result): return [] else: return result.get_child_by_name('attributes-list').get_children() @na_utils.trace def get_snapmirrors(self, source_vserver, source_volume, destination_vserver, destination_volume, desired_attributes=None): """Gets one or more SnapMirror relationships. Either the source or destination info may be omitted. Desired attributes should be a flat list of attribute names. """ self._ensure_snapmirror_v2() if desired_attributes is not None: desired_attributes = { 'snapmirror-info': {attr: None for attr in desired_attributes}, } result = self._get_snapmirrors( source_vserver=source_vserver, source_volume=source_volume, destination_vserver=destination_vserver, destination_volume=destination_volume, desired_attributes=desired_attributes) snapmirrors = [] for snapmirror_info in result: snapmirror = {} for child in snapmirror_info.get_children(): name = self._strip_xml_namespace(child.get_name()) snapmirror[name] = child.get_content() snapmirrors.append(snapmirror) return snapmirrors manila-2.0.0/manila/share/drivers/netapp/dataontap/client/__init__.py0000664000567000056710000000000012701407107026752 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/netapp/dataontap/client/client_base.py0000664000567000056710000001004612701407107027476 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
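# The classes below provide the plumbing shared by the ONTAP clients:
# NetAppBaseClient wraps an api.NaServer connection and exposes
# send_request()/get_ontapi_version()/get_licenses(), while Features records
# capability flags. Illustrative construction only -- the keyword names come
# from __init__ below, the values here are made up:
#
#     client = NetAppBaseClient(hostname='10.0.0.1', transport_type='https',
#                               port=443, username='admin', password='secret',
#                               trace=False)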
from oslo_log import log from oslo_utils import excutils from manila.i18n import _LE from manila.share.drivers.netapp.dataontap.client import api as netapp_api from manila.share.drivers.netapp import utils as na_utils LOG = log.getLogger(__name__) class NetAppBaseClient(object): def __init__(self, **kwargs): self.connection = netapp_api.NaServer( host=kwargs['hostname'], transport_type=kwargs['transport_type'], port=kwargs['port'], username=kwargs['username'], password=kwargs['password'], trace=kwargs.get('trace', False)) def get_ontapi_version(self, cached=True): """Gets the supported ontapi version.""" if cached: return self.connection.get_api_version() result = self.send_request('system-get-ontapi-version', enable_tunneling=False) major = result.get_child_content('major-version') minor = result.get_child_content('minor-version') return major, minor @na_utils.trace def get_system_version(self): """Gets the current Data ONTAP version.""" result = self.send_request('system-get-version') version_tuple = result.get_child_by_name( 'version-tuple') or netapp_api.NaElement('none') system_version_tuple = version_tuple.get_child_by_name( 'system-version-tuple') or netapp_api.NaElement('none') version = {} version['version'] = result.get_child_content('version') version['version-tuple'] = ( system_version_tuple.get_child_content('generation'), system_version_tuple.get_child_content('major'), system_version_tuple.get_child_content('minor')) return version def _init_features(self): """Set up the repository of available Data ONTAP features.""" self.features = Features() def _strip_xml_namespace(self, string): if string.startswith('{') and '}' in string: return string.split('}', 1)[1] return string def send_request(self, api_name, api_args=None, enable_tunneling=True): """Sends request to Ontapi.""" request = netapp_api.NaElement(api_name) if api_args: request.translate_struct(api_args) return self.connection.invoke_successfully(request, enable_tunneling) @na_utils.trace def get_licenses(self): try: result = self.send_request('license-v2-list-info') except netapp_api.NaApiError as e: with excutils.save_and_reraise_exception(): LOG.error(_LE("Could not get licenses list. %s."), e) return sorted( [l.get_child_content('package').lower() for l in result.get_child_by_name('licenses').get_children()]) def send_ems_log_message(self, message_dict): """Sends a message to the Data ONTAP EMS log.""" raise NotImplementedError() class Features(object): def __init__(self): self.defined_features = set() def add_feature(self, name, supported=True): if not isinstance(supported, bool): raise TypeError("Feature value must be a bool type.") self.defined_features.add(name) setattr(self, name, supported) def __getattr__(self, name): # NOTE(cknight): Needed to keep pylint happy. raise AttributeError manila-2.0.0/manila/share/drivers/netapp/dataontap/client/api.py0000664000567000056710000005471212701407107026007 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ NetApp API for Data ONTAP and OnCommand DFM. Contains classes required to issue API calls to Data ONTAP and OnCommand DFM. """ import copy from lxml import etree from oslo_log import log import six from six.moves import urllib from manila import exception from manila.i18n import _ LOG = log.getLogger(__name__) EONTAPI_EINVAL = '22' EAPIERROR = '13001' EAPINOTFOUND = '13005' ESNAPSHOTNOTALLOWED = '13023' EVOLUMEOFFLINE = '13042' EINTERNALERROR = '13114' EDUPLICATEENTRY = '13130' EVOLNOTCLONE = '13170' EVOL_NOT_MOUNTED = '14716' ESIS_CLONE_NOT_LICENSED = '14956' EOBJECTNOTFOUND = '15661' E_VIFMGR_PORT_ALREADY_ASSIGNED_TO_BROADCAST_DOMAIN = '18605' ERELATION_EXISTS = '17122' ENOTRANSFER_IN_PROGRESS = '17130' ETRANSFER_IN_PROGRESS = '17137' EANOTHER_OP_ACTIVE = '17131' ERELATION_NOT_QUIESCED = '17127' ESOURCE_IS_DIFFERENT = '17105' EVOL_CLONE_BEING_SPLIT = '17151' class NaServer(object): """Encapsulates server connection logic.""" TRANSPORT_TYPE_HTTP = 'http' TRANSPORT_TYPE_HTTPS = 'https' SERVER_TYPE_FILER = 'filer' SERVER_TYPE_DFM = 'dfm' URL_FILER = 'servlets/netapp.servlets.admin.XMLrequest_filer' URL_DFM = 'apis/XMLrequest' NETAPP_NS = 'http://www.netapp.com/filer/admin' STYLE_LOGIN_PASSWORD = 'basic_auth' STYLE_CERTIFICATE = 'certificate_auth' def __init__(self, host, server_type=SERVER_TYPE_FILER, transport_type=TRANSPORT_TYPE_HTTP, style=STYLE_LOGIN_PASSWORD, username=None, password=None, port=None, trace=False): self._host = host self.set_server_type(server_type) self.set_transport_type(transport_type) self.set_style(style) if port: self.set_port(port) self._username = username self._password = password self._trace = trace self._refresh_conn = True self._trace = trace LOG.debug('Using NetApp controller: %s', self._host) def get_transport_type(self): """Get the transport type protocol.""" return self._protocol def set_transport_type(self, transport_type): """Set the transport type protocol for API. Supports http and https transport types. """ if transport_type.lower() not in ( NaServer.TRANSPORT_TYPE_HTTP, NaServer.TRANSPORT_TYPE_HTTPS): raise ValueError('Unsupported transport type') self._protocol = transport_type.lower() if self._protocol == NaServer.TRANSPORT_TYPE_HTTP: if self._server_type == NaServer.SERVER_TYPE_FILER: self.set_port(80) else: self.set_port(8088) else: if self._server_type == NaServer.SERVER_TYPE_FILER: self.set_port(443) else: self.set_port(8488) self._refresh_conn = True def get_style(self): """Get the authorization style for communicating with the server.""" return self._auth_style def set_style(self, style): """Set the authorization style for communicating with the server. Supports basic_auth for now. Certificate_auth mode to be done. """ if style.lower() not in (NaServer.STYLE_LOGIN_PASSWORD, NaServer.STYLE_CERTIFICATE): raise ValueError('Unsupported authentication style') self._auth_style = style.lower() def get_server_type(self): """Get the target server type.""" return self._server_type def set_server_type(self, server_type): """Set the target server type. Supports filer and dfm server types. 
""" if server_type.lower() not in (NaServer.SERVER_TYPE_FILER, NaServer.SERVER_TYPE_DFM): raise ValueError('Unsupported server type') self._server_type = server_type.lower() if self._server_type == NaServer.SERVER_TYPE_FILER: self._url = NaServer.URL_FILER else: self._url = NaServer.URL_DFM self._ns = NaServer.NETAPP_NS self._refresh_conn = True def set_api_version(self, major, minor): """Set the API version.""" try: self._api_major_version = int(major) self._api_minor_version = int(minor) self._api_version = six.text_type(major) + "." + \ six.text_type(minor) except ValueError: raise ValueError('Major and minor versions must be integers') self._refresh_conn = True def get_api_version(self): """Gets the API version tuple.""" if hasattr(self, '_api_version'): return (self._api_major_version, self._api_minor_version) return None def set_port(self, port): """Set the server communication port.""" try: int(port) except ValueError: raise ValueError('Port must be integer') self._port = six.text_type(port) self._refresh_conn = True def get_port(self): """Get the server communication port.""" return self._port def set_timeout(self, seconds): """Sets the timeout in seconds.""" try: self._timeout = int(seconds) except ValueError: raise ValueError('timeout in seconds must be integer') def get_timeout(self): """Gets the timeout in seconds if set.""" if hasattr(self, '_timeout'): return self._timeout return None def get_vfiler(self): """Get the vfiler to use in tunneling.""" return self._vfiler def set_vfiler(self, vfiler): """Set the vfiler to use if tunneling gets enabled.""" self._vfiler = vfiler def get_vserver(self): """Get the vserver to use in tunneling.""" return self._vserver def set_vserver(self, vserver): """Set the vserver to use if tunneling gets enabled.""" self._vserver = vserver def set_username(self, username): """Set the user name for authentication.""" self._username = username self._refresh_conn = True def set_password(self, password): """Set the password for authentication.""" self._password = password self._refresh_conn = True def set_trace(self, trace=True): """Enable or disable the API tracing facility.""" self._trace = trace def invoke_elem(self, na_element, enable_tunneling=False): """Invoke the API on the server.""" if na_element and not isinstance(na_element, NaElement): ValueError('NaElement must be supplied to invoke API') request, request_element = self._create_request(na_element, enable_tunneling) if self._trace: LOG.debug("Request: %s", request_element.to_string(pretty=True)) if not hasattr(self, '_opener') or not self._opener \ or self._refresh_conn: self._build_opener() try: if hasattr(self, '_timeout'): response = self._opener.open(request, timeout=self._timeout) else: response = self._opener.open(request) except urllib.error.HTTPError as e: raise NaApiError(e.code, e.msg) except urllib.error.URLError as e: raise exception.StorageCommunicationException(six.text_type(e)) except Exception as e: raise NaApiError(message=e) response_xml = response.read() response_element = self._get_result(response_xml) if self._trace: LOG.debug("Response: %s", response_element.to_string(pretty=True)) return response_element def invoke_successfully(self, na_element, enable_tunneling=False): """Invokes API and checks execution status as success. Need to set enable_tunneling to True explicitly to achieve it. This helps to use same connection instance to enable or disable tunneling. The vserver or vfiler should be set before this call otherwise tunneling remains disabled. 
""" result = self.invoke_elem(na_element, enable_tunneling) if result.has_attr('status') and result.get_attr('status') == 'passed': return result code = result.get_attr('errno')\ or result.get_child_content('errorno')\ or 'ESTATUSFAILED' if code == ESIS_CLONE_NOT_LICENSED: msg = 'Clone operation failed: FlexClone not licensed.' else: msg = result.get_attr('reason')\ or result.get_child_content('reason')\ or 'Execution status is failed due to unknown reason' raise NaApiError(code, msg) def _create_request(self, na_element, enable_tunneling=False): """Creates request in the desired format.""" netapp_elem = NaElement('netapp') netapp_elem.add_attr('xmlns', self._ns) if hasattr(self, '_api_version'): netapp_elem.add_attr('version', self._api_version) if enable_tunneling: self._enable_tunnel_request(netapp_elem) netapp_elem.add_child_elem(na_element) request_d = netapp_elem.to_string() request = urllib.request.Request( self._get_url(), data=request_d, headers={'Content-Type': 'text/xml', 'charset': 'utf-8'}) return request, netapp_elem def _enable_tunnel_request(self, netapp_elem): """Enables vserver or vfiler tunneling.""" if hasattr(self, '_vfiler') and self._vfiler: if hasattr(self, '_api_major_version') and \ hasattr(self, '_api_minor_version') and \ self._api_major_version >= 1 and \ self._api_minor_version >= 7: netapp_elem.add_attr('vfiler', self._vfiler) else: raise ValueError('ontapi version has to be atleast 1.7' ' to send request to vfiler') if hasattr(self, '_vserver') and self._vserver: if hasattr(self, '_api_major_version') and \ hasattr(self, '_api_minor_version') and \ self._api_major_version >= 1 and \ self._api_minor_version >= 15: netapp_elem.add_attr('vfiler', self._vserver) else: raise ValueError('ontapi version has to be atleast 1.15' ' to send request to vserver') def _parse_response(self, response): """Get the NaElement for the response.""" if not response: raise NaApiError('No response received') xml = etree.XML(response) return NaElement(xml) def _get_result(self, response): """Gets the call result.""" processed_response = self._parse_response(response) return processed_response.get_child_by_name('results') def _get_url(self): return '%s://%s:%s/%s' % (self._protocol, self._host, self._port, self._url) def _build_opener(self): if self._auth_style == NaServer.STYLE_LOGIN_PASSWORD: auth_handler = self._create_basic_auth_handler() else: auth_handler = self._create_certificate_auth_handler() opener = urllib.request.build_opener(auth_handler) self._opener = opener def _create_basic_auth_handler(self): password_man = urllib.request.HTTPPasswordMgrWithDefaultRealm() password_man.add_password(None, self._get_url(), self._username, self._password) auth_handler = urllib.request.HTTPBasicAuthHandler(password_man) return auth_handler def _create_certificate_auth_handler(self): raise NotImplementedError() def __str__(self): return "server: %s" % (self._host) class NaElement(object): """Class wraps basic building block for NetApp API request.""" def __init__(self, name): """Name of the element or etree.Element.""" if isinstance(name, etree._Element): self._element = name else: self._element = etree.Element(name) def get_name(self): """Returns the tag name of the element.""" return self._element.tag def set_content(self, text): """Set the text string for the element.""" self._element.text = text def get_content(self): """Get the text for the element.""" return self._element.text def add_attr(self, name, value): """Add the attribute to the element.""" self._element.set(name, value) 
def add_attrs(self, **attrs): """Add multiple attributes to the element.""" for attr in attrs.keys(): self._element.set(attr, attrs.get(attr)) def add_child_elem(self, na_element): """Add the child element to the element.""" if isinstance(na_element, NaElement): self._element.append(na_element._element) return raise ValueError(_("Can only add elements of type NaElement.")) def get_child_by_name(self, name): """Get the child element by the tag name.""" for child in self._element.iterchildren(): if child.tag == name or etree.QName(child.tag).localname == name: return NaElement(child) return None def get_child_content(self, name): """Get the content of the child.""" for child in self._element.iterchildren(): if child.tag == name or etree.QName(child.tag).localname == name: return child.text return None def get_children(self): """Get the children for the element.""" return [NaElement(el) for el in self._element.iterchildren()] def has_attr(self, name): """Checks whether element has attribute.""" attributes = self._element.attrib or {} return name in attributes.keys() def get_attr(self, name): """Get the attribute with the given name.""" attributes = self._element.attrib or {} return attributes.get(name) def get_attr_names(self): """Returns the list of attribute names.""" attributes = self._element.attrib or {} return attributes.keys() def add_new_child(self, name, content, convert=False): """Add child with tag name and context. Convert replaces entity refs to chars. """ child = NaElement(name) if convert: content = NaElement._convert_entity_refs(content) child.set_content(content) self.add_child_elem(child) @staticmethod def _convert_entity_refs(text): """Converts entity refs to chars to handle etree auto conversions.""" text = text.replace("<", "<") text = text.replace(">", ">") return text @staticmethod def create_node_with_children(node, **children): """Creates and returns named node with children.""" parent = NaElement(node) for child in children.keys(): parent.add_new_child(child, children.get(child, None)) return parent def add_node_with_children(self, node, **children): """Creates named node with children.""" parent = NaElement.create_node_with_children(node, **children) self.add_child_elem(parent) def to_string(self, pretty=False, method='xml', encoding='UTF-8'): """Prints the element to string.""" return etree.tostring(self._element, method=method, encoding=encoding, pretty_print=pretty) def __getitem__(self, key): """Dict getter method for NaElement. Returns NaElement list if present, text value in case no NaElement node children or attribute value if present. """ child = self.get_child_by_name(key) if child: if child.get_children(): return child else: return child.get_content() elif self.has_attr(key): return self.get_attr(key) raise KeyError(_('No element by given name %s.') % (key)) def __setitem__(self, key, value): """Dict setter method for NaElement. Accepts dict, list, tuple, str, int, float and long as valid value. 
""" if key: if value: if isinstance(value, NaElement): child = NaElement(key) child.add_child_elem(value) self.add_child_elem(child) elif isinstance( value, six.string_types + six.integer_types + (float, )): self.add_new_child(key, six.text_type(value)) elif isinstance(value, (list, tuple, dict)): child = NaElement(key) child.translate_struct(value) self.add_child_elem(child) else: raise TypeError(_('Not a valid value for NaElement.')) else: self.add_child_elem(NaElement(key)) else: raise KeyError(_('NaElement name cannot be null.')) def translate_struct(self, data_struct): """Convert list, tuple, dict to NaElement and appends. Example usage: 1. vl1 vl2 vl3 The above can be achieved by doing root = NaElement('root') root.translate_struct({'elem1': 'vl1', 'elem2': 'vl2', 'elem3': 'vl3'}) 2. vl1 vl2 vl3 The above can be achieved by doing root = NaElement('root') root.translate_struct([{'elem1': 'vl1', 'elem2': 'vl2'}, {'elem1': 'vl3'}]) """ if isinstance(data_struct, (list, tuple)): for el in data_struct: if isinstance(el, (list, tuple, dict)): self.translate_struct(el) else: self.add_child_elem(NaElement(el)) elif isinstance(data_struct, dict): for k in data_struct.keys(): child = NaElement(k) if isinstance(data_struct[k], (dict, list, tuple)): child.translate_struct(data_struct[k]) else: if data_struct[k]: child.set_content(six.text_type(data_struct[k])) self.add_child_elem(child) else: raise ValueError(_('Type cannot be converted into NaElement.')) class NaApiError(Exception): """Base exception class for NetApp API errors.""" def __init__(self, code='unknown', message='unknown'): self.code = code self.message = message def __str__(self, *args, **kwargs): return 'NetApp API failed. Reason - %s:%s' % (self.code, self.message) def invoke_api(na_server, api_name, api_family='cm', query=None, des_result=None, additional_elems=None, is_iter=False, records=0, tag=None, timeout=0, tunnel=None): """Invokes any given API call to a NetApp server. :param na_server: na_server instance :param api_name: API name string :param api_family: cm or 7m :param query: API query as dict :param des_result: desired result as dict :param additional_elems: dict other than query and des_result :param is_iter: is iterator API :param records: limit for records, 0 for infinite :param timeout: timeout seconds :param tunnel: tunnel entity, vserver or vfiler name """ record_step = 50 if not (na_server or isinstance(na_server, NaServer)): msg = _("Requires an NaServer instance.") raise exception.InvalidInput(reason=msg) server = copy.copy(na_server) if api_family == 'cm': server.set_vserver(tunnel) else: server.set_vfiler(tunnel) if timeout > 0: server.set_timeout(timeout) iter_records = 0 cond = True while cond: na_element = create_api_request( api_name, query, des_result, additional_elems, is_iter, record_step, tag) result = server.invoke_successfully(na_element, True) if is_iter: if records > 0: iter_records = iter_records + record_step if iter_records >= records: cond = False tag_el = result.get_child_by_name('next-tag') tag = tag_el.get_content() if tag_el else None if not tag: cond = False else: cond = False yield result def create_api_request(api_name, query=None, des_result=None, additional_elems=None, is_iter=False, record_step=50, tag=None): """Creates a NetApp API request. 
:param api_name: API name string :param query: API query as dict :param des_result: desired result as dict :param additional_elems: dict other than query and des_result :param is_iter: is iterator API :param record_step: records at a time for iter API :param tag: next tag for iter API """ api_el = NaElement(api_name) if query: query_el = NaElement('query') query_el.translate_struct(query) api_el.add_child_elem(query_el) if des_result: res_el = NaElement('desired-attributes') res_el.translate_struct(des_result) api_el.add_child_elem(res_el) if additional_elems: api_el.translate_struct(additional_elems) if is_iter: api_el.add_new_child('max-records', six.text_type(record_step)) if tag: api_el.add_new_child('tag', tag, True) return api_el manila-2.0.0/manila/share/drivers/ganesha/0000775000567000056710000000000012701407265021546 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/ganesha/utils.py0000664000567000056710000000476412701407107023266 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import pipes from oslo_concurrency import processutils from manila import utils def patch(base, *overlays): """Recursive dictionary patching.""" for ovl in overlays: for k, v in ovl.items(): if isinstance(v, dict) and isinstance(base.get(k), dict): patch(base[k], v) else: base[k] = v return base def walk(dct): """Recursive iteration over dictionary.""" for k, v in dct.items(): if isinstance(v, dict): for w in walk(v): yield w else: yield k, v class RootExecutor(object): """Execute wrapper defaulting to root execution.""" def __init__(self, execute=utils.execute): self.execute = execute def __call__(self, *args, **kwargs): exkwargs = {"run_as_root": True} exkwargs.update(kwargs) return self.execute(*args, **exkwargs) class SSHExecutor(object): """Callable encapsulating exec through ssh.""" def __init__(self, *args, **kwargs): self.pool = utils.SSHPool(*args, **kwargs) def __call__(self, *args, **kwargs): # argument with identifier 'run_as_root=' is not accepted by # processutils's ssh_execute() method unlike processutils's execute() # method. So implement workaround to enable or disable 'run as root' # behavior. run_as_root = kwargs.pop('run_as_root', False) cmd = ' '.join(pipes.quote(a) for a in args) if run_as_root: cmd = ' '.join(['sudo', cmd]) ssh = self.pool.get() try: ret = processutils.ssh_execute(ssh, cmd, **kwargs) finally: self.pool.put(ssh) return ret def path_from(fpath, *rpath): """Return the join of the dir of fpath and rpath in absolute form.""" return os.path.join(os.path.abspath(os.path.dirname(fpath)), *rpath) manila-2.0.0/manila/share/drivers/ganesha/__init__.py0000664000567000056710000001157512701407107023663 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import errno import os import re from oslo_config import cfg from oslo_log import log import six from manila import exception from manila.i18n import _LI from manila.share.drivers.ganesha import manager as ganesha_manager from manila.share.drivers.ganesha import utils as ganesha_utils CONF = cfg.CONF LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class NASHelperBase(object): """Interface to work with share.""" def __init__(self, execute, config, **kwargs): self.configuration = config self._execute = execute def init_helper(self): """Initializes protocol-specific NAS drivers.""" @abc.abstractmethod def allow_access(self, base_path, share, access): """Allow access to the host.""" @abc.abstractmethod def deny_access(self, base_path, share, access): """Deny access to the host.""" class GaneshaNASHelper(NASHelperBase): """Execute commands relating to Shares.""" def __init__(self, execute, config, tag='', **kwargs): super(GaneshaNASHelper, self).__init__(execute, config, **kwargs) self.tag = tag _confrx = re.compile('\.(conf|json)\Z') def _load_conf_dir(self, dirpath, must_exist=True): """Load Ganesha config files in dirpath in alphabetic order.""" try: dirlist = os.listdir(dirpath) except OSError as e: if e.errno != errno.ENOENT or must_exist: raise dirlist = [] LOG.info(_LI('Loading Ganesha config from %s.'), dirpath) conf_files = list(filter(self._confrx.search, dirlist)) conf_files.sort() export_template = {} for conf_file in conf_files: with open(os.path.join(dirpath, conf_file)) as f: ganesha_utils.patch( export_template, ganesha_manager.parseconf(f.read())) return export_template def init_helper(self): """Initializes protocol-specific NAS drivers.""" self.ganesha = ganesha_manager.GaneshaManager( self._execute, self.tag, ganesha_config_path=self.configuration.ganesha_config_path, ganesha_export_dir=self.configuration.ganesha_export_dir, ganesha_db_path=self.configuration.ganesha_db_path, ganesha_service_name=self.configuration.ganesha_service_name) system_export_template = self._load_conf_dir( self.configuration.ganesha_export_template_dir, must_exist=False) if system_export_template: self.export_template = system_export_template else: self.export_template = self._default_config_hook() def _default_config_hook(self): """The default export block. Subclass this to add FSAL specific defaults. Suggested approach: take the return value of superclass' method, patch with dict containing your defaults, and return the result. However, you can also provide your defaults from scratch with no regard to superclass. 
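        A minimal illustrative sketch only (the subclass and FSAL name are
        made up):

            return ganesha_utils.patch(
                super(FooNASHelper, self)._default_config_hook(),
                {'EXPORT': {'FSAL': {'Name': 'FOO'}}})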
""" return self._load_conf_dir(ganesha_utils.path_from(__file__, "conf")) def _fsal_hook(self, base_path, share, access): """Subclass this to create FSAL block.""" return {} def allow_access(self, base_path, share, access): """Allow access to the share.""" if access['access_type'] != 'ip': raise exception.InvalidShareAccess('Only IP access type allowed') cf = {} accid = access['id'] name = share['name'] export_name = "%s--%s" % (name, accid) ganesha_utils.patch(cf, self.export_template, { 'EXPORT': { 'Export_Id': self.ganesha.get_export_id(), 'Path': os.path.join(base_path, name), 'Pseudo': os.path.join(base_path, export_name), 'Tag': accid, 'CLIENT': { 'Clients': access['access_to'] }, 'FSAL': self._fsal_hook(base_path, share, access) } }) self.ganesha.add_export(export_name, cf) def deny_access(self, base_path, share, access): """Deny access to the share.""" self.ganesha.remove_export("%s--%s" % (share['name'], access['id'])) manila-2.0.0/manila/share/drivers/ganesha/conf/0000775000567000056710000000000012701407265022473 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/ganesha/conf/00-base-export-template.conf0000664000567000056710000000223412701407107027615 0ustar jenkinsjenkins00000000000000# This is a Ganesha config template. # Syntactically, a valid Ganesha config # file, but some values in it are stubs. # Fields that have stub values are managed # by Manila; the stubs are of two kinds: # - @config: # value will be taken from Manila config # - @runtime: # value will be determined at runtime # User is free to set Ganesha parameters # which are not reserved to Manila by # stubbing. EXPORT { # Each EXPORT must have a unique Export_Id. Export_Id = @runtime; # The directory in the exported file system this export # is rooted on. Path = @runtime; # FSAL, Ganesha's module component FSAL { # FSAL name Name = @config; } # Path of export in the NFSv4 pseudo filesystem Pseudo = @runtime; # RPC security flavor, one of none, sys, krb5{,i,p} SecType = sys; # Alternative export identifier for NFSv3 Tag = @runtime; # Client specification CLIENT { # Comma separated list of clients Clients = @runtime; # Access type, one of RW, RO, MDONLY, MDONLY_RO, NONE Access_Type = RW; } # User id squashing, one of None, Root, All Squash = None; } manila-2.0.0/manila/share/drivers/ganesha/manager.py0000664000567000056710000003057012701407107023532 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import pipes import re import sys from oslo_log import log from oslo_serialization import jsonutils import six from manila import exception from manila.i18n import _ from manila.i18n import _LE from manila.share.drivers.ganesha import utils as ganesha_utils from manila import utils LOG = log.getLogger(__name__) IWIDTH = 4 def _conf2json(conf): """Convert Ganesha config to JSON.""" # tokenize config string token_list = [six.StringIO()] state = { 'in_quote': False, 'in_comment': False, 'escape': False, } cbk = [] for char in conf: if state['in_quote']: if not state['escape']: if char == '"': state['in_quote'] = False cbk.append(lambda: token_list.append(six.StringIO())) elif char == '\\': cbk.append(lambda: state.update({'escape': True})) else: if char == "#": state['in_comment'] = True if state['in_comment']: if char == "\n": state['in_comment'] = False else: if char == '"': token_list.append(six.StringIO()) state['in_quote'] = True state['escape'] = False if not state['in_comment']: token_list[-1].write(char) while cbk: cbk.pop(0)() if state['in_quote']: raise RuntimeError("Unterminated quoted string") # jsonify tokens js_token_list = ["{"] for tok in token_list: tok = tok.getvalue() if tok[0] == '"': js_token_list.append(tok) continue for pat, s in [ # add omitted "=" signs to block openings ('([^=\s])\s*{', '\\1={'), # delete trailing semicolons in blocks (';\s*}', '}'), # add omitted semicolons after blocks ('}\s*([^}\s])', '};\\1'), # separate syntactically significant characters ('([;{}=])', ' \\1 ')]: tok = re.sub(pat, s, tok) # map tokens to JSON equivalents for word in tok.split(): if word == "=": word = ":" elif word == ";": word = ',' elif (word in ['{', '}'] or re.search('\A-?[1-9]\d*(\.\d+)?\Z', word)): pass else: word = jsonutils.dumps(word) js_token_list.append(word) js_token_list.append("}") # group quoted strings token_grp_list = [] for tok in js_token_list: if tok[0] == '"': if not (token_grp_list and isinstance(token_grp_list[-1], list)): token_grp_list.append([]) token_grp_list[-1].append(tok) else: token_grp_list.append(tok) # process quoted string groups by joining them js_token_list2 = [] for x in token_grp_list: if isinstance(x, list): x = ''.join(['"'] + [tok[1:-1] for tok in x] + ['"']) js_token_list2.append(x) return ''.join(js_token_list2) def _dump_to_conf(confdict, out=sys.stdout, indent=0): """Output confdict in Ganesha config format.""" if isinstance(confdict, dict): for k, v in confdict.items(): if v is None: continue out.write(' ' * (indent * IWIDTH) + k + ' ') if isinstance(v, dict): out.write("{\n") _dump_to_conf(v, out, indent + 1) out.write(' ' * (indent * IWIDTH) + '}') else: out.write('= ') _dump_to_conf(v, out, indent) out.write(';') out.write('\n') else: dj = jsonutils.dumps(confdict) if confdict == dj[1:-1]: out.write(confdict) else: out.write(dj) def parseconf(conf): """Parse Ganesha config. Both native format and JSON are supported. """ try: # allow config to be specified in JSON -- # for sake of people who might feel Ganesha config foreign. 
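# First try to load the input as plain JSON; if that raises ValueError,
# treat it as native Ganesha syntax and convert it with _conf2json() first.
# Illustrative example of the native branch (a tiny, made-up block):
#   parseconf('EXPORT { Export_Id = 101; CLIENT { Clients = "10.0.0.1"; } }')
#   -> {'EXPORT': {'Export_Id': 101, 'CLIENT': {'Clients': '10.0.0.1'}}}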
d = jsonutils.loads(conf) except ValueError: d = jsonutils.loads(_conf2json(conf)) return d def mkconf(confdict): """Create Ganesha config string from confdict.""" s = six.StringIO() _dump_to_conf(confdict, s) return s.getvalue() class GaneshaManager(object): """Ganesha instrumentation class.""" def __init__(self, execute, tag, **kwargs): self.confrx = re.compile('\.conf\Z') self.ganesha_config_path = kwargs['ganesha_config_path'] self.tag = tag def _execute(*args, **kwargs): msg = kwargs.pop('message', args[0]) makelog = kwargs.pop('makelog', True) try: return execute(*args, **kwargs) except exception.ProcessExecutionError as e: if makelog: LOG.error( _LE("Error while executing management command on " "Ganesha node %(tag)s: %(msg)s."), {'tag': tag, 'msg': msg}) raise exception.GaneshaCommandFailure( stdout=e.stdout, stderr=e.stderr, exit_code=e.exit_code, cmd=e.cmd) self.execute = _execute self.ganesha_export_dir = kwargs['ganesha_export_dir'] self.execute('mkdir', '-p', self.ganesha_export_dir) self.ganesha_db_path = kwargs['ganesha_db_path'] self.execute('mkdir', '-p', os.path.dirname(self.ganesha_db_path)) self.ganesha_service = kwargs['ganesha_service_name'] # Here we are to make sure that an SQLite database of the # required scheme exists at self.ganesha_db_path. # The following command gets us there -- provided the file # does not yet exist (otherwise it just fails). However, # we don't care about this condition, we just execute the # command unconditionally (ignoring failure). Instead we # directly query the db right after, to check its validity. self.execute("sqlite3", self.ganesha_db_path, 'create table ganesha(key varchar(20) primary key, ' 'value int); insert into ganesha values("exportid", ' '100);', run_as_root=False, check_exit_code=False) self.get_export_id(bump=False) # Starting from empty state. State will be rebuilt in a later # stage of service initialization. 
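# reset_exports() removes any stale export *.conf files and rebuilds the
# index file; restarting the Ganesha service then brings the running daemon
# in line with this clean on-disk state.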
self.reset_exports() self.restart_service() def _getpath(self, name): """Get the path of config file for name.""" return os.path.join(self.ganesha_export_dir, name + ".conf") def _write_file(self, path, data): """Write data to path atomically.""" dirpath, fname = (getattr(os.path, q + "name")(path) for q in ("dir", "base")) tmpf = self.execute('mktemp', '-p', dirpath, "-t", fname + ".XXXXXX")[0][:-1] self.execute( 'sh', '-c', 'echo %s > %s' % (pipes.quote(data), pipes.quote(tmpf)), message='writing ' + tmpf) self.execute('mv', tmpf, path) def _write_conf_file(self, name, data): """Write data to config file for name atomically.""" path = self._getpath(name) self._write_file(path, data) return path def _mkindex(self): """Generate the index file for current exports.""" @utils.synchronized("ganesha-index-" + self.tag, external=True) def _mkindex(): files = filter(lambda f: self.confrx.search(f) and f != "INDEX.conf", self.execute('ls', self.ganesha_export_dir, run_as_root=False)[0].split("\n")) index = "".join(map(lambda f: "%include " + os.path.join( self.ganesha_export_dir, f) + "\n", files)) self._write_conf_file("INDEX", index) _mkindex() def _read_export_file(self, name): """Return the dict of the export identified by name.""" return parseconf(self.execute("cat", self._getpath(name), message='reading export ' + name)[0]) def _write_export_file(self, name, confdict): """Write confdict to the export file of name.""" for k, v in ganesha_utils.walk(confdict): # values in the export block template that need to be # filled in by Manila are pre-fixed by '@' if isinstance(v, six.string_types) and v[0] == '@': msg = _("Incomplete export block: value %(val)s of attribute " "%(key)s is a stub.") % {'key': k, 'val': v} raise exception.InvalidParameterValue(err=msg) return self._write_conf_file(name, mkconf(confdict)) def _rm_export_file(self, name): """Remove export file of name.""" self.execute("rm", self._getpath(name)) def _dbus_send_ganesha(self, method, *args, **kwargs): """Send a message to Ganesha via dbus.""" service = kwargs.pop("service", "exportmgr") self.execute("dbus-send", "--print-reply", "--system", "--dest=org.ganesha.nfsd", "/org/ganesha/nfsd/ExportMgr", "org.ganesha.nfsd.%s.%s" % (service, method), *args, message='dbus call %s.%s' % (service, method), **kwargs) def _remove_export_dbus(self, xid): """Remove an export from Ganesha runtime with given export id.""" self._dbus_send_ganesha("RemoveExport", "uint16:%d" % xid) def add_export(self, name, confdict): """Add an export to Ganesha specified by confdict.""" xid = confdict["EXPORT"]["Export_Id"] undos = [] _mkindex_called = False try: path = self._write_export_file(name, confdict) undos.append(lambda: self._rm_export_file(name)) self._dbus_send_ganesha("AddExport", "string:" + path, "string:EXPORT(Export_Id=%d)" % xid) undos.append(lambda: self._remove_export_dbus(xid)) _mkindex_called = True self._mkindex() except Exception: for u in undos: u() if not _mkindex_called: self._mkindex() raise def remove_export(self, name): """Remove an export from Ganesha.""" try: confdict = self._read_export_file(name) self._remove_export_dbus(confdict["EXPORT"]["Export_Id"]) finally: self._rm_export_file(name) self._mkindex() def get_export_id(self, bump=True): """Get a new export id.""" # XXX overflowing the export id (16 bit unsigned integer) # is not handled if bump: bumpcode = 'update ganesha set value = value + 1;' else: bumpcode = '' out = self.execute( "sqlite3", self.ganesha_db_path, bumpcode + 'select * from ganesha where key = 
"exportid";', run_as_root=False)[0] match = re.search('\Aexportid\|(\d+)$', out) if not match: LOG.error(_LE("Invalid export database on " "Ganesha node %(tag)s: %(db)s."), {'tag': self.tag, 'db': self.ganesha_db_path}) raise exception.InvalidSqliteDB() return int(match.groups()[0]) def restart_service(self): """Restart the Ganesha service.""" self.execute("service", self.ganesha_service, "restart") def reset_exports(self): """Delete all export files.""" self.execute('sh', '-c', 'rm -f %s/*.conf' % pipes.quote(self.ganesha_export_dir)) self._mkindex() manila-2.0.0/manila/share/drivers/zfsonlinux/0000775000567000056710000000000012701407265022357 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/zfsonlinux/utils.py0000664000567000056710000002501412701407107024066 0ustar jenkinsjenkins00000000000000# Copyright 2016 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Module for storing ZFSonLinux driver utility stuff such as: - Common ZFS code - Share helpers """ # TODO(vponomaryov): add support of SaMBa import abc from oslo_log import log import six from manila.common import constants from manila import exception from manila.i18n import _, _LI, _LW from manila.share import driver from manila.share.drivers.ganesha import utils as ganesha_utils from manila import utils LOG = log.getLogger(__name__) def zfs_dataset_synchronized(f): def wrapped_func(self, *args, **kwargs): key = "zfs-dataset-%s" % args[0] @utils.synchronized(key, external=True) def source_func(self, *args, **kwargs): return f(self, *args, **kwargs) return source_func(self, *args, **kwargs) return wrapped_func class ExecuteMixin(driver.ExecuteMixin): def init_execute_mixin(self, *args, **kwargs): """Init method for mixin called in the end of driver's __init__().""" super(ExecuteMixin, self).init_execute_mixin(*args, **kwargs) if self.configuration.zfs_use_ssh: self.ssh_executor = ganesha_utils.SSHExecutor( ip=self.configuration.zfs_service_ip, port=22, conn_timeout=self.configuration.ssh_conn_timeout, login=self.configuration.zfs_ssh_username, password=self.configuration.zfs_ssh_user_password, privatekey=self.configuration.zfs_ssh_private_key_path, max_size=10, ) else: self.ssh_executor = None def execute(self, *cmd, **kwargs): """Common interface for running shell commands.""" executor = self._execute if self.ssh_executor: executor = self.ssh_executor if cmd[0] == 'sudo': kwargs['run_as_root'] = True cmd = cmd[1:] return executor(*cmd, **kwargs) @utils.retry(exception.ProcessExecutionError, interval=5, retries=36, backoff_rate=1) def execute_with_retry(self, *cmd, **kwargs): """Retry wrapper over common shell interface.""" try: return self.execute(*cmd, **kwargs) except exception.ProcessExecutionError as e: LOG.warning(_LW("Failed to run command, got error: %s"), e) raise def _get_option(self, resource_name, option_name, pool_level=False): """Returns value of requested zpool or zfs dataset option.""" app = 'zpool' if pool_level else 'zfs' out, err = self.execute('sudo', app, 'get', 
option_name, resource_name) data = self.parse_zfs_answer(out) option = data[0]['VALUE'] return option def parse_zfs_answer(self, string): """Returns list of dicts with data returned by ZFS shell commands.""" lines = string.split('\n') if len(lines) < 2: return [] keys = list(filter(None, lines[0].split(' '))) data = [] for line in lines[1:]: values = list(filter(None, line.split(' '))) if not values: continue data.append(dict(zip(keys, values))) return data def get_zpool_option(self, zpool_name, option_name): """Returns value of requested zpool option.""" return self._get_option(zpool_name, option_name, True) def get_zfs_option(self, dataset_name, option_name): """Returns value of requested zfs dataset option.""" return self._get_option(dataset_name, option_name, False) def zfs(self, *cmd, **kwargs): """ZFS shell commands executor.""" return self.execute('sudo', 'zfs', *cmd, **kwargs) @six.add_metaclass(abc.ABCMeta) class NASHelperBase(object): """Base class for share helpers of 'ZFS on Linux' driver.""" def __init__(self, configuration): """Init share helper. :param configuration: share driver 'configuration' instance :return: share helper instance. """ self.configuration = configuration self.init_execute_mixin() # pylint: disable=E1101 self.verify_setup() @abc.abstractmethod def verify_setup(self): """Performs checks for required stuff.""" @abc.abstractmethod def create_exports(self, dataset_name): """Creates share exports.""" @abc.abstractmethod def get_exports(self, dataset_name, service): """Gets/reads share exports.""" @abc.abstractmethod def remove_exports(self, dataset_name): """Removes share exports.""" @abc.abstractmethod def update_access(self, dataset_name, access_rules, add_rules, delete_rules): """Update access rules for specified ZFS dataset.""" class NFSviaZFSHelper(ExecuteMixin, NASHelperBase): """Helper class for handling ZFS datasets as NFS shares. Kernel and Fuse versions of ZFS have different syntax for setting up access rules, and this Helper designed to satisfy both making autodetection. """ @property def is_kernel_version(self): """Says whether Kernel version of ZFS is used or not.""" if not hasattr(self, '_is_kernel_version'): try: self.execute('modinfo', 'zfs') self._is_kernel_version = True except exception.ProcessExecutionError as e: LOG.info( _LI("Looks like ZFS kernel module is absent. " "Assuming FUSE version is installed. 
Error: %s"), e) self._is_kernel_version = False return self._is_kernel_version def verify_setup(self): """Performs checks for required stuff.""" out, err = self.execute('which', 'exportfs') if not out: raise exception.ZFSonLinuxException( msg=_("Utility 'exportfs' is not installed.")) try: self.execute('sudo', 'exportfs') except exception.ProcessExecutionError as e: msg = _("Call of 'exportfs' utility returned error: %s") LOG.exception(msg, e) raise def create_exports(self, dataset_name): """Creates NFS share exports for given ZFS dataset.""" return self.get_exports(dataset_name) def get_exports(self, dataset_name): """Gets/reads NFS share export for given ZFS dataset.""" mountpoint = self.get_zfs_option(dataset_name, 'mountpoint') return [ { "path": "%(ip)s:%(mp)s" % {"ip": ip, "mp": mountpoint}, "metadata": { }, "is_admin_only": is_admin_only, } for ip, is_admin_only in ( (self.configuration.zfs_share_export_ip, False), (self.configuration.zfs_service_ip, True)) ] @zfs_dataset_synchronized def remove_exports(self, dataset_name): """Removes NFS share exports for given ZFS dataset.""" sharenfs = self.get_zfs_option(dataset_name, 'sharenfs') if sharenfs == 'off': return self.zfs("set", "sharenfs=off", dataset_name) def _get_parsed_access_to(self, access_to): netmask = utils.cidr_to_netmask(access_to) if netmask == '255.255.255.255': return access_to.split('/')[0] return access_to.split('/')[0] + '/' + netmask @zfs_dataset_synchronized def update_access(self, dataset_name, access_rules, add_rules, delete_rules, make_all_ro=False): """Update access rules for given ZFS dataset exported as NFS share.""" rw_rules = [] ro_rules = [] for rule in access_rules: if rule['access_type'].lower() != 'ip': msg = _("Only IP access type allowed for NFS protocol.") raise exception.InvalidShareAccess(reason=msg) if (rule['access_level'] == constants.ACCESS_LEVEL_RW and not make_all_ro): rw_rules.append(self._get_parsed_access_to(rule['access_to'])) elif (rule['access_level'] in (constants.ACCESS_LEVEL_RW, constants.ACCESS_LEVEL_RO)): ro_rules.append(self._get_parsed_access_to(rule['access_to'])) else: msg = _("Unsupported access level provided - " "%s.") % rule['access_level'] raise exception.InvalidShareAccess(reason=msg) rules = [] if self.is_kernel_version: if rw_rules: rules.append( "rw=%s,no_root_squash" % ":".join(rw_rules)) if ro_rules: rules.append("ro=%s,no_root_squash" % ":".join(ro_rules)) rules_str = "sharenfs=" + (','.join(rules) or 'off') else: for rule in rw_rules: rules.append("%s:rw,no_root_squash" % rule) for rule in ro_rules: rules.append("%s:ro,no_root_squash" % rule) rules_str = "sharenfs=" + (' '.join(rules) or 'off') out, err = self.zfs('list', '-r', dataset_name.split('/')[0]) data = self.parse_zfs_answer(out) for datum in data: if datum['NAME'] == dataset_name: self.zfs("set", rules_str, dataset_name) break else: LOG.warning( _LW("Dataset with '%(name)s' NAME is absent on backend. " "Access rules were not applied."), {'name': dataset_name}) # NOTE(vponomaryov): Setting of ZFS share options does not remove rules # that were added and then removed. So, remove them explicitly. 
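# Each deleted IP rule is additionally unexported by hand below: the client
# address and the dataset mountpoint are joined into the
# '<client>:<mountpoint>' argument passed to 'exportfs -u'.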
if delete_rules and access_rules: mountpoint = self.get_zfs_option(dataset_name, 'mountpoint') for rule in delete_rules: if rule['access_type'].lower() != 'ip': continue access_to = self._get_parsed_access_to(rule['access_to']) export_location = access_to + ':' + mountpoint self.execute('sudo', 'exportfs', '-u', export_location) manila-2.0.0/manila/share/drivers/zfsonlinux/__init__.py0000664000567000056710000000000012701407107024451 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/zfsonlinux/driver.py0000664000567000056710000014055312701407107024227 0ustar jenkinsjenkins00000000000000# Copyright 2016 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Module with ZFSonLinux share driver that utilizes ZFS filesystem resources and exports them as shares. """ import time from oslo_config import cfg from oslo_log import log from oslo_utils import importutils from oslo_utils import strutils from oslo_utils import timeutils from manila.common import constants from manila import exception from manila.i18n import _, _LI, _LW from manila.share import driver from manila.share.drivers.zfsonlinux import utils as zfs_utils from manila.share import share_types from manila.share import utils as share_utils from manila import utils zfsonlinux_opts = [ cfg.StrOpt( "zfs_share_export_ip", required=True, help="IP to be added to user-facing export location. Required."), cfg.StrOpt( "zfs_service_ip", required=True, help="IP to be added to admin-facing export location. Required."), cfg.ListOpt( "zfs_zpool_list", required=True, help="Specify list of zpools that are allowed to be used by backend. " "Can contain nested datasets. Examples: " "Without nested dataset: 'zpool_name'. " "With nested dataset: 'zpool_name/nested_dataset_name'. " "Required."), cfg.ListOpt( "zfs_dataset_creation_options", help="Define here list of options that should be applied " "for each dataset creation if needed. Example: " "compression=gzip,dedup=off. " "Note that, for secondary replicas option 'readonly' will be set " "to 'on' and for active replicas to 'off' in any way. " "Also, 'quota' will be equal to share size. Optional."), cfg.StrOpt( "zfs_dataset_name_prefix", default='manila_share_', help="Prefix to be used in each dataset name. Optional."), cfg.StrOpt( "zfs_dataset_snapshot_name_prefix", default='manila_share_snapshot_', help="Prefix to be used in each dataset snapshot name. Optional."), cfg.BoolOpt( "zfs_use_ssh", default=False, help="Remote ZFS storage hostname that should be used for SSH'ing. " "Optional."), cfg.StrOpt( "zfs_ssh_username", help="SSH user that will be used in 2 cases: " "1) By manila-share service in case it is located on different " "host than its ZFS storage. " "2) By manila-share services with other ZFS backends that " "perform replication. " "It is expected that SSH'ing will be key-based, passwordless. " "This user should be passwordless sudoer. 
Optional."), cfg.StrOpt( "zfs_ssh_user_password", secret=True, help="Password for user that is used for SSH'ing ZFS storage host. " "Not used for replication operations. They require " "passwordless SSH access. Optional."), cfg.StrOpt( "zfs_ssh_private_key_path", help="Path to SSH private key that should be used for SSH'ing ZFS " "storage host. Not used for replication operations. Optional."), cfg.ListOpt( "zfs_share_helpers", required=True, default=[ "NFS=manila.share.drivers.zfsonlinux.utils.NFSviaZFSHelper", ], help="Specify list of share export helpers for ZFS storage. " "It should look like following: " "'FOO_protocol=foo.FooClass,BAR_protocol=bar.BarClass'. " "Required."), cfg.StrOpt( "zfs_replica_snapshot_prefix", required=True, default="tmp_snapshot_for_replication_", help="Set snapshot prefix for usage in ZFS replication. Required."), ] CONF = cfg.CONF CONF.register_opts(zfsonlinux_opts) LOG = log.getLogger(__name__) def ensure_share_server_not_provided(f): def wrap(self, context, *args, **kwargs): server = kwargs.get('share_server') if server: raise exception.InvalidInput( reason=_("Share server handling is not available. " "But 'share_server' was provided. '%s'. " "Share network should not be used.") % server.get( "id", server)) return f(self, context, *args, **kwargs) return wrap class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver): def __init__(self, *args, **kwargs): super(self.__class__, self).__init__( [False], *args, config_opts=[zfsonlinux_opts], **kwargs) self.replica_snapshot_prefix = ( self.configuration.zfs_replica_snapshot_prefix) self.backend_name = self.configuration.safe_get( 'share_backend_name') or 'ZFSonLinux' self.zpool_list = self._get_zpool_list() self.dataset_creation_options = ( self.configuration.zfs_dataset_creation_options) self.share_export_ip = self.configuration.zfs_share_export_ip self.service_ip = self.configuration.zfs_service_ip self.private_storage = kwargs.get('private_storage') self._helpers = {} # Set config based capabilities self._init_common_capabilities() def _init_common_capabilities(self): self.common_capabilities = {} if 'dedup=on' in self.dataset_creation_options: self.common_capabilities['dedupe'] = [True] elif 'dedup=off' in self.dataset_creation_options: self.common_capabilities['dedupe'] = [False] else: self.common_capabilities['dedupe'] = [True, False] if 'compression=off' in self.dataset_creation_options: self.common_capabilities['compression'] = [False] elif any('compression=' in option for option in self.dataset_creation_options): self.common_capabilities['compression'] = [True] else: self.common_capabilities['compression'] = [True, False] # NOTE(vponomaryov): Driver uses 'quota' approach for # ZFS dataset. So, we can consider it as # 'always thin provisioned' because this driver never reserves # space for dataset. self.common_capabilities['thin_provisioning'] = [True] self.common_capabilities['max_over_subscription_ratio'] = ( self.configuration.max_over_subscription_ratio) self.common_capabilities['qos'] = [False] def _get_zpool_list(self): zpools = [] for zpool in self.configuration.zfs_zpool_list: zpool_name = zpool.split('/')[0] if zpool_name in zpools: raise exception.BadConfigurationException( reason=_("Using the same zpool twice is prohibited. " "Duplicate is '%(zpool)s'. 
List of zpools: " "%(zpool_list)s.") % { 'zpool': zpool, 'zpool_list': ', '.join( self.configuration.zfs_zpool_list)}) zpools.append(zpool_name) return zpools @zfs_utils.zfs_dataset_synchronized def _delete_dataset_or_snapshot_with_retry(self, name): """Attempts to destroy some dataset or snapshot with retries.""" # NOTE(vponomaryov): it is possible to see 'dataset is busy' error # under the load. So, we are ok to perform retry in this case. mountpoint = self.get_zfs_option(name, 'mountpoint') if '@' not in name: # NOTE(vponomaryov): check that dataset has no open files. start_point = time.time() while time.time() - start_point < 60: try: out, err = self.execute('lsof', '-w', mountpoint) except exception.ProcessExecutionError: # NOTE(vponomaryov): lsof returns code 1 if search # didn't give results. break LOG.debug("Cannot destroy dataset '%(name)s', it has " "opened files. Will wait 2 more seconds. " "Out: \n%(out)s", { 'name': name, 'out': out}) time.sleep(2) else: raise exception.ZFSonLinuxException( msg=_("Could not destroy '%s' dataset, " "because it had opened files.") % name) # NOTE(vponomaryov): Now, when no file usages and mounts of dataset # exist, destroy dataset. try: self.zfs('destroy', '-f', name) return except exception.ProcessExecutionError: LOG.info(_LI("Failed to destroy ZFS dataset, retrying one time")) # NOTE(bswartz): There appears to be a bug in ZFS when creating and # destroying datasets concurrently where the filesystem remains mounted # even though ZFS thinks it's unmounted. The most reliable workaround # I've found is to force the unmount, then retry the destroy, with # short pauses around the unmount. time.sleep(1) try: self.execute('sudo', 'umount', mountpoint) except exception.ProcessExecutionError: # Ignore failed umount, it's normal pass time.sleep(1) # This time the destroy is expected to succeed. self.zfs('destroy', '-f', name) def _setup_helpers(self): """Setups share helper for ZFS backend.""" self._helpers = {} helpers = self.configuration.zfs_share_helpers if helpers: for helper_str in helpers: share_proto, __, import_str = helper_str.partition('=') helper = importutils.import_class(import_str) self._helpers[share_proto.upper()] = helper( self.configuration) else: raise exception.BadConfigurationException( reason=_( "No share helpers selected for ZFSonLinux Driver. 
" "Please specify using config option 'zfs_share_helpers'.")) def _get_share_helper(self, share_proto): """Returns share helper specific for used share protocol.""" helper = self._helpers.get(share_proto) if helper: return helper else: raise exception.InvalidShare( reason=_("Wrong, unsupported or disabled protocol - " "'%s'.") % share_proto) def do_setup(self, context): """Perform basic setup and checks.""" super(self.__class__, self).do_setup(context) self._setup_helpers() for ip in (self.share_export_ip, self.service_ip): if not utils.is_valid_ip_address(ip, 4): raise exception.BadConfigurationException( reason=_("Wrong IP address provided: " "%s") % self.share_export_ip) if not self.zpool_list: raise exception.BadConfigurationException( reason=_("No zpools specified for usage: " "%s") % self.zpool_list) # Make pool mounts shared so that cloned namespaces receive unmounts # and don't prevent us from unmounting datasets for zpool in self.configuration.zfs_zpool_list: self.execute('sudo', 'mount', '--make-rshared', ('/%s' % zpool)) if self.configuration.zfs_use_ssh: # Check workability of SSH executor self.ssh_executor('whoami') def _get_pools_info(self): """Returns info about all pools used by backend.""" pools = [] for zpool in self.zpool_list: free_size = self.get_zpool_option(zpool, 'free') free_size = utils.translate_string_size_to_float(free_size) total_size = self.get_zpool_option(zpool, 'size') total_size = utils.translate_string_size_to_float(total_size) pool = { 'pool_name': zpool, 'total_capacity_gb': float(total_size), 'free_capacity_gb': float(free_size), 'reserved_percentage': self.configuration.reserved_share_percentage, } pool.update(self.common_capabilities) if self.configuration.replication_domain: pool['replication_type'] = 'readable' pools.append(pool) return pools def _update_share_stats(self): """Retrieves share stats info.""" data = { 'share_backend_name': self.backend_name, 'storage_protocol': 'NFS', 'reserved_percentage': self.configuration.reserved_share_percentage, 'consistency_group_support': None, 'snapshot_support': True, 'driver_name': 'ZFS', 'pools': self._get_pools_info(), } if self.configuration.replication_domain: data['replication_type'] = 'readable' super(self.__class__, self)._update_share_stats(data) def _get_share_name(self, share_id): """Returns name of dataset used for given share.""" prefix = self.configuration.zfs_dataset_name_prefix or '' return prefix + share_id.replace('-', '_') def _get_snapshot_name(self, snapshot_id): """Returns name of dataset snapshot used for given share snapshot.""" prefix = self.configuration.zfs_dataset_snapshot_name_prefix or '' return prefix + snapshot_id.replace('-', '_') def _get_dataset_creation_options(self, share, is_readonly=False): """Returns list of options to be used for dataset creation.""" options = ['quota=%sG' % share['size']] extra_specs = share_types.get_extra_specs_from_share(share) dedupe_set = False dedupe = extra_specs.get('dedupe') if dedupe: dedupe = strutils.bool_from_string( dedupe.lower().split(' ')[-1], default=dedupe) if (dedupe in self.common_capabilities['dedupe']): options.append('dedup=%s' % ('on' if dedupe else 'off')) dedupe_set = True else: raise exception.ZFSonLinuxException(msg=_( "Cannot use requested '%(requested)s' value of 'dedupe' " "extra spec. 
It does not fit allowed value '%(allowed)s' " "that is configured for backend.") % { 'requested': dedupe, 'allowed': self.common_capabilities['dedupe']}) compression_set = False compression_type = extra_specs.get('zfsonlinux:compression') if compression_type: if (compression_type == 'off' and False in self.common_capabilities['compression']): options.append('compression=off') compression_set = True elif (compression_type != 'off' and True in self.common_capabilities['compression']): options.append('compression=%s' % compression_type) compression_set = True else: raise exception.ZFSonLinuxException(msg=_( "Cannot use value '%s' of extra spec " "'zfsonlinux:compression' because compression is disabled " "for this backend. Set extra spec 'compression=True' to " "make scheduler pick up appropriate backend." ) % compression_type) for option in self.dataset_creation_options or []: if any(v in option for v in ( 'readonly', 'sharenfs', 'sharesmb', 'quota')): continue if 'dedup' in option and dedupe_set is True: continue if 'compression' in option and compression_set is True: continue options.append(option) if is_readonly: options.append('readonly=on') else: options.append('readonly=off') return options def _get_dataset_name(self, share): """Returns name of dataset used for given share.""" pool_name = share_utils.extract_host(share['host'], level='pool') # Pick pool with nested dataset name if set up for pool in self.configuration.zfs_zpool_list: pool_data = pool.split('/') if (pool_name == pool_data[0] and len(pool_data) > 1): pool_name = pool if pool_name[-1] == '/': pool_name = pool_name[0:-1] break dataset_name = self._get_share_name(share['id']) full_dataset_name = '%(pool)s/%(dataset)s' % { 'pool': pool_name, 'dataset': dataset_name} return full_dataset_name @ensure_share_server_not_provided def create_share(self, context, share, share_server=None): """Is called to create a share.""" options = self._get_dataset_creation_options(share, is_readonly=False) cmd = ['create'] for option in options: cmd.extend(['-o', option]) dataset_name = self._get_dataset_name(share) cmd.append(dataset_name) ssh_cmd = '%(username)s@%(host)s' % { 'username': self.configuration.zfs_ssh_username, 'host': self.service_ip, } pool_name = share_utils.extract_host(share['host'], level='pool') self.private_storage.update( share['id'], { 'entity_type': 'share', 'dataset_name': dataset_name, 'ssh_cmd': ssh_cmd, # used in replication 'pool_name': pool_name, # used in replication 'used_options': ' '.join(options), } ) self.zfs(*cmd) return self._get_share_helper( share['share_proto']).create_exports(dataset_name) @ensure_share_server_not_provided def delete_share(self, context, share, share_server=None): """Is called to remove a share.""" pool_name = self.private_storage.get(share['id'], 'pool_name') dataset_name = self.private_storage.get(share['id'], 'dataset_name') if not dataset_name: dataset_name = self._get_dataset_name(share) out, err = self.zfs('list', '-r', pool_name) data = self.parse_zfs_answer(out) for datum in data: if datum['NAME'] != dataset_name: continue # Delete dataset's snapshots first out, err = self.zfs('list', '-r', '-t', 'snapshot', pool_name) snapshots = self.parse_zfs_answer(out) full_snapshot_prefix = ( dataset_name + '@' + self.replica_snapshot_prefix) for snap in snapshots: if full_snapshot_prefix in snap['NAME']: self._delete_dataset_or_snapshot_with_retry(snap['NAME']) self._get_share_helper( share['share_proto']).remove_exports(dataset_name) 
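# Exports and replication snapshots are cleaned up at this point, so the
# dataset itself can be destroyed; the retry helper copes with the transient
# 'dataset is busy' condition.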
self._delete_dataset_or_snapshot_with_retry(dataset_name) break else: LOG.warning( _LW("Share with '%(id)s' ID and '%(name)s' NAME is " "absent on backend. Nothing has been deleted."), {'id': share['id'], 'name': dataset_name}) self.private_storage.delete(share['id']) @ensure_share_server_not_provided def create_snapshot(self, context, snapshot, share_server=None): """Is called to create a snapshot.""" dataset_name = self.private_storage.get( snapshot['share_instance_id'], 'dataset_name') snapshot_tag = self._get_snapshot_name(snapshot['id']) snapshot_name = dataset_name + '@' + snapshot_tag self.private_storage.update( snapshot['snapshot_id'], { 'entity_type': 'snapshot', 'snapshot_tag': snapshot_tag, } ) self.zfs('snapshot', snapshot_name) @ensure_share_server_not_provided def delete_snapshot(self, context, snapshot, share_server=None): """Is called to remove a snapshot.""" return self._delete_snapshot(context, snapshot) def _get_saved_snapshot_name(self, snapshot_instance): snapshot_tag = self.private_storage.get( snapshot_instance['snapshot_id'], 'snapshot_tag') dataset_name = self.private_storage.get( snapshot_instance['share_instance_id'], 'dataset_name') snapshot_name = dataset_name + '@' + snapshot_tag return snapshot_name def _delete_snapshot(self, context, snapshot): snapshot_name = self._get_saved_snapshot_name(snapshot) out, err = self.zfs('list', '-r', '-t', 'snapshot', snapshot_name) data = self.parse_zfs_answer(out) for datum in data: if datum['NAME'] == snapshot_name: self._delete_dataset_or_snapshot_with_retry(snapshot_name) break else: LOG.warning( _LW("Snapshot with '%(id)s' ID and '%(name)s' NAME is " "absent on backend. Nothing has been deleted."), {'id': snapshot['id'], 'name': snapshot_name}) self.private_storage.delete(snapshot['id']) @ensure_share_server_not_provided def create_share_from_snapshot(self, context, share, snapshot, share_server=None): """Is called to create a share from snapshot.""" dataset_name = self._get_dataset_name(share) ssh_cmd = '%(username)s@%(host)s' % { 'username': self.configuration.zfs_ssh_username, 'host': self.service_ip, } pool_name = share_utils.extract_host(share['host'], level='pool') options = self._get_dataset_creation_options(share, is_readonly=False) self.private_storage.update( share['id'], { 'entity_type': 'share', 'dataset_name': dataset_name, 'ssh_cmd': ssh_cmd, # used in replication 'pool_name': pool_name, # used in replication 'used_options': options, } ) snapshot_name = self._get_saved_snapshot_name(snapshot) self.execute( # NOTE(vponomaryov): SSH is used as workaround for 'execute' # implementation restriction that does not support usage of '|'. 'ssh', ssh_cmd, 'sudo', 'zfs', 'send', '-vDp', snapshot_name, '|', 'sudo', 'zfs', 'receive', '-v', dataset_name, ) # Apply options based on used share type that may differ from # one used for original share. for option in options: self.zfs('set', option, dataset_name) # Delete with retry as right after creation it may be temporary busy. self.execute_with_retry( 'sudo', 'zfs', 'destroy', dataset_name + '@' + snapshot_name.split('@')[-1]) return self._get_share_helper( share['share_proto']).create_exports(dataset_name) def get_pool(self, share): """Return pool name where the share resides on. :param share: The share hosted by the driver. 
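:return: pool name extracted from the share's 'host' value.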
""" pool_name = share_utils.extract_host(share['host'], level='pool') return pool_name @ensure_share_server_not_provided def ensure_share(self, context, share, share_server=None): """Invoked to ensure that given share is exported.""" dataset_name = self.private_storage.get(share['id'], 'dataset_name') if not dataset_name: dataset_name = self._get_dataset_name(share) pool_name = share_utils.extract_host(share['host'], level='pool') out, err = self.zfs('list', '-r', pool_name) data = self.parse_zfs_answer(out) for datum in data: if datum['NAME'] == dataset_name: ssh_cmd = '%(username)s@%(host)s' % { 'username': self.configuration.zfs_ssh_username, 'host': self.service_ip, } self.private_storage.update( share['id'], {'ssh_cmd': ssh_cmd}) sharenfs = self.get_zfs_option(dataset_name, 'sharenfs') if sharenfs != 'off': self.zfs('share', dataset_name) export_locations = self._get_share_helper( share['share_proto']).get_exports(dataset_name) return export_locations else: raise exception.ShareResourceNotFound(share_id=share['id']) def get_network_allocations_number(self): """ZFS does not handle networking. Return 0.""" return 0 @ensure_share_server_not_provided def extend_share(self, share, new_size, share_server=None): """Extends size of existing share.""" dataset_name = self._get_dataset_name(share) self.zfs('set', 'quota=%sG' % new_size, dataset_name) @ensure_share_server_not_provided def shrink_share(self, share, new_size, share_server=None): """Shrinks size of existing share.""" dataset_name = self._get_dataset_name(share) consumed_space = self.get_zfs_option(dataset_name, 'used') consumed_space = utils.translate_string_size_to_float(consumed_space) if consumed_space >= new_size: raise exception.ShareShrinkingPossibleDataLoss( share_id=share['id']) self.zfs('set', 'quota=%sG' % new_size, dataset_name) @ensure_share_server_not_provided def update_access(self, context, share, access_rules, add_rules, delete_rules, share_server=None): """Updates access rules for given share.""" dataset_name = self._get_dataset_name(share) return self._get_share_helper(share['share_proto']).update_access( dataset_name, access_rules, add_rules, delete_rules) def unmanage(self, share): """Removes the specified share from Manila management.""" self.private_storage.delete(share['id']) def _get_replication_snapshot_prefix(self, replica): """Returns replica-based snapshot prefix.""" replication_snapshot_prefix = "%s_%s" % ( self.replica_snapshot_prefix, replica['id'].replace('-', '_')) return replication_snapshot_prefix def _get_replication_snapshot_tag(self, replica): """Returns replica- and time-based snapshot tag.""" current_time = timeutils.utcnow().isoformat() snapshot_tag = "%s_time_%s" % ( self._get_replication_snapshot_prefix(replica), current_time) return snapshot_tag def _get_active_replica(self, replica_list): for replica in replica_list: if replica['replica_state'] == constants.REPLICA_STATE_ACTIVE: return replica msg = _("Active replica not found.") raise exception.ReplicationException(reason=msg) @ensure_share_server_not_provided def create_replica(self, context, replica_list, new_replica, access_rules, replica_snapshots, share_server=None): """Replicates the active replica to a new replica on this backend.""" active_replica = self._get_active_replica(replica_list) src_dataset_name = self.private_storage.get( active_replica['id'], 'dataset_name') ssh_to_src_cmd = self.private_storage.get( active_replica['id'], 'ssh_cmd') dst_dataset_name = self._get_dataset_name(new_replica) ssh_cmd = 
'%(username)s@%(host)s' % { 'username': self.configuration.zfs_ssh_username, 'host': self.service_ip, } snapshot_tag = self._get_replication_snapshot_tag(new_replica) src_snapshot_name = ( '%(dataset_name)s@%(snapshot_tag)s' % { 'snapshot_tag': snapshot_tag, 'dataset_name': src_dataset_name, } ) # Save valuable data to DB self.private_storage.update(active_replica['id'], { 'repl_snapshot_tag': snapshot_tag, }) self.private_storage.update(new_replica['id'], { 'entity_type': 'replica', 'replica_type': 'readable', 'dataset_name': dst_dataset_name, 'ssh_cmd': ssh_cmd, 'pool_name': share_utils.extract_host( new_replica['host'], level='pool'), 'repl_snapshot_tag': snapshot_tag, }) # Create temporary snapshot. It will exist until following replica sync # After it - new one will appear and so in loop. self.execute( 'ssh', ssh_to_src_cmd, 'sudo', 'zfs', 'snapshot', src_snapshot_name, ) # Send/receive temporary snapshot out, err = self.execute( 'ssh', ssh_to_src_cmd, 'sudo', 'zfs', 'send', '-vDR', src_snapshot_name, '|', 'ssh', ssh_cmd, 'sudo', 'zfs', 'receive', '-v', dst_dataset_name, ) msg = ("Info about replica '%(replica_id)s' creation is following: " "\n%(out)s") LOG.debug(msg, {'replica_id': new_replica['id'], 'out': out}) # Make replica readonly self.zfs('set', 'readonly=on', dst_dataset_name) # Set original share size as quota to new replica self.zfs('set', 'quota=%sG' % active_replica['size'], dst_dataset_name) # Apply access rules from original share self._get_share_helper(new_replica['share_proto']).update_access( dst_dataset_name, access_rules, add_rules=[], delete_rules=[], make_all_ro=True) return { 'export_locations': self._get_share_helper( new_replica['share_proto']).create_exports(dst_dataset_name), 'replica_state': constants.REPLICA_STATE_IN_SYNC, 'access_rules_status': constants.STATUS_ACTIVE, } @ensure_share_server_not_provided def delete_replica(self, context, replica_list, replica_snapshots, replica, share_server=None): """Deletes a replica. This is called on the destination backend.""" pool_name = self.private_storage.get(replica['id'], 'pool_name') dataset_name = self.private_storage.get(replica['id'], 'dataset_name') if not dataset_name: dataset_name = self._get_dataset_name(replica) # Delete dataset's snapshots first out, err = self.zfs('list', '-r', '-t', 'snapshot', pool_name) data = self.parse_zfs_answer(out) for datum in data: if dataset_name in datum['NAME']: self._delete_dataset_or_snapshot_with_retry(datum['NAME']) # Now we delete dataset itself out, err = self.zfs('list', '-r', pool_name) data = self.parse_zfs_answer(out) for datum in data: if datum['NAME'] == dataset_name: self._get_share_helper( replica['share_proto']).remove_exports(dataset_name) self._delete_dataset_or_snapshot_with_retry(dataset_name) break else: LOG.warning( _LW("Share replica with '%(id)s' ID and '%(name)s' NAME is " "absent on backend. 
Nothing has been deleted."), {'id': replica['id'], 'name': dataset_name}) self.private_storage.delete(replica['id']) @ensure_share_server_not_provided def update_replica_state(self, context, replica_list, replica, access_rules, replica_snapshots, share_server=None): """Syncs replica and updates its 'replica_state'.""" return self._update_replica_state( context, replica_list, replica, replica_snapshots, access_rules) def _update_replica_state(self, context, replica_list, replica, replica_snapshots=None, access_rules=None): active_replica = self._get_active_replica(replica_list) src_dataset_name = self.private_storage.get( active_replica['id'], 'dataset_name') ssh_to_src_cmd = self.private_storage.get( active_replica['id'], 'ssh_cmd') ssh_to_dst_cmd = self.private_storage.get( replica['id'], 'ssh_cmd') dst_dataset_name = self.private_storage.get( replica['id'], 'dataset_name') # Create temporary snapshot previous_snapshot_tag = self.private_storage.get( replica['id'], 'repl_snapshot_tag') snapshot_tag = self._get_replication_snapshot_tag(replica) src_snapshot_name = src_dataset_name + '@' + snapshot_tag self.execute( 'ssh', ssh_to_src_cmd, 'sudo', 'zfs', 'snapshot', src_snapshot_name, ) # Make sure it is readonly self.zfs('set', 'readonly=on', dst_dataset_name) # Send/receive diff between previous snapshot and last one out, err = self.execute( 'ssh', ssh_to_src_cmd, 'sudo', 'zfs', 'send', '-vDRI', previous_snapshot_tag, src_snapshot_name, '|', 'ssh', ssh_to_dst_cmd, 'sudo', 'zfs', 'receive', '-vF', dst_dataset_name, ) msg = ("Info about last replica '%(replica_id)s' sync is following: " "\n%(out)s") LOG.debug(msg, {'replica_id': replica['id'], 'out': out}) # Update DB data that will be used on following replica sync self.private_storage.update(active_replica['id'], { 'repl_snapshot_tag': snapshot_tag, }) self.private_storage.update( replica['id'], {'repl_snapshot_tag': snapshot_tag}) # Destroy all snapshots on dst filesystem except referenced ones. snap_references = set() for repl in replica_list: snap_references.add( self.private_storage.get(repl['id'], 'repl_snapshot_tag')) dst_pool_name = dst_dataset_name.split('/')[0] out, err = self.zfs('list', '-r', '-t', 'snapshot', dst_pool_name) data = self.parse_zfs_answer(out) for datum in data: if (dst_dataset_name in datum['NAME'] and '@' + self.replica_snapshot_prefix in datum['NAME'] and datum['NAME'].split('@')[-1] not in snap_references): self._delete_dataset_or_snapshot_with_retry(datum['NAME']) # Destroy all snapshots on src filesystem except referenced ones. src_pool_name = src_snapshot_name.split('/')[0] out, err = self.execute( 'ssh', ssh_to_src_cmd, 'sudo', 'zfs', 'list', '-r', '-t', 'snapshot', src_pool_name, ) data = self.parse_zfs_answer(out) full_src_snapshot_prefix = ( src_dataset_name + '@' + self._get_replication_snapshot_prefix(replica)) for datum in data: if (full_src_snapshot_prefix in datum['NAME'] and datum['NAME'].split('@')[-1] not in snap_references): self.execute_with_retry( 'ssh', ssh_to_src_cmd, 'sudo', 'zfs', 'destroy', '-f', datum['NAME'], ) if access_rules: # Apply access rules from original share # TODO(vponomaryov): we should remove somehow rules that were # deleted on active replica after creation of secondary replica. # For the moment there will be difference and it can be considered # as a bug. 
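# make_all_ro=True downgrades every rule to read-only on this secondary
# replica; writable access is only ever granted on the active replica.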
self._get_share_helper(replica['share_proto']).update_access( dst_dataset_name, access_rules, add_rules=[], delete_rules=[], make_all_ro=True) # Return results return constants.REPLICA_STATE_IN_SYNC @ensure_share_server_not_provided def promote_replica(self, context, replica_list, replica, access_rules, share_server=None): """Promotes secondary replica to active and active to secondary.""" active_replica = self._get_active_replica(replica_list) src_dataset_name = self.private_storage.get( active_replica['id'], 'dataset_name') ssh_to_src_cmd = self.private_storage.get( active_replica['id'], 'ssh_cmd') dst_dataset_name = self.private_storage.get( replica['id'], 'dataset_name') replica_dict = { r['id']: { 'id': r['id'], # NOTE(vponomaryov): access rules will be updated in next # 'sync' operation. 'access_rules_status': constants.STATUS_OUT_OF_SYNC, } for r in replica_list } try: # Mark currently active replica as readonly self.execute( 'ssh', ssh_to_src_cmd, 'set', 'readonly=on', src_dataset_name, ) # Create temporary snapshot of currently active replica snapshot_tag = self._get_replication_snapshot_tag(active_replica) src_snapshot_name = src_dataset_name + '@' + snapshot_tag self.execute( 'ssh', ssh_to_src_cmd, 'sudo', 'zfs', 'snapshot', src_snapshot_name, ) # Apply temporary snapshot to all replicas for repl in replica_list: if repl['replica_state'] == constants.REPLICA_STATE_ACTIVE: continue previous_snapshot_tag = self.private_storage.get( repl['id'], 'repl_snapshot_tag') dataset_name = self.private_storage.get( repl['id'], 'dataset_name') ssh_to_dst_cmd = self.private_storage.get( repl['id'], 'ssh_cmd') try: # Send/receive diff between previous snapshot and last one out, err = self.execute( 'ssh', ssh_to_src_cmd, 'sudo', 'zfs', 'send', '-vDRI', previous_snapshot_tag, src_snapshot_name, '|', 'ssh', ssh_to_dst_cmd, 'sudo', 'zfs', 'receive', '-vF', dataset_name, ) except exception.ProcessExecutionError as e: LOG.warning(_LW("Failed to sync replica %(id)s. %(e)s"), {'id': repl['id'], 'e': e}) replica_dict[repl['id']]['replica_state'] = ( constants.REPLICA_STATE_OUT_OF_SYNC) continue msg = ("Info about last replica '%(replica_id)s' " "sync is following: \n%(out)s") LOG.debug(msg, {'replica_id': repl['id'], 'out': out}) # Update latest replication snapshot for replica self.private_storage.update( repl['id'], {'repl_snapshot_tag': snapshot_tag}) # Update latest replication snapshot for currently active replica self.private_storage.update( active_replica['id'], {'repl_snapshot_tag': snapshot_tag}) replica_dict[active_replica['id']]['replica_state'] = ( constants.REPLICA_STATE_IN_SYNC) except Exception as e: LOG.warning( _LW("Failed to update currently active replica. \n%s"), e) replica_dict[active_replica['id']]['replica_state'] = ( constants.REPLICA_STATE_OUT_OF_SYNC) # Create temporary snapshot of new replica and sync it with other # secondary replicas. 
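# From here on the promoted replica acts as the replication source: the
# fresh snapshot taken below is sent incrementally to each remaining
# secondary replica.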
snapshot_tag = self._get_replication_snapshot_tag(replica) src_snapshot_name = dst_dataset_name + '@' + snapshot_tag ssh_to_src_cmd = self.private_storage.get(replica['id'], 'ssh_cmd') self.zfs('snapshot', src_snapshot_name) for repl in replica_list: if (repl['replica_state'] == constants.REPLICA_STATE_ACTIVE or repl['id'] == replica['id']): continue previous_snapshot_tag = self.private_storage.get( repl['id'], 'repl_snapshot_tag') dataset_name = self.private_storage.get( repl['id'], 'dataset_name') ssh_to_dst_cmd = self.private_storage.get( repl['id'], 'ssh_cmd') try: # Send/receive diff between previous snapshot and last one out, err = self.execute( 'ssh', ssh_to_src_cmd, 'sudo', 'zfs', 'send', '-vDRI', previous_snapshot_tag, src_snapshot_name, '|', 'ssh', ssh_to_dst_cmd, 'sudo', 'zfs', 'receive', '-vF', dataset_name, ) except exception.ProcessExecutionError as e: LOG.warning(_LW("Failed to sync replica %(id)s. %(e)s"), {'id': repl['id'], 'e': e}) replica_dict[repl['id']]['replica_state'] = ( constants.REPLICA_STATE_OUT_OF_SYNC) continue msg = ("Info about last replica '%(replica_id)s' " "sync is following: \n%(out)s") LOG.debug(msg, {'replica_id': repl['id'], 'out': out}) # Update latest replication snapshot for replica self.private_storage.update( repl['id'], {'repl_snapshot_tag': snapshot_tag}) # Update latest replication snapshot for new active replica self.private_storage.update( replica['id'], {'repl_snapshot_tag': snapshot_tag}) replica_dict[replica['id']]['replica_state'] = ( constants.REPLICA_STATE_ACTIVE) self._get_share_helper(replica['share_proto']).update_access( dst_dataset_name, access_rules, add_rules=[], delete_rules=[]) replica_dict[replica['id']]['access_rules_status'] = ( constants.STATUS_ACTIVE) self.zfs('set', 'readonly=off', dst_dataset_name) return list(replica_dict.values()) @ensure_share_server_not_provided def create_replicated_snapshot(self, context, replica_list, replica_snapshots, share_server=None): """Create a snapshot and update across the replicas.""" active_replica = self._get_active_replica(replica_list) src_dataset_name = self.private_storage.get( active_replica['id'], 'dataset_name') ssh_to_src_cmd = self.private_storage.get( active_replica['id'], 'ssh_cmd') replica_snapshots_dict = { si['id']: {'id': si['id']} for si in replica_snapshots} active_snapshot_instance_id = [ si['id'] for si in replica_snapshots if si['share_instance_id'] == active_replica['id']][0] snapshot_tag = self._get_snapshot_name(active_snapshot_instance_id) # Replication should not be dependent on manually created snapshots # so, create additional one, newer, that will be used for replication # synchronizations. 
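# Two snapshots are taken in the loop below: 'snapshot_tag' backs the
# user-visible share snapshot, while 'repl_snapshot_tag' is the helper
# snapshot that the incremental 'zfs send' streams up to and that is then
# recorded as the new sync point for each replica.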
repl_snapshot_tag = self._get_replication_snapshot_tag(active_replica) src_snapshot_name = src_dataset_name + '@' + repl_snapshot_tag self.private_storage.update( replica_snapshots[0]['snapshot_id'], { 'entity_type': 'snapshot', 'snapshot_tag': snapshot_tag, } ) for tag in (snapshot_tag, repl_snapshot_tag): self.execute( 'ssh', ssh_to_src_cmd, 'sudo', 'zfs', 'snapshot', src_dataset_name + '@' + tag, ) # Populate snapshot to all replicas for replica_snapshot in replica_snapshots: replica_id = replica_snapshot['share_instance_id'] if replica_id == active_replica['id']: replica_snapshots_dict[replica_snapshot['id']]['status'] = ( constants.STATUS_AVAILABLE) continue previous_snapshot_tag = self.private_storage.get( replica_id, 'repl_snapshot_tag') dst_dataset_name = self.private_storage.get( replica_id, 'dataset_name') ssh_to_dst_cmd = self.private_storage.get(replica_id, 'ssh_cmd') try: # Send/receive diff between previous snapshot and last one out, err = self.execute( 'ssh', ssh_to_src_cmd, 'sudo', 'zfs', 'send', '-vDRI', previous_snapshot_tag, src_snapshot_name, '|', 'ssh', ssh_to_dst_cmd, 'sudo', 'zfs', 'receive', '-vF', dst_dataset_name, ) except exception.ProcessExecutionError as e: LOG.warning( _LW("Failed to sync snapshot instance %(id)s. %(e)s"), {'id': replica_snapshot['id'], 'e': e}) replica_snapshots_dict[replica_snapshot['id']]['status'] = ( constants.STATUS_ERROR) continue replica_snapshots_dict[replica_snapshot['id']]['status'] = ( constants.STATUS_AVAILABLE) msg = ("Info about last replica '%(replica_id)s' " "sync is following: \n%(out)s") LOG.debug(msg, {'replica_id': replica_id, 'out': out}) # Update latest replication snapshot for replica self.private_storage.update( replica_id, {'repl_snapshot_tag': repl_snapshot_tag}) # Update latest replication snapshot for currently active replica self.private_storage.update( active_replica['id'], {'repl_snapshot_tag': repl_snapshot_tag}) return list(replica_snapshots_dict.values()) @ensure_share_server_not_provided def delete_replicated_snapshot(self, context, replica_list, replica_snapshots, share_server=None): """Delete a snapshot by deleting its instances across the replicas.""" active_replica = self._get_active_replica(replica_list) replica_snapshots_dict = { si['id']: {'id': si['id']} for si in replica_snapshots} for replica_snapshot in replica_snapshots: replica_id = replica_snapshot['share_instance_id'] snapshot_name = self._get_saved_snapshot_name(replica_snapshot) if active_replica['id'] == replica_id: self._delete_snapshot(context, replica_snapshot) replica_snapshots_dict[replica_snapshot['id']]['status'] = ( constants.STATUS_DELETED) continue ssh_cmd = self.private_storage.get(replica_id, 'ssh_cmd') out, err = self.execute( 'ssh', ssh_cmd, 'sudo', 'zfs', 'list', '-r', '-t', 'snapshot', snapshot_name, ) data = self.parse_zfs_answer(out) for datum in data: if datum['NAME'] != snapshot_name: continue self.execute_with_retry( 'ssh', ssh_cmd, 'sudo', 'zfs', 'destroy', '-f', datum['NAME'], ) self.private_storage.delete(replica_snapshot['id']) replica_snapshots_dict[replica_snapshot['id']]['status'] = ( constants.STATUS_DELETED) return list(replica_snapshots_dict.values()) @ensure_share_server_not_provided def update_replicated_snapshot(self, context, replica_list, share_replica, replica_snapshots, replica_snapshot, share_server=None): """Update the status of a snapshot instance that lives on a replica.""" self._update_replica_state(context, replica_list, share_replica) snapshot_name = 
self._get_saved_snapshot_name(replica_snapshot) out, err = self.zfs('list', '-r', '-t', 'snapshot', snapshot_name) data = self.parse_zfs_answer(out) snapshot_found = False for datum in data: if datum['NAME'] == snapshot_name: snapshot_found = True break return_dict = {'id': replica_snapshot['id']} if snapshot_found: return_dict.update({'status': constants.STATUS_AVAILABLE}) else: return_dict.update({'status': constants.STATUS_ERROR}) return return_dict manila-2.0.0/manila/share/drivers/huawei/0000775000567000056710000000000012701407265021422 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/huawei/constants.py0000664000567000056710000000460512701407107024010 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. STATUS_ETH_RUNNING = "10" STATUS_FS_HEALTH = "1" STATUS_FS_RUNNING = "27" STATUS_JOIN_DOMAIN = '1' STATUS_EXIT_DOMAIN = '0' STATUS_SERVICE_RUNNING = "2" STATUS_QOS_ACTIVE = '2' DEFAULT_WAIT_INTERVAL = 3 DEFAULT_TIMEOUT = 60 MAX_FS_NUM_IN_QOS = 64 MSG_SNAPSHOT_NOT_FOUND = 1073754118 IP_ALLOCATIONS_DHSS_FALSE = 0 IP_ALLOCATIONS_DHSS_TRUE = 1 SOCKET_TIMEOUT = 52 LOGIN_SOCKET_TIMEOUT = 4 QOS_NAME_PREFIX = 'OpenStack_' SYSTEM_NAME_PREFIX = "Array-" MIN_ARRAY_VERSION_FOR_QOS = 'V300R003C00' TMP_PATH_SRC_PREFIX = "huawei_manila_tmp_path_src_" TMP_PATH_DST_PREFIX = "huawei_manila_tmp_path_dst_" ACCESS_NFS_RW = "1" ACCESS_NFS_RO = "0" ACCESS_CIFS_FULLCONTROL = "1" ACCESS_CIFS_RO = "0" ERROR_CONNECT_TO_SERVER = -403 ERROR_UNAUTHORIZED_TO_SERVER = -401 ERROR_LOGICAL_PORT_EXIST = 1073813505 ERROR_USER_OR_GROUP_NOT_EXIST = 1077939723 PORT_TYPE_ETH = '1' PORT_TYPE_BOND = '7' PORT_TYPE_VLAN = '8' SORT_BY_VLAN = 1 SORT_BY_LOGICAL = 2 ALLOC_TYPE_THIN_FLAG = "1" ALLOC_TYPE_THICK_FLAG = "0" ALLOC_TYPE_THIN = "Thin" ALLOC_TYPE_THICK = "Thick" THIN_PROVISIONING = "true" THICK_PROVISIONING = "false" OPTS_QOS_VALUE = { 'maxiops': None, 'miniops': None, 'minbandwidth': None, 'maxbandwidth': None, 'latency': None, 'iotype': None } QOS_LOWER_LIMIT = ['MINIOPS', 'LATENCY', 'MINBANDWIDTH'] QOS_UPPER_LIMIT = ['MAXIOPS', 'MAXBANDWIDTH'] OPTS_CAPABILITIES = { 'dedupe': False, 'compression': False, 'huawei_smartcache': False, 'huawei_smartpartition': False, 'thin_provisioning': None, 'qos': False, } OPTS_VALUE = { 'cachename': None, 'partitionname': None, } OPTS_VALUE.update(OPTS_QOS_VALUE) OPTS_ASSOCIATE = { 'huawei_smartcache': 'cachename', 'huawei_smartpartition': 'partitionname', 'qos': OPTS_QOS_VALUE, } manila-2.0.0/manila/share/drivers/huawei/__init__.py0000664000567000056710000000000012701407107023514 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/huawei/huawei_utils.py0000664000567000056710000000467512701407107024505 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log from manila.i18n import _LE from manila.share.drivers.huawei import constants from manila.share import share_types LOG = log.getLogger(__name__) def get_share_extra_specs_params(type_id): """Return the parameters for creating the share.""" opts = None if type_id is not None: specs = share_types.get_share_type_extra_specs(type_id) opts = _get_opts_from_specs(specs) LOG.debug('Get share type extra specs: %s', opts) return opts def _get_opts_from_specs(specs): opts = copy.deepcopy(constants.OPTS_CAPABILITIES) opts.update(constants.OPTS_VALUE) for key, value in specs.items(): # Get the scope, if using scope format scope = None key_split = key.split(':') if len(key_split) not in (1, 2): continue if len(key_split) == 1: key = key_split[0] else: scope = key_split[0] key = key_split[1] if scope: scope = scope.lower() if key: key = key.lower() # We want both the scheduler and the driver to act on the value. if ((not scope or scope == 'capabilities') and key in constants.OPTS_CAPABILITIES): words = value.split() if not (words and len(words) == 2 and words[0] == '<is>'): LOG.error(_LE("Extra specs must be specified as " "capabilities:%s='<is> True'."), key) else: opts[key] = words[1].lower() if ((scope in constants.OPTS_CAPABILITIES) and (key in constants.OPTS_VALUE)): if ((scope in constants.OPTS_ASSOCIATE) and (key in constants.OPTS_ASSOCIATE[scope])): opts[key] = value return opts manila-2.0.0/manila/share/drivers/huawei/base.py0000664000567000056710000000610312701407107022701 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
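# Every Huawei backend plugin (e.g. v3/connection.py:V3StorageConnection)
# subclasses HuaweiBase and implements the abstract operations declared below.
# A minimal hypothetical plugin would look like:
#
#     class FakeStorageConnection(HuaweiBase):
#         def create_share(self, share, share_server):
#             return '/fake/export/location'
#         # ... plus the remaining abstract methods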
"""Abstract base class to work with share.""" import abc import six @six.add_metaclass(abc.ABCMeta) class HuaweiBase(object): """Interface to work with share.""" def __init__(self, configuration): """Do initialization.""" self.configuration = configuration @abc.abstractmethod def create_share(self, share, share_server): """Is called to create share.""" @abc.abstractmethod def create_snapshot(self, snapshot, share_server): """Is called to create snapshot.""" @abc.abstractmethod def delete_share(self, share, share_server): """Is called to remove share.""" @abc.abstractmethod def delete_snapshot(self, snapshot, share_server): """Is called to remove snapshot.""" @abc.abstractmethod def allow_access(self, share, access, share_server): """Allow access to the share.""" @abc.abstractmethod def deny_access(self, share, access, share_server): """Deny access to the share.""" @abc.abstractmethod def ensure_share(self, share, share_server=None): """Ensure that share is exported.""" @abc.abstractmethod def update_access(self, share, access_rules, add_rules, delete_rules, share_server): """Update access rules list.""" @abc.abstractmethod def extend_share(self, share, new_size, share_server): """Extends size of existing share.""" @abc.abstractmethod def create_share_from_snapshot(self, share, snapshot, share_server=None): """Create share from snapshot.""" @abc.abstractmethod def shrink_share(self, share, new_size, share_server): """Shrinks size of existing share.""" @abc.abstractmethod def manage_existing(self, share, driver_options): """Manage existing share.""" @abc.abstractmethod def get_network_allocations_number(self): """Get number of network interfaces to be created.""" @abc.abstractmethod def get_pool(self, share): """Return pool name where the share resides on.""" def update_share_stats(self, stats_dict): """Retrieve stats info from share group.""" @abc.abstractmethod def setup_server(self, network_info, metadata=None): """Set up share server with given network parameters.""" @abc.abstractmethod def teardown_server(self, server_details, security_services=None): """Teardown share server.""" manila-2.0.0/manila/share/drivers/huawei/huawei_nas.py0000664000567000056710000001737512701407107024127 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Huawei Nas Driver for Huawei storage arrays.""" from xml.etree import ElementTree as ET from oslo_config import cfg from oslo_log import log from oslo_utils import importutils from manila import exception from manila.i18n import _ from manila.share import driver HUAWEI_UNIFIED_DRIVER_REGISTRY = { 'V3': 'manila.share.drivers.huawei.v3.connection.V3StorageConnection', } huawei_opts = [ cfg.StrOpt('manila_huawei_conf_file', default='/etc/manila/manila_huawei_conf.xml', help='The configuration file for the Manila Huawei driver.')] CONF = cfg.CONF CONF.register_opts(huawei_opts) LOG = log.getLogger(__name__) class HuaweiNasDriver(driver.ShareDriver): """Huawei Share Driver. Executes commands relating to Shares. API version history: 1.0 - Initial version. 1.1 - Add shrink share. Add extend share. Add manage share. Add share level(ro). Add smartx capabilities. Support multi pools in one backend. 1.2 - Add share server support. Add ensure share. Add QoS support. Add create share from snapshot. """ def __init__(self, *args, **kwargs): """Do initialization.""" LOG.debug("Enter into init function.") super(HuaweiNasDriver, self).__init__((True, False), *args, **kwargs) self.configuration = kwargs.get('configuration', None) if self.configuration: self.configuration.append_config_values(huawei_opts) backend_driver = self.get_backend_driver() self.plugin = importutils.import_object(backend_driver, self.configuration) else: raise exception.InvalidShare( reason=_("Huawei configuration missing.")) def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" self.plugin.check_conf_file() self.plugin.check_service() def get_backend_driver(self): filename = self.configuration.manila_huawei_conf_file try: tree = ET.parse(filename) root = tree.getroot() except Exception as err: message = (_('Read Huawei config file(%(filename)s)' ' for Manila error: %(err)s') % {'filename': filename, 'err': err}) LOG.error(message) raise exception.InvalidInput(reason=message) product = root.findtext('Storage/Product') backend_driver = HUAWEI_UNIFIED_DRIVER_REGISTRY.get(product) if backend_driver is None: raise exception.InvalidInput( reason=_('Product %s is not supported. 
Product ' 'must be set to V3.') % product) return backend_driver def do_setup(self, context): """Any initialization the huawei nas driver does while starting.""" LOG.debug("Do setup the plugin.") self.plugin.connect() def create_share(self, context, share, share_server=None): """Create a share.""" LOG.debug("Create a share.") location = self.plugin.create_share(share, share_server) return location def extend_share(self, share, new_size, share_server=None): LOG.debug("Extend a share.") self.plugin.extend_share(share, new_size, share_server) def create_share_from_snapshot(self, context, share, snapshot, share_server=None): """Create a share from snapshot.""" LOG.debug("Create a share from snapshot %s.", snapshot['snapshot_id']) location = self.plugin.create_share_from_snapshot(share, snapshot) return location def shrink_share(self, share, new_size, share_server=None): """Shrinks size of existing share.""" LOG.debug("Shrink a share.") self.plugin.shrink_share(share, new_size, share_server) def delete_share(self, context, share, share_server=None): """Delete a share.""" LOG.debug("Delete a share.") self.plugin.delete_share(share, share_server) def create_snapshot(self, context, snapshot, share_server=None): """Create a snapshot.""" LOG.debug("Create a snapshot.") self.plugin.create_snapshot(snapshot, share_server) def delete_snapshot(self, context, snapshot, share_server=None): """Delete a snapshot.""" LOG.debug("Delete a snapshot.") self.plugin.delete_snapshot(snapshot, share_server) def ensure_share(self, context, share, share_server=None): """Ensure that share is exported.""" LOG.debug("Ensure share.") location = self.plugin.ensure_share(share, share_server) return location def allow_access(self, context, share, access, share_server=None): """Allow access to the share.""" LOG.debug("Allow access.") self.plugin.allow_access(share, access, share_server) def deny_access(self, context, share, access, share_server=None): """Deny access to the share.""" LOG.debug("Deny access.") self.plugin.deny_access(share, access, share_server) def update_access(self, context, share, access_rules, add_rules, delete_rules, share_server=None): """Update access rules list.""" LOG.debug("Update access.") self.plugin.update_access(share, access_rules, add_rules, delete_rules, share_server) def get_pool(self, share): """Return pool name where the share resides on.""" LOG.debug("Get pool.") return self.plugin.get_pool(share) def get_network_allocations_number(self): """Get number of network interfaces to be created.""" LOG.debug("Get network allocations number.") return self.plugin.get_network_allocations_number() def manage_existing(self, share, driver_options): """Manage existing share.""" LOG.debug("Manage existing share to manila.") share_size, location = self.plugin.manage_existing(share, driver_options) return {'size': share_size, 'export_locations': location} def _update_share_stats(self): """Retrieve status info from share group.""" backend_name = self.configuration.safe_get('share_backend_name') data = dict( share_backend_name=backend_name or 'HUAWEI_NAS_Driver', vendor_name='Huawei', driver_version='1.2', storage_protocol='NFS_CIFS', qos=True, total_capacity_gb=0.0, free_capacity_gb=0.0) self.plugin.update_share_stats(data) super(HuaweiNasDriver, self)._update_share_stats(data) def _setup_server(self, network_info, metadata=None): """Set up share server with given network parameters.""" return self.plugin.setup_server(network_info, metadata) def _teardown_server(self, server_details, 
security_services=None): """Teardown share server.""" return self.plugin.teardown_server(server_details, security_services) manila-2.0.0/manila/share/drivers/huawei/v3/0000775000567000056710000000000012701407265021752 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/huawei/v3/__init__.py0000664000567000056710000000000012701407107024044 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/huawei/v3/helper.py0000664000567000056710000013410512701407107023602 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import copy import time from xml.etree import ElementTree as ET from oslo_log import log from oslo_serialization import jsonutils import six from six.moves import http_cookiejar from six.moves.urllib import request as urlreq # pylint: disable=E0611 from manila import exception from manila.i18n import _ from manila.i18n import _LE from manila.i18n import _LW from manila.share.drivers.huawei import constants from manila import utils LOG = log.getLogger(__name__) class RestHelper(object): """Helper class for Huawei OceanStor V3 storage system.""" def __init__(self, configuration): self.configuration = configuration self.init_http_head() def init_http_head(self): self.cookie = http_cookiejar.CookieJar() self.url = None self.headers = { "Connection": "keep-alive", "Content-Type": "application/json", } def do_call(self, url, data=None, method=None, calltimeout=constants.SOCKET_TIMEOUT): """Send requests to server. Send HTTPS call, get response in JSON. Convert response into Python Object and return it. """ if self.url: url = self.url + url if "xx/sessions" not in url: LOG.debug('Request URL: %(url)s\n' 'Call Method: %(method)s\n' 'Request Data: %(data)s\n', {'url': url, 'method': method, 'data': data}) opener = urlreq.build_opener(urlreq.HTTPCookieProcessor(self.cookie)) urlreq.install_opener(opener) result = None try: req = urlreq.Request(url, data, self.headers) if method: req.get_method = lambda: method res_temp = urlreq.urlopen(req, timeout=calltimeout) res = res_temp.read().decode("utf-8") LOG.debug('Response Data: %(res)s.', {'res': res}) except Exception as err: LOG.error(_LE('\nBad response from server: %(url)s.' 
' Error: %(err)s'), {'url': url, 'err': err}) res = '{"error":{"code":%s,' \ '"description":"Connect server error"}}' \ % constants.ERROR_CONNECT_TO_SERVER try: result = jsonutils.loads(res) except Exception as err: err_msg = (_('JSON transfer error: %s.') % err) LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) return result def login(self): """Login huawei array.""" login_info = self._get_login_info() urlstr = login_info['RestURL'] url_list = urlstr.split(";") deviceid = None for item_url in url_list: url = item_url.strip('').strip('\n') + "xx/sessions" data = jsonutils.dumps({"username": login_info['UserName'], "password": login_info['UserPassword'], "scope": "0"}) self.init_http_head() result = self.do_call(url, data, calltimeout=constants.LOGIN_SOCKET_TIMEOUT) if((result['error']['code'] != 0) or ("data" not in result) or (result['data']['deviceid'] is None)): LOG.error(_LE("Login to %s failed, try another."), item_url) continue LOG.debug('Login success: %(url)s\n', {'url': item_url}) deviceid = result['data']['deviceid'] self.url = item_url + deviceid self.headers['iBaseToken'] = result['data']['iBaseToken'] break if deviceid is None: err_msg = _("All url login fail.") LOG.error(err_msg) raise exception.InvalidShare(reason=err_msg) return deviceid @utils.synchronized('huawei_manila') def call(self, url, data=None, method=None): """Send requests to server. If fail, try another RestURL. """ deviceid = None old_url = self.url result = self.do_call(url, data, method) error_code = result['error']['code'] if(error_code == constants.ERROR_CONNECT_TO_SERVER or error_code == constants.ERROR_UNAUTHORIZED_TO_SERVER): LOG.error(_LE("Can't open the recent url, re-login.")) deviceid = self.login() if deviceid is not None: LOG.debug('Replace URL: \n' 'Old URL: %(old_url)s\n' 'New URL: %(new_url)s\n', {'old_url': old_url, 'new_url': self.url}) result = self.do_call(url, data, method) return result def _create_filesystem(self, fs_param): """Create file system.""" url = "/filesystem" data = jsonutils.dumps(fs_param) result = self.call(url, data) msg = 'Create filesystem error.' self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data']['ID'] def _assert_rest_result(self, result, err_str): if result['error']['code'] != 0: err_msg = (_('%(err)s\nresult: %(res)s.') % {'err': err_str, 'res': result}) LOG.error(err_msg) raise exception.InvalidShare(reason=err_msg) def _assert_data_in_result(self, result, msg): if "data" not in result: err_msg = (_('%s "data" was not in result.') % msg) LOG.error(err_msg) raise exception.InvalidShare(reason=err_msg) def _get_login_info(self): """Get login IP, username and password from config file.""" logininfo = {} filename = self.configuration.manila_huawei_conf_file tree = ET.parse(filename) root = tree.getroot() RestURL = root.findtext('Storage/RestURL') logininfo['RestURL'] = RestURL.strip() # Prefix !$$$ means encoded already. 
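        # In practice: on the first read the plain-text UserName/UserPassword
        # values from the XML are rewritten as '!$$$' + base64(value)
        # (e.g. 'admin' -> '!$$$YWRtaW4='), so credentials are not kept on
        # disk in clear text; later reads detect the prefix and simply
        # base64-decode the remainder (node.text[4:]).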
prefix_name = '!$$$' need_encode = False for key in ['UserName', 'UserPassword']: node = root.find('Storage/%s' % key) if node.text.find(prefix_name) > -1: logininfo[key] = base64.b64decode(six.b(node.text[4:])) else: logininfo[key] = node.text node.text = prefix_name + six.text_type( base64.b64encode(six.b(node.text))) need_encode = True if need_encode: self._change_file_mode(filename) try: tree.write(filename, 'UTF-8') except Exception as err: err_msg = (_('File write error %s.') % err) LOG.error(err_msg) raise exception.InvalidShare(reason=err_msg) return logininfo def _change_file_mode(self, filepath): try: utils.execute('chmod', '666', filepath, run_as_root=True) except Exception as err: LOG.error(_LE('Bad response from change file: %s.') % err) raise err def _create_share(self, share_name, fs_id, share_proto): """Create a share.""" share_url_type = self._get_share_url_type(share_proto) share_path = self._get_share_path(share_name) filepath = {} if share_proto == 'NFS': filepath = { "DESCRIPTION": "", "FSID": fs_id, "SHAREPATH": share_path, } elif share_proto == 'CIFS': filepath = { "SHAREPATH": share_path, "DESCRIPTION": "", "ABEENABLE": "false", "ENABLENOTIFY": "true", "ENABLEOPLOCK": "true", "NAME": share_name.replace("-", "_"), "FSID": fs_id, "TENANCYID": "0", } else: raise exception.InvalidShare( reason=(_('Invalid NAS protocol supplied: %s.') % share_proto)) url = "/" + share_url_type data = jsonutils.dumps(filepath) result = self.call(url, data, "POST") msg = 'Create share error.' self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data']['ID'] def _delete_share_by_id(self, share_id, share_url_type): """Delete share by share id.""" url = "/" + share_url_type + "/" + share_id result = self.call(url, None, "DELETE") self._assert_rest_result(result, 'Delete share error.') def _delete_fs(self, fs_id): """Delete file system.""" # Get available file system url = "/filesystem/" + fs_id result = self.call(url, None, "DELETE") self._assert_rest_result(result, 'Delete file system error.') def _get_cifs_service_status(self): url = "/CIFSSERVICE" result = self.call(url, None, "GET") msg = 'Get CIFS service status error.' self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data']['RUNNINGSTATUS'] def _get_nfs_service_status(self): url = "/NFSSERVICE" result = self.call(url, None, "GET") msg = 'Get NFS service status error.' 
self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) service = {} service['RUNNINGSTATUS'] = result['data']['RUNNINGSTATUS'] service['SUPPORTV3'] = result['data']['SUPPORTV3'] service['SUPPORTV4'] = result['data']['SUPPORTV4'] return service def _start_nfs_service_status(self): url = "/NFSSERVICE" nfsserviceinfo = { "NFSV4DOMAIN": "localdomain", "RUNNINGSTATUS": "2", "SUPPORTV3": 'true', "SUPPORTV4": 'true', "TYPE": "16452", } data = jsonutils.dumps(nfsserviceinfo) result = self.call(url, data, "PUT") self._assert_rest_result(result, 'Start NFS service error.') def _start_cifs_service_status(self): url = "/CIFSSERVICE" cifsserviceinfo = { "ENABLENOTIFY": "true", "ENABLEOPLOCK": "true", "ENABLEOPLOCKLEASE": "false", "GUESTENABLE": "false", "OPLOCKTIMEOUT": "35", "RUNNINGSTATUS": "2", "SECURITYMODEL": "3", "SIGNINGENABLE": "false", "SIGNINGREQUIRED": "false", "TYPE": "16453", } data = jsonutils.dumps(cifsserviceinfo) result = self.call(url, data, "PUT") self._assert_rest_result(result, 'Start CIFS service error.') def _find_pool_info(self, pool_name, result): if pool_name is None: return poolinfo = {} pool_name = pool_name.strip() for item in result.get('data', []): if pool_name == item['NAME'] and '2' == item['USAGETYPE']: poolinfo['name'] = pool_name poolinfo['ID'] = item['ID'] poolinfo['CAPACITY'] = item['USERFREECAPACITY'] poolinfo['TOTALCAPACITY'] = item['USERTOTALCAPACITY'] poolinfo['CONSUMEDCAPACITY'] = item['USERCONSUMEDCAPACITY'] break return poolinfo def _find_all_pool_info(self): url = "/storagepool" result = self.call(url, None) msg = "Query resource pool error." self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result def _read_xml(self): """Open xml file and parse the content.""" filename = self.configuration.manila_huawei_conf_file try: tree = ET.parse(filename) root = tree.getroot() except Exception as err: message = (_('Read Huawei config file(%(filename)s)' ' for Manila error: %(err)s') % {'filename': filename, 'err': err}) LOG.error(message) raise exception.InvalidInput(reason=message) return root def _remove_access_from_share(self, access_id, share_proto): access_type = self._get_share_client_type(share_proto) url = "/" + access_type + "/" + access_id result = self.call(url, None, "DELETE") self._assert_rest_result(result, 'delete access from share error!') def _get_access_count(self, share_id, share_client_type): url_subfix = ("/" + share_client_type + "/count?" + "filter=PARENTID::" + share_id) url = url_subfix result = self.call(url, None, "GET") msg = "Get access count by share error!" 
self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return int(result['data']['COUNT']) def _get_all_access_from_share(self, share_id, share_proto): """Return a list of all the access IDs of the share""" share_client_type = self._get_share_client_type(share_proto) count = self._get_access_count(share_id, share_client_type) access_ids = [] range_begin = 0 while count > 0: access_range = self._get_access_from_share_range(share_id, range_begin, share_client_type) for item in access_range: access_ids.append(item['ID']) range_begin += 100 count -= 100 return access_ids def _get_access_from_share(self, share_id, access_to, share_proto): """Segments to find access for a period of 100.""" share_client_type = self._get_share_client_type(share_proto) count = self._get_access_count(share_id, share_client_type) access_id = None range_begin = 0 while count > 0: if access_id: break access_range = self._get_access_from_share_range(share_id, range_begin, share_client_type) for item in access_range: if item['NAME'] in (access_to, '@' + access_to): access_id = item['ID'] range_begin += 100 count -= 100 return access_id def _get_access_from_share_range(self, share_id, range_begin, share_client_type): range_end = range_begin + 100 url = ("/" + share_client_type + "?filter=PARENTID::" + share_id + "&range=[" + six.text_type(range_begin) + "-" + six.text_type(range_end) + "]") result = self.call(url, None, "GET") self._assert_rest_result(result, 'Get access id by share error!') return result.get('data', []) def _get_level_by_access_id(self, access_id, share_proto): share_client_type = self._get_share_client_type(share_proto) url = "/" + share_client_type + "/" + access_id result = self.call(url, None, "GET") self._assert_rest_result(result, 'Get access information error!') access_info = result.get('data', []) access_level = access_info.get('ACCESSVAL') if not access_level: access_level = access_info.get('PERMISSION') return access_level def _change_access_rest(self, access_id, share_proto, access_level): """Change access level of the share.""" if share_proto == 'NFS': self._change_nfs_access_rest(access_id, access_level) elif share_proto == 'CIFS': self._change_cifs_access_rest(access_id, access_level) else: raise exception.InvalidInput( reason=(_('Invalid NAS protocol supplied: %s.') % share_proto)) def _change_nfs_access_rest(self, access_id, access_level): url = "/NFS_SHARE_AUTH_CLIENT/" + access_id access = { "ACCESSVAL": access_level, "SYNC": "0", "ALLSQUASH": "1", "ROOTSQUASH": "0", } data = jsonutils.dumps(access) result = self.call(url, data, "PUT") msg = 'Change access error.' self._assert_rest_result(result, msg) def _change_cifs_access_rest(self, access_id, access_level): url = "/CIFS_SHARE_AUTH_CLIENT/" + access_id access = { "PERMISSION": access_level, } data = jsonutils.dumps(access) result = self.call(url, data, "PUT") msg = 'Change access error.' 
self._assert_rest_result(result, msg) def _allow_access_rest(self, share_id, access_to, share_proto, access_level): """Allow access to the share.""" if share_proto == 'NFS': self._allow_nfs_access_rest(share_id, access_to, access_level) elif share_proto == 'CIFS': self._allow_cifs_access_rest(share_id, access_to, access_level) else: raise exception.InvalidInput( reason=(_('Invalid NAS protocol supplied: %s.') % share_proto)) def _allow_nfs_access_rest(self, share_id, access_to, access_level): url = "/NFS_SHARE_AUTH_CLIENT" access = { "TYPE": "16409", "NAME": access_to, "PARENTID": share_id, "ACCESSVAL": access_level, "SYNC": "0", "ALLSQUASH": "1", "ROOTSQUASH": "0", } data = jsonutils.dumps(access) result = self.call(url, data, "POST") msg = 'Allow access error.' self._assert_rest_result(result, msg) def _allow_cifs_access_rest(self, share_id, access_to, access_level): url = "/CIFS_SHARE_AUTH_CLIENT" domain_type = { 'local': '2', 'ad': '0' } error_msg = 'Allow access error.' access_info = ('Access info (access_to: %(access_to)s, ' 'access_level: %(access_level)s, share_id: %(id)s)' % {'access_to': access_to, 'access_level': access_level, 'id': share_id}) def send_rest(access_to, domain_type): access = { "NAME": access_to, "PARENTID": share_id, "PERMISSION": access_level, "DOMAINTYPE": domain_type, } data = jsonutils.dumps(access) result = self.call(url, data, "POST") error_code = result['error']['code'] if error_code == 0: return True elif error_code != constants.ERROR_USER_OR_GROUP_NOT_EXIST: self._assert_rest_result(result, error_msg) return False if '\\' not in access_to: # First, try to add user access. LOG.debug('Try to add user access. %s.', access_info) if send_rest(access_to, domain_type['local']): return # Second, if add user access failed, # try to add group access. LOG.debug('Failed with add user access, ' 'try to add group access. %s.', access_info) # Group name starts with @. if send_rest('@' + access_to, domain_type['local']): return else: LOG.debug('Try to add domain user access. %s.', access_info) if send_rest(access_to, domain_type['ad']): return # If add domain user access failed, # try to add domain group access. LOG.debug('Failed with add domain user access, ' 'try to add domain group access. %s.', access_info) # Group name starts with @. if send_rest('@' + access_to, domain_type['ad']): return raise exception.InvalidShare(reason=error_msg) def _get_share_client_type(self, share_proto): share_client_type = None if share_proto == 'NFS': share_client_type = "NFS_SHARE_AUTH_CLIENT" elif share_proto == 'CIFS': share_client_type = "CIFS_SHARE_AUTH_CLIENT" else: raise exception.InvalidInput( reason=(_('Invalid NAS protocol supplied: %s.') % share_proto)) return share_client_type def _check_snapshot_id_exist(self, snap_id): """Check the snapshot id exists.""" url_subfix = "/FSSNAPSHOT/" + snap_id url = url_subfix result = self.call(url, None, "GET") if result['error']['code'] == constants.MSG_SNAPSHOT_NOT_FOUND: return False elif result['error']['code'] == 0: return True else: err_str = "Check the snapshot id exists error!" 
err_msg = (_('%(err)s\nresult: %(res)s.') % {'err': err_str, 'res': result}) LOG.error(err_msg) raise exception.InvalidShare(reason=err_msg) def _delete_snapshot(self, snap_id): """Deletes snapshot.""" url = "/FSSNAPSHOT/%s" % snap_id data = jsonutils.dumps({"TYPE": "48", "ID": snap_id}) result = self.call(url, data, "DELETE") self._assert_rest_result(result, 'Delete snapshot error.') def _create_snapshot(self, sharefsid, snapshot_name): """Create a snapshot.""" filepath = { "PARENTTYPE": "40", "TYPE": "48", "PARENTID": sharefsid, "NAME": snapshot_name.replace("-", "_"), "DESCRIPTION": "", } url = "/FSSNAPSHOT" data = jsonutils.dumps(filepath) result = self.call(url, data, "POST") msg = 'Create a snapshot error.' self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data']['ID'] def _get_share_by_name(self, share_name, share_url_type): """Segments to find share for a period of 100.""" count = self._get_share_count(share_url_type) share = {} range_begin = 0 while True: if count < 0 or share: break share = self._get_share_by_name_range(share_name, range_begin, share_url_type) range_begin += 100 count -= 100 return share def _get_share_count(self, share_url_type): """Get share count.""" url = "/" + share_url_type + "/count" result = self.call(url, None, "GET") self._assert_rest_result(result, 'Get share count error!') return int(result['data']['COUNT']) def _get_share_by_name_range(self, share_name, range_begin, share_url_type): """Get share by share name.""" range_end = range_begin + 100 url = ("/" + share_url_type + "?range=[" + six.text_type(range_begin) + "-" + six.text_type(range_end) + "]") result = self.call(url, None, "GET") self._assert_rest_result(result, 'Get share by name error!') share_path = self._get_share_path(share_name) share = {} for item in result.get('data', []): if share_path == item['SHAREPATH']: share['ID'] = item['ID'] share['FSID'] = item['FSID'] break return share def _get_share_url_type(self, share_proto): share_url_type = None if share_proto == 'NFS': share_url_type = "NFSHARE" elif share_proto == 'CIFS': share_url_type = "CIFSHARE" else: raise exception.InvalidInput( reason=(_('Invalid NAS protocol supplied: %s.') % share_proto)) return share_url_type def _get_fsid_by_name(self, share_name): url = "/FILESYSTEM?range=[0-8191]" result = self.call(url, None, "GET") self._assert_rest_result(result, 'Get filesystem by name error!') sharename = share_name.replace("-", "_") for item in result.get('data', []): if sharename == item['NAME']: return item['ID'] def _get_fs_info_by_id(self, fsid): url = "/filesystem/%s" % fsid result = self.call(url, None, "GET") msg = "Get filesystem info by id error!" 
self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) fs = {} fs['HEALTHSTATUS'] = result['data']['HEALTHSTATUS'] fs['RUNNINGSTATUS'] = result['data']['RUNNINGSTATUS'] fs['CAPACITY'] = result['data']['CAPACITY'] fs['ALLOCTYPE'] = result['data']['ALLOCTYPE'] fs['POOLNAME'] = result['data']['PARENTNAME'] fs['COMPRESSION'] = result['data']['ENABLECOMPRESSION'] fs['DEDUP'] = result['data']['ENABLEDEDUP'] fs['SMARTPARTITIONID'] = result['data']['CACHEPARTITIONID'] fs['SMARTCACHEID'] = result['data']['SMARTCACHEPARTITIONID'] return fs def _get_share_path(self, share_name): share_path = "/" + share_name.replace("-", "_") + "/" return share_path def get_share_name_by_id(self, share_id): share_name = "share_" + share_id return share_name def _get_share_name_by_export_location(self, export_location, share_proto): export_location_split = None share_name = None share_ip = None if export_location: if share_proto == 'NFS': export_location_split = export_location.split(':/') if len(export_location_split) == 2: share_name = export_location_split[1] share_ip = export_location_split[0] elif share_proto == 'CIFS': export_location_split = export_location.split('\\') if (len(export_location_split) == 4 and export_location_split[0] == "" and export_location_split[1] == ""): share_ip = export_location_split[2] share_name = export_location_split[3] if share_name is None: raise exception.InvalidInput( reason=(_('No share with export location %s could be found.') % export_location)) root = self._read_xml() target_ip = root.findtext('Storage/LogicalPortIP') if target_ip: if share_ip != target_ip.strip(): raise exception.InvalidInput( reason=(_('The share IP %s is not configured.') % share_ip)) else: raise exception.InvalidInput( reason=(_('The config parameter LogicalPortIP is not set.'))) return share_name def _get_snapshot_id(self, fs_id, snap_name): snapshot_id = (fs_id + "@" + "share_snapshot_" + snap_name.replace("-", "_")) return snapshot_id def _change_share_size(self, fsid, new_size): url = "/filesystem/%s" % fsid capacityinfo = { "CAPACITY": new_size, } data = jsonutils.dumps(capacityinfo) result = self.call(url, data, "PUT") msg = "Change a share size error!" 
self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) def _change_fs_name(self, fsid, name): url = "/filesystem/%s" % fsid fs_param = { "NAME": name.replace("-", "_"), } data = jsonutils.dumps(fs_param) result = self.call(url, data, "PUT") msg = _("Change filesystem name error.") self._assert_rest_result(result, msg) def _change_extra_specs(self, fsid, extra_specs): url = "/filesystem/%s" % fsid fs_param = { "ENABLEDEDUP": extra_specs['dedupe'], "ENABLECOMPRESSION": extra_specs['compression'] } data = jsonutils.dumps(fs_param) result = self.call(url, data, "PUT") msg = _("Change extra_specs error.") self._assert_rest_result(result, msg) def _get_partition_id_by_name(self, name): url = "/cachepartition" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get partition by name error.')) if "data" in result: for item in result['data']: if name == item['NAME']: return item['ID'] return None def get_partition_info_by_id(self, partitionid): url = '/cachepartition/' + partitionid result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get partition by partition id error.')) return result['data'] def _add_fs_to_partition(self, fs_id, partition_id): url = "/filesystem/associate/cachepartition" data = jsonutils.dumps({"ID": partition_id, "ASSOCIATEOBJTYPE": 40, "ASSOCIATEOBJID": fs_id, "TYPE": 268}) result = self.call(url, data, "POST") self._assert_rest_result(result, _('Add filesystem to partition error.')) def _remove_fs_from_partition(self, fs_id, partition_id): url = "/smartPartition/removeFs" data = jsonutils.dumps({"ID": partition_id, "ASSOCIATEOBJTYPE": 40, "ASSOCIATEOBJID": fs_id, "TYPE": 268}) result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Remove filesystem from partition error.')) def _get_cache_id_by_name(self, name): url = "/SMARTCACHEPARTITION" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get cache by name error.')) if "data" in result: for item in result['data']: if name == item['NAME']: return item['ID'] return None def get_cache_info_by_id(self, cacheid): url = "/SMARTCACHEPARTITION/" + cacheid data = jsonutils.dumps({"TYPE": "273", "ID": cacheid}) result = self.call(url, data, "GET") self._assert_rest_result( result, _('Get smartcache by cache id error.')) return result['data'] def _add_fs_to_cache(self, fs_id, cache_id): url = "/SMARTCACHEPARTITION/CREATE_ASSOCIATE" data = jsonutils.dumps({"ID": cache_id, "ASSOCIATEOBJTYPE": 40, "ASSOCIATEOBJID": fs_id, "TYPE": 273}) result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Add filesystem to cache error.')) def get_qos(self): url = "/ioclass" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get QoS information error.')) return result def find_available_qos(self, qos): """"Find available QoS on the array.""" qos_id = None fs_list = [] temp_qos = copy.deepcopy(qos) result = self.get_qos() if 'data' in result: if 'LATENCY' not in temp_qos: temp_qos['LATENCY'] = '0' for item in result['data']: for key in constants.OPTS_QOS_VALUE: if temp_qos.get(key.upper()) != item.get(key.upper()): break else: fs_num = len(item['FSLIST'].split(",")) # We use this QoS only if the filesystems in it is less # than 64, else we cannot add filesystem to this QoS # any more. 
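                    # i.e. a policy is reused only when it is active
                    # (STATUS_QOS_ACTIVE), was created by this driver (name
                    # starts with QOS_NAME_PREFIX, 'OpenStack_'), has no LUNs
                    # attached (LUNLIST == '[""]') and holds fewer than
                    # MAX_FS_NUM_IN_QOS (64) filesystems.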
if (item['RUNNINGSTATUS'] == constants.STATUS_QOS_ACTIVE and fs_num < constants.MAX_FS_NUM_IN_QOS and item['NAME'].startswith( constants.QOS_NAME_PREFIX) and item['LUNLIST'] == '[""]'): qos_id = item['ID'] fs_list = item['FSLIST'] break return (qos_id, fs_list) def add_share_to_qos(self, qos_id, fs_id, fs_list): """Add filesystem to QoS.""" url = "/ioclass/" + qos_id new_fs_list = [] fs_list_string = fs_list[1:-1] for fs_string in fs_list_string.split(","): tmp_fs_id = fs_string[1:-1] if '' != tmp_fs_id and tmp_fs_id != fs_id: new_fs_list.append(tmp_fs_id) new_fs_list.append(fs_id) data = jsonutils.dumps({"FSLIST": new_fs_list, "TYPE": 230, "ID": qos_id}) result = self.call(url, data, "PUT") msg = _('Associate filesystem to Qos error.') self._assert_rest_result(result, msg) def create_qos_policy(self, qos, fs_id): # Get local time. localtime = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time())) # Package QoS name. qos_name = constants.QOS_NAME_PREFIX + fs_id + '_' + localtime mergedata = { "TYPE": "230", "NAME": qos_name, "FSLIST": ["%s" % fs_id], "CLASSTYPE": "1", "SCHEDULEPOLICY": "2", "SCHEDULESTARTTIME": "1410969600", "STARTTIME": "08:00", "DURATION": "86400", "CYCLESET": "[1,2,3,4,5,6,0]", } mergedata.update(qos) data = jsonutils.dumps(mergedata) url = "/ioclass" result = self.call(url, data) self._assert_rest_result(result, _('Create QoS policy error.')) return result['data']['ID'] def activate_deactivate_qos(self, qos_id, enablestatus): """Activate or deactivate QoS. enablestatus: true (activate) enablestatus: false (deactivate) """ url = "/ioclass/active/" + qos_id data = jsonutils.dumps({ "TYPE": 230, "ID": qos_id, "ENABLESTATUS": enablestatus}) result = self.call(url, data, "PUT") self._assert_rest_result( result, _('Activate or deactivate QoS error.')) def change_fs_priority_high(self, fs_id): """Change fs priority to high.""" url = "/filesystem/" + fs_id data = jsonutils.dumps({"IOPRIORITY": "3"}) result = self.call(url, data, "PUT") self._assert_rest_result( result, _('Change filesystem priority error.')) def delete_qos_policy(self, qos_id): """Delete a QoS policy.""" url = "/ioclass/" + qos_id data = jsonutils.dumps({"TYPE": "230", "ID": qos_id}) result = self.call(url, data, 'DELETE') self._assert_rest_result(result, _('Delete QoS policy error.')) def get_qosid_by_fsid(self, fs_id): """Get QoS id by fs id.""" url = "/filesystem/" + fs_id result = self.call(url, None, "GET") self._assert_rest_result( result, _('Get QoS id by filesystem id error.')) return result['data'].get('IOCLASSID') def get_fs_list_in_qos(self, qos_id): """Get the filesystem list in QoS.""" qos_info = self.get_qos_info(qos_id) fs_list = [] fs_string = qos_info['FSLIST'][1:-1] for fs in fs_string.split(","): fs_id = fs[1:-1] fs_list.append(fs_id) return fs_list def get_qos_info(self, qos_id): """Get QoS information.""" url = "/ioclass/" + qos_id result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get QoS information error.')) return result['data'] def remove_fs_from_qos(self, fs_id, fs_list, qos_id): """Remove filesystem from QoS.""" fs_list = [i for i in fs_list if i != fs_id] url = "/ioclass/" + qos_id data = jsonutils.dumps({"FSLIST": fs_list, "TYPE": 230, "ID": qos_id}) result = self.call(url, data, "PUT") msg = _('Remove filesystem from QoS error.') self._assert_rest_result(result, msg) def _remove_fs_from_cache(self, fs_id, cache_id): url = "/SMARTCACHEPARTITION/REMOVE_ASSOCIATE" data = jsonutils.dumps({"ID": cache_id, "ASSOCIATEOBJTYPE": 40, "ASSOCIATEOBJID": fs_id, 
"TYPE": 273}) result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Remove filesystem from cache error.')) def get_all_eth_port(self): url = "/ETH_PORT" result = self.call(url, None, 'GET') self._assert_rest_result(result, _('Get all eth port error.')) all_eth = {} if "data" in result: all_eth = result['data'] return all_eth def get_eth_port_by_id(self, port_id): url = "/ETH_PORT/" + port_id result = self.call(url, None, 'GET') self._assert_rest_result(result, _('Get eth port by id error.')) if "data" in result: return result['data'] return None def get_all_bond_port(self): url = "/BOND_PORT" result = self.call(url, None, 'GET') self._assert_rest_result(result, _('Get all bond port error.')) all_bond = {} if "data" in result: all_bond = result['data'] return all_bond def get_port_id(self, port_name, port_type): if port_type == constants.PORT_TYPE_ETH: all_eth = self.get_all_eth_port() for item in all_eth: if port_name == item['LOCATION']: return item['ID'] elif port_type == constants.PORT_TYPE_BOND: all_bond = self.get_all_bond_port() for item in all_bond: if port_name == item['NAME']: return item['ID'] return None def get_all_vlan(self): url = "/vlan" result = self.call(url, None, 'GET') self._assert_rest_result(result, _('Get all vlan error.')) all_vlan = {} if "data" in result: all_vlan = result['data'] return all_vlan def get_vlan(self, port_id, vlan_tag): url = "/vlan" result = self.call(url, None, 'GET') self._assert_rest_result(result, _('Get vlan error.')) vlan_tag = six.text_type(vlan_tag) if "data" in result: for item in result['data']: if port_id == item['PORTID'] and vlan_tag == item['TAG']: return True, item['ID'] return False, None def create_vlan(self, port_id, port_type, vlan_tag): url = "/vlan" data = jsonutils.dumps({"PORTID": port_id, "PORTTYPE": port_type, "TAG": six.text_type(vlan_tag), "TYPE": "280"}) result = self.call(url, data, "POST") self._assert_rest_result(result, _('Create vlan error.')) return result['data']['ID'] def check_vlan_exists_by_id(self, vlan_id): all_vlan = self.get_all_vlan() return any(vlan['ID'] == vlan_id for vlan in all_vlan) def delete_vlan(self, vlan_id): url = "/vlan/" + vlan_id result = self.call(url, None, 'DELETE') if result['error']['code'] == constants.ERROR_LOGICAL_PORT_EXIST: LOG.warning(_LW('Cannot delete vlan because there is ' 'a logical port on vlan.')) return self._assert_rest_result(result, _('Delete vlan error.')) def get_logical_port(self, home_port_id, ip, subnet): url = "/LIF" result = self.call(url, None, 'GET') self._assert_rest_result(result, _('Get logical port error.')) if "data" not in result: return False, None for item in result['data']: if (home_port_id == item['HOMEPORTID'] and ip == item['IPV4ADDR'] and subnet == item['IPV4MASK']): if item['OPERATIONALSTATUS'] != 'true': self._activate_logical_port(item['ID']) return True, item['ID'] return False, None def _activate_logical_port(self, logical_port_id): url = "/LIF/" + logical_port_id data = jsonutils.dumps({"OPERATIONALSTATUS": "true"}) result = self.call(url, data, 'PUT') self._assert_rest_result(result, _('Activate logical port error.')) def create_logical_port(self, home_port_id, home_port_type, ip, subnet): url = "/LIF" info = { "ADDRESSFAMILY": 0, "CANFAILOVER": "true", "HOMEPORTID": home_port_id, "HOMEPORTTYPE": home_port_type, "IPV4ADDR": ip, "IPV4GATEWAY": "", "IPV4MASK": subnet, "NAME": ip, "OPERATIONALSTATUS": "true", "ROLE": 2, "SUPPORTPROTOCOL": 3, "TYPE": "279", } data = jsonutils.dumps(info) result = self.call(url, data, 'POST') 
self._assert_rest_result(result, _('Create logical port error.')) return result['data']['ID'] def check_logical_port_exists_by_id(self, logical_port_id): all_logical_port = self.get_all_logical_port() return any(port['ID'] == logical_port_id for port in all_logical_port) def get_all_logical_port(self): url = "/LIF" result = self.call(url, None, 'GET') self._assert_rest_result(result, _('Get all logical port error.')) all_logical_port = {} if "data" in result: all_logical_port = result['data'] return all_logical_port def delete_logical_port(self, logical_port_id): url = "/LIF/" + logical_port_id result = self.call(url, None, 'DELETE') self._assert_rest_result(result, _('Delete logical port error.')) def set_DNS_ip_address(self, dns_ip_list): if len(dns_ip_list) > 3: message = _('Most three ips can be set to DNS.') LOG.error(message) raise exception.InvalidInput(reason=message) url = "/DNS_Server" dns_info = { "ADDRESS": jsonutils.dumps(dns_ip_list), "TYPE": "260", } data = jsonutils.dumps(dns_info) result = self.call(url, data, 'PUT') self._assert_rest_result(result, _('Set DNS ip address error.')) if "data" in result: return result['data'] return None def get_DNS_ip_address(self): url = "/DNS_Server" result = self.call(url, None, 'GET') self._assert_rest_result(result, _('Get DNS ip address error.')) ip_address = {} if "data" in result: ip_address = jsonutils.loads(result['data']['ADDRESS']) return ip_address def add_AD_config(self, user, password, domain, system_name): url = "/AD_CONFIG" info = { "ADMINNAME": user, "ADMINPWD": password, "DOMAINSTATUS": 1, "FULLDOMAINNAME": domain, "OU": "", "SYSTEMNAME": system_name, "TYPE": "16414", } data = jsonutils.dumps(info) result = self.call(url, data, 'PUT') self._assert_rest_result(result, _('Add AD config error.')) def delete_AD_config(self, user, password): url = "/AD_CONFIG" info = { "ADMINNAME": user, "ADMINPWD": password, "DOMAINSTATUS": 0, "TYPE": "16414", } data = jsonutils.dumps(info) result = self.call(url, data, 'PUT') self._assert_rest_result(result, _('Delete AD config error.')) def get_AD_config(self): url = "/AD_CONFIG" result = self.call(url, None, 'GET') self._assert_rest_result(result, _('Get AD config error.')) if "data" in result: return result['data'] return None def get_AD_domain_name(self): result = self.get_AD_config() if result and result['DOMAINSTATUS'] == '1': return True, result['FULLDOMAINNAME'] return False, None def add_LDAP_config(self, server, domain): url = "/LDAP_CONFIG" info = { "BASEDN": domain, "LDAPSERVER": server, "PORTNUM": 389, "TRANSFERTYPE": "1", "TYPE": "16413", "USERNAME": "", } data = jsonutils.dumps(info) result = self.call(url, data, 'PUT') self._assert_rest_result(result, _('Add LDAP config error.')) def delete_LDAP_config(self): url = "/LDAP_CONFIG" result = self.call(url, None, 'DELETE') self._assert_rest_result(result, _('Delete LDAP config error.')) def get_LDAP_config(self): url = "/LDAP_CONFIG" result = self.call(url, None, 'GET') self._assert_rest_result(result, _('Get LDAP config error.')) if "data" in result: return result['data'] return None def get_LDAP_domain_server(self): result = self.get_LDAP_config() if result and result['LDAPSERVER']: return True, result['LDAPSERVER'] return False, None def find_array_version(self): url = "/system/" result = self.call(url, None) self._assert_rest_result(result, _('Find array version error.')) return result['data']['PRODUCTVERSION'] manila-2.0.0/manila/share/drivers/huawei/v3/smartx.py0000664000567000056710000001744412701407107023647 0ustar 
jenkinsjenkins00000000000000# Copyright (c) 2015 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import excutils from oslo_utils import strutils from manila import exception from manila.i18n import _ from manila.share.drivers.huawei import constants class SmartPartition(object): def __init__(self, helper): self.helper = helper def add(self, opts, fsid): if not strutils.bool_from_string(opts['huawei_smartpartition']): return if not opts['partitionname']: raise exception.InvalidInput( reason=_('Partition name is None, please set ' 'huawei_smartpartition:partitionname in key.')) partition_id = self.helper._get_partition_id_by_name( opts['partitionname']) if not partition_id: raise exception.InvalidInput( reason=_('Can not find partition id.')) self.helper._add_fs_to_partition(fsid, partition_id) class SmartCache(object): def __init__(self, helper): self.helper = helper def add(self, opts, fsid): if not strutils.bool_from_string(opts['huawei_smartcache']): return if not opts['cachename']: raise exception.InvalidInput( reason=_('Illegal value specified for cache.')) cache_id = self.helper._get_cache_id_by_name(opts['cachename']) if not cache_id: raise exception.InvalidInput( reason=(_('Can not find cache id by cache name %(name)s.') % {'name': opts['cachename']})) self.helper._add_fs_to_cache(fsid, cache_id) class SmartQos(object): def __init__(self, helper): self.helper = helper def create_qos(self, qos, fs_id): policy_id = None try: # Check QoS priority. if self._check_qos_high_priority(qos): self.helper.change_fs_priority_high(fs_id) # Create QoS policy and activate it. 
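            # Preference order: attach the filesystem to an existing matching
            # 'OpenStack_' policy returned by find_available_qos(); only when
            # none qualifies is a new policy created and activated. If an
            # InvalidInput error is raised, only a policy created here
            # (policy_id) is rolled back via delete_qos_policy(); a reused
            # shared policy is left untouched.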
(qos_id, fs_list) = self.helper.find_available_qos(qos) if qos_id is not None: self.helper.add_share_to_qos(qos_id, fs_id, fs_list) else: policy_id = self.helper.create_qos_policy(qos, fs_id) self.helper.activate_deactivate_qos(policy_id, True) except exception.InvalidInput: with excutils.save_and_reraise_exception(): if policy_id is not None: self.helper.delete_qos_policy(policy_id) def _check_qos_high_priority(self, qos): """Check QoS priority.""" for key, value in qos.items(): if (key.find('MIN') == 0) or (key.find('LATENCY') == 0): return True return False def delete_qos(self, qos_id): qos_info = self.helper.get_qos_info(qos_id) qos_status = qos_info['RUNNINGSTATUS'] if qos_status == constants.STATUS_QOS_ACTIVE: self.helper.activate_deactivate_qos(qos_id, False) self.helper.delete_qos_policy(qos_id) class SmartX(object): def __init__(self, helper): self.helper = helper def get_smartx_extra_specs_opts(self, opts): opts = self.get_capabilities_opts(opts, 'dedupe') opts = self.get_capabilities_opts(opts, 'compression') opts = self.get_smartprovisioning_opts(opts) opts = self.get_smartcache_opts(opts) opts = self.get_smartpartition_opts(opts) qos = self.get_qos_opts(opts) return opts, qos def get_capabilities_opts(self, opts, key): if strutils.bool_from_string(opts[key]): opts[key] = True else: opts[key] = False return opts def get_smartprovisioning_opts(self, opts): thin_provision = opts.get('thin_provisioning') if thin_provision is None: root = self.helper._read_xml() fstype = root.findtext('Filesystem/AllocType') if fstype: fstype = fstype.strip().strip('\n') if fstype == 'Thin': opts['LUNType'] = constants.ALLOC_TYPE_THIN_FLAG elif fstype == 'Thick': opts['LUNType'] = constants.ALLOC_TYPE_THICK_FLAG else: err_msg = (_( 'Huawei config file is wrong. AllocType type must be ' 'set to "Thin" or "Thick". AllocType:%(fetchtype)s') % {'fetchtype': fstype}) raise exception.InvalidShare(reason=err_msg) else: opts['LUNType'] = constants.ALLOC_TYPE_THICK_FLAG else: if strutils.bool_from_string(thin_provision): opts['LUNType'] = constants.ALLOC_TYPE_THIN_FLAG else: opts['LUNType'] = constants.ALLOC_TYPE_THICK_FLAG return opts def get_smartcache_opts(self, opts): if strutils.bool_from_string(opts['huawei_smartcache']): if not opts['cachename']: raise exception.InvalidInput( reason=_('Cache name is None, please set ' 'huawei_smartcache:cachename in key.')) else: opts['cachename'] = None return opts def get_smartpartition_opts(self, opts): if strutils.bool_from_string(opts['huawei_smartpartition']): if not opts['partitionname']: raise exception.InvalidInput( reason=_('Partition name is None, please set ' 'huawei_smartpartition:partitionname in key.')) else: opts['partitionname'] = None return opts def get_qos_opts(self, opts): qos = {} if not strutils.bool_from_string(opts.get('qos')): return for key, value in opts.items(): if (key in constants.OPTS_QOS_VALUE) and value is not None: if (key.upper() != 'IOTYPE') and (int(value) <= 0): err_msg = (_('QoS config is wrong. %(key)s' ' must be set greater than 0.') % {'key': key}) raise exception.InvalidInput(reason=err_msg) elif ((key.upper() == 'IOTYPE') and (value not in ['0', '1', '2'])): raise exception.InvalidInput( reason=(_('Illegal value specified for IOTYPE: ' 'set to either 0, 1, or 2.'))) else: qos[key.upper()] = value if len(qos) <= 1 or 'IOTYPE' not in qos: msg = (_('QoS config is incomplete. Please set more. 
' 'QoS policy: %(qos_policy)s.') % {'qos_policy': qos}) raise exception.InvalidInput(reason=msg) lowerlimit = constants.QOS_LOWER_LIMIT upperlimit = constants.QOS_UPPER_LIMIT if (set(lowerlimit).intersection(set(qos)) and set(upperlimit).intersection(set(qos))): msg = (_('QoS policy conflict, both protection policy and ' 'restriction policy are set. ' 'QoS policy: %(qos_policy)s ') % {'qos_policy': qos}) raise exception.InvalidInput(reason=msg) return qos manila-2.0.0/manila/share/drivers/huawei/v3/connection.py0000664000567000056710000020035012701407107024456 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import random import string import tempfile import time from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import excutils from oslo_utils import strutils from oslo_utils import units import six from manila.common import constants as common_constants from manila.data import utils as data_utils from manila import exception from manila.i18n import _ from manila.i18n import _LE from manila.i18n import _LI from manila.i18n import _LW from manila.share.drivers.huawei import base as driver from manila.share.drivers.huawei import constants from manila.share.drivers.huawei import huawei_utils from manila.share.drivers.huawei.v3 import helper from manila.share.drivers.huawei.v3 import smartx from manila.share import share_types from manila.share import utils as share_utils from manila import utils LOG = log.getLogger(__name__) class V3StorageConnection(driver.HuaweiBase): """Helper class for Huawei OceanStor V3 storage system.""" def __init__(self, configuration): super(V3StorageConnection, self).__init__(configuration) self.qos_support = False def connect(self): """Try to connect to V3 server.""" if self.configuration: self.helper = helper.RestHelper(self.configuration) else: raise exception.InvalidInput(_("Huawei configuration missing.")) self.helper.login() def create_share(self, share, share_server=None): """Create a share.""" share_name = share['name'] share_proto = share['share_proto'] pool_name = share_utils.extract_host(share['host'], level='pool') if not pool_name: msg = _("Pool is not available in the share host field.") raise exception.InvalidHost(reason=msg) result = self.helper._find_all_pool_info() poolinfo = self.helper._find_pool_info(pool_name, result) if not poolinfo: msg = (_("Can not find pool info by pool name: %s.") % pool_name) raise exception.InvalidHost(reason=msg) fs_id = None # We sleep here to ensure the newly created filesystem can be read. 
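        # The loop below polls _get_fs_info_by_id() every wait_interval
        # seconds until check_fs_status() reports the filesystem healthy and
        # running, or until the timeout elapses. wait_interval and timeout
        # come from _get_wait_interval()/_get_timeout() (defined later in this
        # class; assumed to fall back to constants.DEFAULT_WAIT_INTERVAL and
        # constants.DEFAULT_TIMEOUT). On any creation error the filesystem,
        # and any QoS policy already bound to it, is deleted before raising
        # InvalidShare.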
wait_interval = self._get_wait_interval() timeout = self._get_timeout() try: fs_id = self.allocate_container(share, poolinfo) fs = self.helper._get_fs_info_by_id(fs_id) end_time = time.time() + timeout while not (self.check_fs_status(fs['HEALTHSTATUS'], fs['RUNNINGSTATUS']) or time.time() > end_time): time.sleep(wait_interval) fs = self.helper._get_fs_info_by_id(fs_id) if not self.check_fs_status(fs['HEALTHSTATUS'], fs['RUNNINGSTATUS']): raise exception.InvalidShare( reason=(_('Invalid status of filesystem: ' 'HEALTHSTATUS=%(health)s ' 'RUNNINGSTATUS=%(running)s.') % {'health': fs['HEALTHSTATUS'], 'running': fs['RUNNINGSTATUS']})) except Exception as err: if fs_id is not None: qos_id = self.helper.get_qosid_by_fsid(fs_id) if qos_id: self.remove_qos_fs(fs_id, qos_id) self.helper._delete_fs(fs_id) message = (_('Failed to create share %(name)s.' 'Reason: %(err)s.') % {'name': share_name, 'err': err}) raise exception.InvalidShare(reason=message) try: self.helper._create_share(share_name, fs_id, share_proto) except Exception as err: if fs_id is not None: qos_id = self.helper.get_qosid_by_fsid(fs_id) if qos_id: self.remove_qos_fs(fs_id, qos_id) self.helper._delete_fs(fs_id) raise exception.InvalidShare( reason=(_('Failed to create share %(name)s. Reason: %(err)s.') % {'name': share_name, 'err': err})) ip = self._get_share_ip(share_server) location = self._get_location_path(share_name, share_proto, ip) return location def _get_share_ip(self, share_server): """"Get share logical ip.""" if share_server: ip = share_server['backend_details'].get('ip') else: root = self.helper._read_xml() ip = root.findtext('Storage/LogicalPortIP').strip() return ip def extend_share(self, share, new_size, share_server): share_proto = share['share_proto'] share_name = share['name'] # The unit is in sectors. size = int(new_size) * units.Mi * 2 share_url_type = self.helper._get_share_url_type(share_proto) share = self.helper._get_share_by_name(share_name, share_url_type) if not share: err_msg = (_("Can not get share ID by share %s.") % share_name) LOG.error(err_msg) raise exception.InvalidShareAccess(reason=err_msg) fsid = share['FSID'] fs_info = self.helper._get_fs_info_by_id(fsid) current_size = int(fs_info['CAPACITY']) / units.Mi / 2 if current_size >= new_size: err_msg = (_("New size for extend must be bigger than " "current size on array. (current: %(size)s, " "new: %(new_size)s).") % {'size': current_size, 'new_size': new_size}) LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) self.helper._change_share_size(fsid, size) def shrink_share(self, share, new_size, share_server): """Shrinks size of existing share.""" share_proto = share['share_proto'] share_name = share['name'] # The unit is in sectors. size = int(new_size) * units.Mi * 2 share_url_type = self.helper._get_share_url_type(share_proto) share = self.helper._get_share_by_name(share_name, share_url_type) if not share: err_msg = (_("Can not get share ID by share %s.") % share_name) LOG.error(err_msg) raise exception.InvalidShare(reason=err_msg) fsid = share['FSID'] fs_info = self.helper._get_fs_info_by_id(fsid) if not fs_info: err_msg = (_("Can not get filesystem info by filesystem ID: %s.") % fsid) LOG.error(err_msg) raise exception.InvalidShare(reason=err_msg) current_size = int(fs_info['CAPACITY']) / units.Mi / 2 if current_size <= new_size: err_msg = (_("New size for shrink must be less than current " "size on array. 
(current: %(size)s, " "new: %(new_size)s).") % {'size': current_size, 'new_size': new_size}) LOG.error(err_msg) raise exception.InvalidShare(reason=err_msg) if fs_info['ALLOCTYPE'] != constants.ALLOC_TYPE_THIN_FLAG: err_msg = (_("Share (%s) can not be shrunk. only 'Thin' shares " "support shrink.") % share_name) LOG.error(err_msg) raise exception.InvalidShare(reason=err_msg) self.helper._change_share_size(fsid, size) def check_fs_status(self, health_status, running_status): if (health_status == constants.STATUS_FS_HEALTH and running_status == constants.STATUS_FS_RUNNING): return True else: return False def assert_filesystem(self, fsid): fs = self.helper._get_fs_info_by_id(fsid) if not self.check_fs_status(fs['HEALTHSTATUS'], fs['RUNNINGSTATUS']): err_msg = (_('Invalid status of filesystem: ' 'HEALTHSTATUS=%(health)s ' 'RUNNINGSTATUS=%(running)s.') % {'health': fs['HEALTHSTATUS'], 'running': fs['RUNNINGSTATUS']}) raise exception.StorageResourceException(err_msg) def create_snapshot(self, snapshot, share_server=None): """Create a snapshot.""" snap_name = snapshot['id'] share_proto = snapshot['share']['share_proto'] share_url_type = self.helper._get_share_url_type(share_proto) share = self.helper._get_share_by_name(snapshot['share_name'], share_url_type) if not share: err_msg = _('Can not create snapshot,' ' because share id is not provided.') LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) sharefsid = share['FSID'] snapshot_name = "share_snapshot_" + snap_name snap_id = self.helper._create_snapshot(sharefsid, snapshot_name) LOG.info(_LI('Creating snapshot id %s.'), snap_id) def delete_snapshot(self, snapshot, share_server=None): """Delete a snapshot.""" LOG.debug("Delete a snapshot.") snap_name = snapshot['id'] sharefsid = self.helper._get_fsid_by_name(snapshot['share_name']) if sharefsid is None: LOG.warning(_LW('Delete snapshot share id %s fs has been ' 'deleted.'), snap_name) return snapshot_id = self.helper._get_snapshot_id(sharefsid, snap_name) snapshot_flag = self.helper._check_snapshot_id_exist(snapshot_id) if snapshot_flag: self.helper._delete_snapshot(snapshot_id) else: LOG.warning(_LW("Can not find snapshot %s on array."), snap_name) def update_share_stats(self, stats_dict): """Retrieve status info from share group.""" root = self.helper._read_xml() all_pool_info = self.helper._find_all_pool_info() stats_dict["pools"] = [] pool_name_list = root.findtext('Filesystem/StoragePool') pool_name_list = pool_name_list.split(";") for pool_name in pool_name_list: pool_name = pool_name.strip().strip('\n') capacity = self._get_capacity(pool_name, all_pool_info) if capacity: pool = dict( pool_name=pool_name, total_capacity_gb=capacity['TOTALCAPACITY'], free_capacity_gb=capacity['CAPACITY'], provisioned_capacity_gb=( capacity['PROVISIONEDCAPACITYGB']), max_over_subscription_ratio=( self.configuration.safe_get( 'max_over_subscription_ratio')), allocated_capacity_gb=capacity['CONSUMEDCAPACITY'], qos=self._get_qos_capability(), reserved_percentage=0, thin_provisioning=[True, False], dedupe=[True, False], compression=[True, False], huawei_smartcache=[True, False], huawei_smartpartition=[True, False], ) stats_dict["pools"].append(pool) if not stats_dict["pools"]: err_msg = _("The StoragePool is None.") LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) def _get_qos_capability(self): version = self.helper.find_array_version() if version.upper() >= constants.MIN_ARRAY_VERSION_FOR_QOS: self.qos_support = True else: self.qos_support = False return self.qos_support def 
delete_share(self, share, share_server=None): """Delete share.""" share_name = share['name'] share_url_type = self.helper._get_share_url_type(share['share_proto']) share = self.helper._get_share_by_name(share_name, share_url_type) if not share: LOG.warning(_LW('The share was not found. Share name:%s'), share_name) fsid = self.helper._get_fsid_by_name(share_name) if fsid: self.helper._delete_fs(fsid) return LOG.warning(_LW('The filesystem was not found.')) return share_id = share['ID'] share_fs_id = share['FSID'] if share_id: self.helper._delete_share_by_id(share_id, share_url_type) if share_fs_id: if self.qos_support: qos_id = self.helper.get_qosid_by_fsid(share_fs_id) if qos_id: self.remove_qos_fs(share_fs_id, qos_id) self.helper._delete_fs(share_fs_id) return share def create_share_from_snapshot(self, share, snapshot, share_server=None): """Create a share from snapshot.""" share_fs_id = self.helper._get_fsid_by_name(snapshot['share_name']) if not share_fs_id: err_msg = (_("The source filesystem of snapshot %s " "does not exist.") % snapshot['snapshot_id']) LOG.error(err_msg) raise exception.StorageResourceNotFound( name=snapshot['share_name']) snapshot_id = self.helper._get_snapshot_id(share_fs_id, snapshot['id']) snapshot_flag = self.helper._check_snapshot_id_exist(snapshot_id) if not snapshot_flag: err_msg = (_("Cannot find snapshot %s on array.") % snapshot['snapshot_id']) LOG.error(err_msg) raise exception.ShareSnapshotNotFound( snapshot_id=snapshot['snapshot_id']) self.assert_filesystem(share_fs_id) old_share_name = self.helper.get_share_name_by_id( snapshot['share_id']) old_share_proto = self._get_share_proto(old_share_name) if not old_share_proto: err_msg = (_("Cannot find source share %(share)s of " "snapshot %(snapshot)s on array.") % {'share': snapshot['share_id'], 'snapshot': snapshot['snapshot_id']}) LOG.error(err_msg) raise exception.ShareResourceNotFound( share_id=snapshot['share_id']) new_share_path = self.create_share(share) new_share = { "share_proto": share['share_proto'], "size": share['size'], "name": share['name'], "mount_path": new_share_path.replace("\\", "/"), "mount_src": tempfile.mkdtemp(prefix=constants.TMP_PATH_DST_PREFIX), } old_share_path = self._get_location_path(old_share_name, old_share_proto) old_share = { "share_proto": old_share_proto, "name": old_share_name, "mount_path": old_share_path.replace("\\", "/"), "mount_src": tempfile.mkdtemp(prefix=constants.TMP_PATH_SRC_PREFIX), "snapshot_name": ("share_snapshot_" + snapshot['id'].replace("-", "_")) } try: self.copy_data_from_parent_share(old_share, new_share) except Exception: with excutils.save_and_reraise_exception(): self.delete_share(new_share) finally: for item in (new_share, old_share): try: os.rmdir(item['mount_src']) except Exception as err: err_msg = (_('Failed to remove temp file. ' 'File path: %(file_path)s. Reason: %(err)s.') % {'file_path': item['mount_src'], 'err': six.text_type(err)}) LOG.warning(err_msg) return new_share_path def copy_data_from_parent_share(self, old_share, new_share): old_access = self.get_access(old_share) old_access_id = self._get_access_id(old_share, old_access) if not old_access_id: try: self.allow_access(old_share, old_access) except exception.ManilaException as err: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to add access to share %(name)s. 
' 'Reason: %(err)s.'), {'name': old_share['name'], 'err': six.text_type(err)}) new_access = self.get_access(new_share) try: try: self.mount_share_to_host(old_share, old_access) except exception.ShareMountException as err: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to mount old share %(name)s. ' 'Reason: %(err)s.'), {'name': old_share['name'], 'err': six.text_type(err)}) try: self.allow_access(new_share, new_access) self.mount_share_to_host(new_share, new_access) except Exception as err: with excutils.save_and_reraise_exception(): self.umount_share_from_host(old_share) LOG.error(_LE('Failed to mount new share %(name)s. ' 'Reason: %(err)s.'), {'name': new_share['name'], 'err': six.text_type(err)}) copied = self.copy_snapshot_data(old_share, new_share) for item in (new_share, old_share): try: self.umount_share_from_host(item) except exception.ShareUmountException as err: LOG.warning(_LW('Failed to unmount share %(name)s. ' 'Reason: %(err)s.'), {'name': item['name'], 'err': six.text_type(err)}) self.deny_access(new_share, new_access) if copied: LOG.debug("Created share from snapshot successfully, " "new_share: %s, old_share: %s.", new_share, old_share) else: message = (_('Failed to copy data from share %(old_share)s ' 'to share %(new_share)s.') % {'old_share': old_share['name'], 'new_share': new_share['name']}) raise exception.ShareCopyDataException(reason=message) finally: if not old_access_id: self.deny_access(old_share, old_access) def get_access(self, share): share_proto = share['share_proto'] access = {} root = self.helper._read_xml() if share_proto == 'NFS': access['access_to'] = root.findtext('Filesystem/NFSClient/IP') access['access_level'] = common_constants.ACCESS_LEVEL_RW access['access_type'] = 'ip' elif share_proto == 'CIFS': access['access_to'] = root.findtext( 'Filesystem/CIFSClient/UserName') access['access_password'] = root.findtext( 'Filesystem/CIFSClient/UserPassword') access['access_level'] = common_constants.ACCESS_LEVEL_RW access['access_type'] = 'user' LOG.debug("Get access for share: %s, access_type: %s, access_to: %s, " "access_level: %s", share['name'], access['access_type'], access['access_to'], access['access_level']) return access def _get_access_id(self, share, access): """Get access id of the share.""" access_id = None share_name = share['name'] share_proto = share['share_proto'] share_url_type = self.helper._get_share_url_type(share_proto) access_to = access['access_to'] share = self.helper._get_share_by_name(share_name, share_url_type) access_id = self.helper._get_access_from_share(share['ID'], access_to, share_proto) if access_id is None: LOG.debug('Cannot get access ID from share. ' 'share_name: %s', share_name) return access_id def copy_snapshot_data(self, old_share, new_share): src_path = '/'.join((old_share['mount_src'], '.snapshot', old_share['snapshot_name'])) dst_path = new_share['mount_src'] copy_finish = False LOG.debug("Copy data from src_path: %s to dst_path: %s.", src_path, dst_path) try: ignore_list = '' copy = data_utils.Copy(src_path, dst_path, ignore_list) copy.run() if copy.get_progress()['total_progress'] == 100: copy_finish = True except Exception as err: err_msg = (_("Failed to copy data, reason: %s.") % six.text_type(err)) LOG.error(err_msg) return copy_finish def umount_share_from_host(self, share): try: utils.execute('umount', share['mount_path'], run_as_root=True) except Exception as err: message = (_("Failed to unmount share %(share)s. 
" "Reason: %(reason)s.") % {'share': share['name'], 'reason': six.text_type(err)}) raise exception.ShareUmountException(reason=message) def mount_share_to_host(self, share, access): LOG.debug("Mounting share: %s to host, mount_src: %s", share['name'], share['mount_src']) try: if share['share_proto'] == 'NFS': utils.execute('mount', '-t', 'nfs', share['mount_path'], share['mount_src'], run_as_root=True) LOG.debug("Execute mount. mount_src: %s", share['mount_src']) elif share['share_proto'] == 'CIFS': user = ('user=' + access['access_to'] + ',' + 'password=' + access['access_password']) utils.execute('mount', '-t', 'cifs', share['mount_path'], share['mount_src'], '-o', user, run_as_root=True) except Exception as err: message = (_('Bad response from mount share: %(share)s. ' 'Reason: %(reason)s.') % {'share': share['name'], 'reason': six.text_type(err)}) raise exception.ShareMountException(reason=message) def get_network_allocations_number(self): """Get number of network interfaces to be created.""" if self.configuration.driver_handles_share_servers: return constants.IP_ALLOCATIONS_DHSS_TRUE else: return constants.IP_ALLOCATIONS_DHSS_FALSE def _get_capacity(self, pool_name, result): """Get free capacity and total capacity of the pools.""" poolinfo = self.helper._find_pool_info(pool_name, result) if poolinfo: total = float(poolinfo['TOTALCAPACITY']) / units.Mi / 2 free = float(poolinfo['CAPACITY']) / units.Mi / 2 consumed = float(poolinfo['CONSUMEDCAPACITY']) / units.Mi / 2 poolinfo['TOTALCAPACITY'] = total poolinfo['CAPACITY'] = free poolinfo['CONSUMEDCAPACITY'] = consumed poolinfo['PROVISIONEDCAPACITYGB'] = round( float(total) - float(free), 2) return poolinfo def _init_filesys_para(self, share, poolinfo, extra_specs): """Init basic filesystem parameters.""" name = share['name'] size = int(share['size']) * units.Mi * 2 fileparam = { "NAME": name.replace("-", "_"), "DESCRIPTION": "", "ALLOCTYPE": extra_specs['LUNType'], "CAPACITY": size, "PARENTID": poolinfo['ID'], "INITIALALLOCCAPACITY": units.Ki * 20, "PARENTTYPE": 216, "SNAPSHOTRESERVEPER": 20, "INITIALDISTRIBUTEPOLICY": 0, "ISSHOWSNAPDIR": True, "RECYCLESWITCH": 0, "RECYCLEHOLDTIME": 15, "RECYCLETHRESHOLD": 0, "RECYCLEAUTOCLEANSWITCH": 0, "ENABLEDEDUP": extra_specs['dedupe'], "ENABLECOMPRESSION": extra_specs['compression'], } if fileparam['ALLOCTYPE'] == constants.ALLOC_TYPE_THICK_FLAG: if (extra_specs['dedupe'] or extra_specs['compression']): err_msg = _( 'The filesystem type is "Thick",' ' so dedupe or compression cannot be set.') LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) return fileparam def deny_access(self, share, access, share_server=None): """Deny access to share.""" share_proto = share['share_proto'] share_name = share['name'] share_url_type = self.helper._get_share_url_type(share_proto) access_type = access['access_type'] if share_proto == 'NFS' and access_type not in ('ip', 'user'): LOG.warning(_LW('Only IP or USER access types are allowed for ' 'NFS shares.')) return elif share_proto == 'CIFS' and access_type != 'user': LOG.warning(_LW('Only USER access type is allowed for' ' CIFS shares.')) return access_to = access['access_to'] share = self.helper._get_share_by_name(share_name, share_url_type) if not share: LOG.warning(_LW('Can not get share %s.'), share_name) return access_id = self.helper._get_access_from_share(share['ID'], access_to, share_proto) if not access_id: LOG.warning(_LW('Can not get access id from share. 
' 'share_name: %s'), share_name) return self.helper._remove_access_from_share(access_id, share_proto) def allow_access(self, share, access, share_server=None): """Allow access to the share.""" share_proto = share['share_proto'] share_name = share['name'] share_url_type = self.helper._get_share_url_type(share_proto) access_type = access['access_type'] access_level = access['access_level'] access_to = access['access_to'] if access_level not in common_constants.ACCESS_LEVELS: raise exception.InvalidShareAccess( reason=(_('Unsupported level of access was provided - %s') % access_level)) if share_proto == 'NFS': if access_type == 'user': # Use 'user' as 'netgroup' for NFS. # A group name starts with @. access_to = '@' + access_to elif access_type != 'ip': message = _('Only IP or USER access types ' 'are allowed for NFS shares.') raise exception.InvalidShareAccess(reason=message) if access_level == common_constants.ACCESS_LEVEL_RW: access_level = constants.ACCESS_NFS_RW else: access_level = constants.ACCESS_NFS_RO elif share_proto == 'CIFS': if access_type == 'user': if access_level == common_constants.ACCESS_LEVEL_RW: access_level = constants.ACCESS_CIFS_FULLCONTROL else: access_level = constants.ACCESS_CIFS_RO else: message = _('Only USER access type is allowed' ' for CIFS shares.') raise exception.InvalidShareAccess(reason=message) share = self.helper._get_share_by_name(share_name, share_url_type) if not share: err_msg = (_("Can not get share ID by share %s.") % share_name) LOG.error(err_msg) raise exception.InvalidShareAccess(reason=err_msg) share_id = share['ID'] # Check if access already exists access_id = self.helper._get_access_from_share(share_id, access_to, share_proto) if access_id: # Check if the access level equal level_exist = self.helper._get_level_by_access_id(access_id, share_proto) if level_exist != access_level: # Change the access level self.helper._change_access_rest(access_id, share_proto, access_level) else: # Add this access to share self.helper._allow_access_rest(share_id, access_to, share_proto, access_level) def clear_access(self, share, share_server=None): """Remove all access rules of the share""" share_proto = share['share_proto'] share_name = share['name'] share_url_type = self.helper._get_share_url_type(share_proto) share_stor = self.helper._get_share_by_name(share_name, share_url_type) if not share_stor: LOG.warning(_LW('Cannot get share %s.'), share_name) return share_id = share_stor['ID'] all_accesses = self.helper._get_all_access_from_share(share_id, share_proto) for access_id in all_accesses: self.helper._remove_access_from_share(access_id, share_proto) def update_access(self, share, access_rules, add_rules, delete_rules, share_server=None): """Update access rules list.""" if not (add_rules or delete_rules): self.clear_access(share, share_server) for access in access_rules: self.allow_access(share, access, share_server) else: for access in delete_rules: self.deny_access(share, access, share_server) for access in add_rules: self.allow_access(share, access, share_server) def get_pool(self, share): pool_name = share_utils.extract_host(share['host'], level='pool') if pool_name: return pool_name share_name = share['name'] share_url_type = self.helper._get_share_url_type(share['share_proto']) share = self.helper._get_share_by_name(share_name, share_url_type) pool_name = None if share: pool = self.helper._get_fs_info_by_id(share['FSID']) pool_name = pool['POOLNAME'] return pool_name def allocate_container(self, share, poolinfo): """Creates filesystem associated to 
share by name.""" opts = huawei_utils.get_share_extra_specs_params( share['share_type_id']) if opts is None: opts = constants.OPTS_CAPABILITIES smart = smartx.SmartX(self.helper) smartx_opts, qos = smart.get_smartx_extra_specs_opts(opts) fileParam = self._init_filesys_para(share, poolinfo, smartx_opts) fsid = self.helper._create_filesystem(fileParam) try: if qos: smart_qos = smartx.SmartQos(self.helper) smart_qos.create_qos(qos, fsid) smartpartition = smartx.SmartPartition(self.helper) smartpartition.add(opts, fsid) smartcache = smartx.SmartCache(self.helper) smartcache.add(opts, fsid) except Exception as err: if fsid is not None: qos_id = self.helper.get_qosid_by_fsid(fsid) if qos_id: self.remove_qos_fs(fsid, qos_id) self.helper._delete_fs(fsid) message = (_('Failed to add smartx. Reason: %(err)s.') % {'err': err}) raise exception.InvalidShare(reason=message) return fsid def manage_existing(self, share, driver_options): """Manage existing share.""" share_proto = share['share_proto'] share_name = share['name'] old_export_location = share['export_locations'][0]['path'] pool_name = share_utils.extract_host(share['host'], level='pool') share_url_type = self.helper._get_share_url_type(share_proto) old_share_name = self.helper._get_share_name_by_export_location( old_export_location, share_proto) share_storage = self.helper._get_share_by_name(old_share_name, share_url_type) if not share_storage: err_msg = (_("Can not get share ID by share %s.") % old_export_location) LOG.error(err_msg) raise exception.InvalidShare(reason=err_msg) fs_id = share_storage['FSID'] fs = self.helper._get_fs_info_by_id(fs_id) if not self.check_fs_status(fs['HEALTHSTATUS'], fs['RUNNINGSTATUS']): raise exception.InvalidShare( reason=(_('Invalid status of filesystem: ' 'HEALTHSTATUS=%(health)s ' 'RUNNINGSTATUS=%(running)s.') % {'health': fs['HEALTHSTATUS'], 'running': fs['RUNNINGSTATUS']})) if pool_name and pool_name != fs['POOLNAME']: raise exception.InvalidHost( reason=(_('The current pool(%(fs_pool)s) of filesystem ' 'does not match the input pool(%(host_pool)s).') % {'fs_pool': fs['POOLNAME'], 'host_pool': pool_name})) result = self.helper._find_all_pool_info() poolinfo = self.helper._find_pool_info(pool_name, result) opts = huawei_utils.get_share_extra_specs_params( share['share_type_id']) specs = share_types.get_share_type_extra_specs(share['share_type_id']) if ('capabilities:thin_provisioning' not in specs.keys() and 'thin_provisioning' not in specs.keys()): if fs['ALLOCTYPE'] == constants.ALLOC_TYPE_THIN_FLAG: opts['thin_provisioning'] = constants.THIN_PROVISIONING else: opts['thin_provisioning'] = constants.THICK_PROVISIONING change_opts = self.check_retype_change_opts(opts, poolinfo, fs) LOG.info(_LI('Retyping share (%(share)s), changed options are : ' '(%(change_opts)s).'), {'share': old_share_name, 'change_opts': change_opts}) try: self.retype_share(change_opts, fs_id) except Exception as err: message = (_("Retype share error. Share: %(share)s. 
" "Reason: %(reason)s.") % {'share': old_share_name, 'reason': err}) raise exception.InvalidShare(reason=message) share_size = int(fs['CAPACITY']) / units.Mi / 2 self.helper._change_fs_name(fs_id, share_name) location = self._get_location_path(share_name, share_proto) return (share_size, [location]) def check_retype_change_opts(self, opts, poolinfo, fs): change_opts = { "partitionid": None, "cacheid": None, "dedupe&compression": None, } # SmartPartition old_partition_id = fs['SMARTPARTITIONID'] old_partition_name = None new_partition_id = None new_partition_name = None if strutils.bool_from_string(opts['huawei_smartpartition']): if not opts['partitionname']: raise exception.InvalidInput( reason=_('Partition name is None, please set ' 'huawei_smartpartition:partitionname in key.')) new_partition_name = opts['partitionname'] new_partition_id = self.helper._get_partition_id_by_name( new_partition_name) if new_partition_id is None: raise exception.InvalidInput( reason=(_("Can't find partition name on the array, " "partition name is: %(name)s.") % {"name": new_partition_name})) if old_partition_id != new_partition_id: if old_partition_id: partition_info = self.helper.get_partition_info_by_id( old_partition_id) old_partition_name = partition_info['NAME'] change_opts["partitionid"] = ([old_partition_id, old_partition_name], [new_partition_id, new_partition_name]) # SmartCache old_cache_id = fs['SMARTCACHEID'] old_cache_name = None new_cache_id = None new_cache_name = None if strutils.bool_from_string(opts['huawei_smartcache']): if not opts['cachename']: raise exception.InvalidInput( reason=_('Cache name is None, please set ' 'huawei_smartcache:cachename in key.')) new_cache_name = opts['cachename'] new_cache_id = self.helper._get_cache_id_by_name( new_cache_name) if new_cache_id is None: raise exception.InvalidInput( reason=(_("Can't find cache name on the array, " "cache name is: %(name)s.") % {"name": new_cache_name})) if old_cache_id != new_cache_id: if old_cache_id: cache_info = self.helper.get_cache_info_by_id( old_cache_id) old_cache_name = cache_info['NAME'] change_opts["cacheid"] = ([old_cache_id, old_cache_name], [new_cache_id, new_cache_name]) # SmartDedupe&SmartCompression smartx_opts = constants.OPTS_CAPABILITIES if opts is not None: smart = smartx.SmartX(self.helper) smartx_opts, qos = smart.get_smartx_extra_specs_opts(opts) old_compression = fs['COMPRESSION'] new_compression = smartx_opts['compression'] old_dedupe = fs['DEDUP'] new_dedupe = smartx_opts['dedupe'] if fs['ALLOCTYPE'] == constants.ALLOC_TYPE_THIN_FLAG: fs['ALLOCTYPE'] = constants.ALLOC_TYPE_THIN else: fs['ALLOCTYPE'] = constants.ALLOC_TYPE_THICK if strutils.bool_from_string(opts['thin_provisioning']): opts['thin_provisioning'] = constants.ALLOC_TYPE_THIN else: opts['thin_provisioning'] = constants.ALLOC_TYPE_THICK if fs['ALLOCTYPE'] != opts['thin_provisioning']: msg = (_("Manage existing share " "fs type and new_share_type mismatch. 
" "fs type is: %(fs_type)s, " "new_share_type is: %(new_share_type)s") % {"fs_type": fs['ALLOCTYPE'], "new_share_type": opts['thin_provisioning']}) raise exception.InvalidHost(reason=msg) else: if fs['ALLOCTYPE'] == constants.ALLOC_TYPE_THICK: if new_compression or new_dedupe: raise exception.InvalidInput( reason=_("Dedupe or compression cannot be set for " "thick filesystem.")) else: if (old_dedupe != new_dedupe or old_compression != new_compression): change_opts["dedupe&compression"] = ([old_dedupe, old_compression], [new_dedupe, new_compression]) return change_opts def retype_share(self, change_opts, fs_id): if change_opts.get('partitionid'): old, new = change_opts['partitionid'] old_id = old[0] old_name = old[1] new_id = new[0] new_name = new[1] if old_id: self.helper._remove_fs_from_partition(fs_id, old_id) if new_id: self.helper._add_fs_to_partition(fs_id, new_id) msg = (_("Retype FS(id: %(fs_id)s) smartpartition from " "(name: %(old_name)s, id: %(old_id)s) to " "(name: %(new_name)s, id: %(new_id)s) " "performed successfully.") % {"fs_id": fs_id, "old_id": old_id, "old_name": old_name, "new_id": new_id, "new_name": new_name}) LOG.info(msg) if change_opts.get('cacheid'): old, new = change_opts['cacheid'] old_id = old[0] old_name = old[1] new_id = new[0] new_name = new[1] if old_id: self.helper._remove_fs_from_cache(fs_id, old_id) if new_id: self.helper._add_fs_to_cache(fs_id, new_id) msg = (_("Retype FS(id: %(fs_id)s) smartcache from " "(name: %(old_name)s, id: %(old_id)s) to " "(name: %(new_name)s, id: %(new_id)s) " "performed successfully.") % {"fs_id": fs_id, "old_id": old_id, "old_name": old_name, "new_id": new_id, "new_name": new_name}) LOG.info(msg) if change_opts.get('dedupe&compression'): old, new = change_opts['dedupe&compression'] old_dedupe = old[0] old_compression = old[1] new_dedupe = new[0] new_compression = new[1] if ((old_dedupe != new_dedupe) or (old_compression != new_compression)): new_smartx_opts = {"dedupe": new_dedupe, "compression": new_compression} self.helper._change_extra_specs(fs_id, new_smartx_opts) msg = (_("Retype FS(id: %(fs_id)s) dedupe from %(old_dedupe)s " "to %(new_dedupe)s performed successfully, " "compression from " "%(old_compression)s to %(new_compression)s " "performed successfully.") % {"fs_id": fs_id, "old_dedupe": old_dedupe, "new_dedupe": new_dedupe, "old_compression": old_compression, "new_compression": new_compression}) LOG.info(msg) def remove_qos_fs(self, fs_id, qos_id): fs_list = self.helper.get_fs_list_in_qos(qos_id) fs_count = len(fs_list) if fs_count <= 1: qos = smartx.SmartQos(self.helper) qos.delete_qos(qos_id) else: self.helper.remove_fs_from_qos(fs_id, fs_list, qos_id) def _get_location_path(self, share_name, share_proto, ip=None): location = None if ip is None: root = self.helper._read_xml() ip = root.findtext('Storage/LogicalPortIP').strip() if share_proto == 'NFS': location = '%s:/%s' % (ip, share_name.replace("-", "_")) elif share_proto == 'CIFS': location = '\\\\%s\\%s' % (ip, share_name.replace("-", "_")) else: raise exception.InvalidShareAccess( reason=(_('Invalid NAS protocol supplied: %s.') % share_proto)) return location def _get_share_proto(self, share_name): share_proto = None for proto in ('NFS', 'CIFS'): share_url_type = self.helper._get_share_url_type(proto) share = self.helper._get_share_by_name(share_name, share_url_type) if share: share_proto = proto break return share_proto def _get_wait_interval(self): """Get wait interval from huawei conf file.""" root = self.helper._read_xml() wait_interval = 
root.findtext('Filesystem/WaitInterval') if wait_interval: return int(wait_interval) else: LOG.info(_LI( "Wait interval is not configured in huawei " "conf file. Use default: %(default_wait_interval)d."), {"default_wait_interval": constants.DEFAULT_WAIT_INTERVAL}) return constants.DEFAULT_WAIT_INTERVAL def _get_timeout(self): """Get timeout from huawei conf file.""" root = self.helper._read_xml() timeout = root.findtext('Filesystem/Timeout') if timeout: return int(timeout) else: LOG.info(_LI( "Timeout is not configured in huawei conf file. " "Use default: %(default_timeout)d."), {"default_timeout": constants.DEFAULT_TIMEOUT}) return constants.DEFAULT_TIMEOUT def check_conf_file(self): """Check the config file, make sure the essential items are set.""" root = self.helper._read_xml() resturl = root.findtext('Storage/RestURL') username = root.findtext('Storage/UserName') pwd = root.findtext('Storage/UserPassword') product = root.findtext('Storage/Product') pool_node = root.findtext('Filesystem/StoragePool') logical_port_ip = root.findtext('Storage/LogicalPortIP') if product != "V3": err_msg = (_( 'check_conf_file: Config file invalid. ' 'Product must be set to V3.')) LOG.error(err_msg) raise exception.InvalidInput(err_msg) if not (resturl and username and pwd): err_msg = (_( 'check_conf_file: Config file invalid. RestURL,' ' UserName and UserPassword must be set.')) LOG.error(err_msg) raise exception.InvalidInput(err_msg) if not pool_node: err_msg = (_( 'check_conf_file: Config file invalid. ' 'StoragePool must be set.')) LOG.error(err_msg) raise exception.InvalidInput(err_msg) if not (self.configuration.driver_handles_share_servers or logical_port_ip): err_msg = (_( 'check_conf_file: Config file invalid. LogicalPortIP ' 'must be set when driver_handles_share_servers is False.')) LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) def check_service(self): running_status = self.helper._get_cifs_service_status() if running_status != constants.STATUS_SERVICE_RUNNING: self.helper._start_cifs_service_status() service = self.helper._get_nfs_service_status() if ((service['RUNNINGSTATUS'] != constants.STATUS_SERVICE_RUNNING) or (service['SUPPORTV3'] == 'false') or (service['SUPPORTV4'] == 'false')): self.helper._start_nfs_service_status() def setup_server(self, network_info, metadata=None): """Set up share server with given network parameters.""" self._check_network_type_validate(network_info['network_type']) vlan_tag = network_info['segmentation_id'] or 0 ip = network_info['network_allocations'][0]['ip_address'] subnet = utils.cidr_to_netmask(network_info['cidr']) if not utils.is_valid_ip_address(ip, '4'): err_msg = (_( "IP (%s) is invalid. Only IPv4 addresses are supported.") % ip) LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) ad_created = False ldap_created = False try: if network_info.get('security_services'): active_directory, ldap = self._get_valid_security_service( network_info.get('security_services')) # Configure AD or LDAP Domain. if active_directory: self._configure_AD_domain(active_directory) ad_created = True if ldap: self._configure_LDAP_domain(ldap) ldap_created = True # Create vlan and logical_port. 
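# _create_vlan_and_logical_port() below picks the least-used eligible ethernet or
# bond port, creates a VLAN on it when a segmentation id was supplied (vlan_tag
# != 0), and then creates the logical port carrying the share server IP. If it
# raises, the except clause rolls back any AD/LDAP configuration applied above
# before re-raising.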
vlan_id, logical_port_id = ( self._create_vlan_and_logical_port(vlan_tag, ip, subnet)) except exception.ManilaException: if ad_created: dns_ip_list = [] user = active_directory['user'] password = active_directory['password'] self.helper.set_DNS_ip_address(dns_ip_list) self.helper.delete_AD_config(user, password) self._check_AD_expected_status(constants.STATUS_EXIT_DOMAIN) if ldap_created: self.helper.delete_LDAP_config() raise return { 'share_server_name': network_info['server_id'], 'share_server_id': network_info['server_id'], 'vlan_id': vlan_id, 'logical_port_id': logical_port_id, 'ip': ip, 'subnet': subnet, 'vlan_tag': vlan_tag, 'ad_created': ad_created, 'ldap_created': ldap_created, } def _check_network_type_validate(self, network_type): if network_type not in ('flat', 'vlan', None): err_msg = (_( 'Invalid network type. Network type must be flat or vlan.')) raise exception.NetworkBadConfigurationException(reason=err_msg) def _get_valid_security_service(self, security_services): """Validate security services and return AD/LDAP config.""" service_number = len(security_services) err_msg = _("Unsupported security services. " "Only AD and LDAP are supported.") if service_number > 2: LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) active_directory = None ldap = None for ss in security_services: if ss['type'] == 'active_directory': active_directory = ss elif ss['type'] == 'ldap': ldap = ss else: LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) return active_directory, ldap def _configure_AD_domain(self, active_directory): dns_ip = active_directory['dns_ip'] user = active_directory['user'] password = active_directory['password'] domain = active_directory['domain'] if not (dns_ip and user and password and domain): raise exception.InvalidInput( reason=_("dns_ip or user or password or domain " "in security_services is None.")) # Check DNS server exists or not. ip_address = self.helper.get_DNS_ip_address() if ip_address and ip_address[0]: err_msg = (_("DNS server (%s) has already been configured.") % ip_address[0]) LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) # Check AD config exists or not. ad_exists, AD_domain = self.helper.get_AD_domain_name() if ad_exists: err_msg = (_("AD domain (%s) has already been configured.") % AD_domain) LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) # Set DNS server ip. dns_ip_list = dns_ip.split(",") DNS_config = self.helper.set_DNS_ip_address(dns_ip_list) # Set AD config. digits = string.digits random_id = ''.join([random.choice(digits) for i in range(9)]) system_name = constants.SYSTEM_NAME_PREFIX + random_id try: self.helper.add_AD_config(user, password, domain, system_name) self._check_AD_expected_status(constants.STATUS_JOIN_DOMAIN) except exception.ManilaException as err: if DNS_config: dns_ip_list = [] self.helper.set_DNS_ip_address(dns_ip_list) raise exception.InvalidShare( reason=(_('Failed to add AD config. 
' 'Reason: %s.') % err)) def _check_AD_expected_status(self, expected_status): wait_interval = self._get_wait_interval() timeout = self._get_timeout() retries = timeout / wait_interval interval = wait_interval backoff_rate = 1 @utils.retry(exception.InvalidShare, interval, retries, backoff_rate) def _check_AD_status(): ad = self.helper.get_AD_config() if ad['DOMAINSTATUS'] != expected_status: raise exception.InvalidShare( reason=(_('AD domain (%s) status is not expected.') % ad['FULLDOMAINNAME'])) _check_AD_status() def _configure_LDAP_domain(self, ldap): server = ldap['server'] domain = ldap['domain'] if not server or not domain: raise exception.InvalidInput(reason=_("Server or domain is None.")) # Check LDAP config exists or not. ldap_exists, LDAP_domain = self.helper.get_LDAP_domain_server() if ldap_exists: err_msg = (_("LDAP domain (%s) has already been configured.") % LDAP_domain) LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) # Set LDAP config. server_number = len(server.split(',')) if server_number == 1: server = server + ",," elif server_number == 2: server = server + "," elif server_number > 3: raise exception.InvalidInput( reason=_("Cannot support more than three LDAP servers.")) self.helper.add_LDAP_config(server, domain) def _create_vlan_and_logical_port(self, vlan_tag, ip, subnet): optimal_port, port_type = self._get_optimal_port(vlan_tag) port_id = self.helper.get_port_id(optimal_port, port_type) home_port_id = port_id home_port_type = port_type vlan_id = 0 vlan_exists = True if port_type is None or port_id is None: err_msg = _("No appropriate port found to create logical port.") LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) if vlan_tag: vlan_exists, vlan_id = self.helper.get_vlan(port_id, vlan_tag) if not vlan_exists: # Create vlan. vlan_id = self.helper.create_vlan( port_id, port_type, vlan_tag) home_port_id = vlan_id home_port_type = constants.PORT_TYPE_VLAN logical_port_exists, logical_port_id = ( self.helper.get_logical_port(home_port_id, ip, subnet)) if not logical_port_exists: try: # Create logical port. logical_port_id = ( self.helper.create_logical_port( home_port_id, home_port_type, ip, subnet)) except exception.ManilaException as err: if not vlan_exists: self.helper.delete_vlan(vlan_id) raise exception.InvalidShare( reason=(_('Failed to create logical port. ' 'Reason: %s.') % err)) return vlan_id, logical_port_id def _get_optimal_port(self, vlan_tag): """Get an optimal physical port or bond port.""" root = self.helper._read_xml() port_info = [] port_list = root.findtext('Storage/Port') if port_list: port_list = port_list.split(";") for port in port_list: port = port.strip().strip('\n') if port: port_info.append(port) eth_port, bond_port = self._get_online_port(port_info) if vlan_tag: optimal_port, port_type = ( self._get_least_port(eth_port, bond_port, sort_type=constants.SORT_BY_VLAN)) else: optimal_port, port_type = ( self._get_least_port(eth_port, bond_port, sort_type=constants.SORT_BY_LOGICAL)) if not optimal_port: err_msg = (_("Cannot find optimal port. 
port_info: %s.") % port_info) LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) return optimal_port, port_type def _get_online_port(self, all_port_list): eth_port = self.helper.get_all_eth_port() bond_port = self.helper.get_all_bond_port() eth_status = constants.STATUS_ETH_RUNNING online_eth_port = [] for eth in eth_port: if (eth_status == eth['RUNNINGSTATUS'] and not eth['IPV4ADDR'] and not eth['BONDNAME']): online_eth_port.append(eth['LOCATION']) online_bond_port = [] for bond in bond_port: if eth_status == bond['RUNNINGSTATUS']: port_id = jsonutils.loads(bond['PORTIDLIST']) bond_eth_port = self.helper.get_eth_port_by_id(port_id[0]) if bond_eth_port and not bond_eth_port['IPV4ADDR']: online_bond_port.append(bond['NAME']) filtered_eth_port = [] filtered_bond_port = [] if len(all_port_list) == 0: filtered_eth_port = online_eth_port filtered_bond_port = online_bond_port else: all_port_list = list(set(all_port_list)) for port in all_port_list: is_eth_port = False for eth in online_eth_port: if port == eth: filtered_eth_port.append(port) is_eth_port = True break if is_eth_port: continue for bond in online_bond_port: if port == bond: filtered_bond_port.append(port) break return filtered_eth_port, filtered_bond_port def _get_least_port(self, eth_port, bond_port, sort_type): sorted_eth = [] sorted_bond = [] if sort_type == constants.SORT_BY_VLAN: _get_sorted_least_port = self._get_sorted_least_port_by_vlan else: _get_sorted_least_port = self._get_sorted_least_port_by_logical if eth_port: sorted_eth = _get_sorted_least_port(eth_port) if bond_port: sorted_bond = _get_sorted_least_port(bond_port) if sorted_eth and sorted_bond: if sorted_eth[1] >= sorted_bond[1]: return sorted_bond[0], constants.PORT_TYPE_BOND else: return sorted_eth[0], constants.PORT_TYPE_ETH elif sorted_eth: return sorted_eth[0], constants.PORT_TYPE_ETH elif sorted_bond: return sorted_bond[0], constants.PORT_TYPE_BOND else: return None, None def _get_sorted_least_port_by_vlan(self, port_list): if not port_list: return None vlan_list = self.helper.get_all_vlan() count = {} for item in port_list: count[item] = 0 for item in port_list: for vlan in vlan_list: pos = vlan['NAME'].rfind('.') if vlan['NAME'][:pos] == item: count[item] += 1 sort_port = sorted(count.items(), key=lambda count: count[1]) return sort_port[0] def _get_sorted_least_port_by_logical(self, port_list): if not port_list: return None logical_list = self.helper.get_all_logical_port() count = {} for item in port_list: count[item] = 0 for logical in logical_list: if logical['HOMEPORTTYPE'] == constants.PORT_TYPE_VLAN: pos = logical['HOMEPORTNAME'].rfind('.') if logical['HOMEPORTNAME'][:pos] == item: count[item] += 1 else: if logical['HOMEPORTNAME'] == item: count[item] += 1 sort_port = sorted(count.items(), key=lambda count: count[1]) return sort_port[0] def teardown_server(self, server_details, security_services=None): if not server_details: LOG.debug('Server details are empty.') return logical_port_id = server_details.get('logical_port_id') vlan_id = server_details.get('vlan_id') ad_created = server_details.get('ad_created') ldap_created = server_details.get('ldap_created') # Delete logical_port. if logical_port_id: logical_port_exists = ( self.helper.check_logical_port_exists_by_id(logical_port_id)) if logical_port_exists: self.helper.delete_logical_port(logical_port_id) # Delete vlan. 
if vlan_id and vlan_id != '0': vlan_exists = self.helper.check_vlan_exists_by_id(vlan_id) if vlan_exists: self.helper.delete_vlan(vlan_id) if security_services: active_directory, ldap = ( self._get_valid_security_service(security_services)) if ad_created and ad_created == '1' and active_directory: dns_ip = active_directory['dns_ip'] user = active_directory['user'] password = active_directory['password'] domain = active_directory['domain'] # Check DNS server exists or not. ip_address = self.helper.get_DNS_ip_address() if ip_address and ip_address[0] == dns_ip: dns_ip_list = [] self.helper.set_DNS_ip_address(dns_ip_list) # Check AD config exists or not. ad_exists, AD_domain = self.helper.get_AD_domain_name() if ad_exists and AD_domain == domain: self.helper.delete_AD_config(user, password) self._check_AD_expected_status( constants.STATUS_EXIT_DOMAIN) if ldap_created and ldap_created == '1' and ldap: server = ldap['server'] domain = ldap['domain'] # Check LDAP config exists or not. ldap_exists, LDAP_domain = ( self.helper.get_LDAP_domain_server()) if ldap_exists: LDAP_config = self.helper.get_LDAP_config() if (LDAP_config['LDAPSERVER'] == server and LDAP_config['BASEDN'] == domain): self.helper.delete_LDAP_config() def ensure_share(self, share, share_server=None): """Ensure that share is exported.""" share_proto = share['share_proto'] share_name = share['name'] share_id = share['id'] share_url_type = self.helper._get_share_url_type(share_proto) share_storage = self.helper._get_share_by_name(share_name, share_url_type) if not share_storage: raise exception.ShareResourceNotFound(share_id=share_id) fs_id = share_storage['FSID'] self.assert_filesystem(fs_id) ip = self._get_share_ip(share_server) location = self._get_location_path(share_name, share_proto, ip) return [location] manila-2.0.0/manila/share/drivers/glusterfs/0000775000567000056710000000000012701407265022156 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/glusterfs/__init__.py0000664000567000056710000003342712701407112024267 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Flat network GlusterFS Driver. Manila shares are subdirectories within a GlusterFS volume. The backend, a GlusterFS cluster, uses one of the two NFS servers, Gluster-NFS or NFS-Ganesha, based on a configuration option, to mediate access to the shares. NFS-Ganesha server supports NFSv3 and v4 protocols, while Gluster-NFS server supports only NFSv3 protocol. TODO(rraja): support SMB protocol. 
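The NFS server flavor is chosen per backend through the
'glusterfs_nfs_server_type' option defined below ('Gluster', the default,
or 'Ganesha'). A minimal, illustrative backend section showing only that
choice (the section name and address are placeholders, not a complete
working configuration):

    [glusterfs-ganesha]
    share_driver = manila.share.drivers.glusterfs.GlusterfsShareDriver
    glusterfs_nfs_server_type = Ganesha
    glusterfs_ganesha_server_ip = 203.0.113.10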
""" import re import socket import sys from oslo_config import cfg from oslo_log import log from manila import exception from manila.i18n import _ from manila.share import driver from manila.share.drivers import ganesha from manila.share.drivers.ganesha import utils as ganesha_utils from manila.share.drivers.glusterfs import layout from manila import utils LOG = log.getLogger(__name__) GlusterfsManilaShare_opts = [ cfg.StrOpt('glusterfs_nfs_server_type', default='Gluster', help='Type of NFS server that mediate access to the Gluster ' 'volumes (Gluster or Ganesha).'), cfg.StrOpt('glusterfs_ganesha_server_ip', help="Remote Ganesha server node's IP address."), cfg.StrOpt('glusterfs_ganesha_server_username', default='root', help="Remote Ganesha server node's username."), cfg.StrOpt('glusterfs_ganesha_server_password', secret=True, help="Remote Ganesha server node's login password. " "This is not required if 'glusterfs_path_to_private_key'" ' is configured.'), ] CONF = cfg.CONF CONF.register_opts(GlusterfsManilaShare_opts) NFS_EXPORT_DIR = 'nfs.export-dir' NFS_EXPORT_VOL = 'nfs.export-volumes' NFS_RPC_AUTH_ALLOW = 'nfs.rpc-auth-allow' NFS_RPC_AUTH_REJECT = 'nfs.rpc-auth-reject' class GlusterfsShareDriver(driver.ExecuteMixin, driver.GaneshaMixin, layout.GlusterfsShareDriverBase): """Execute commands relating to Shares.""" GLUSTERFS_VERSION_MIN = (3, 5) supported_layouts = ('layout_directory.GlusterfsDirectoryMappedLayout', 'layout_volume.GlusterfsVolumeMappedLayout') supported_protocols = ('NFS',) def __init__(self, *args, **kwargs): super(GlusterfsShareDriver, self).__init__(False, *args, **kwargs) self._helpers = {} self.configuration.append_config_values(GlusterfsManilaShare_opts) self.backend_name = self.configuration.safe_get( 'share_backend_name') or 'GlusterFS' self.nfs_helper = getattr( sys.modules[__name__], self.configuration.glusterfs_nfs_server_type + 'NFSHelper') def do_setup(self, context): # in order to do an initial instantialization of the helper self._get_helper() super(GlusterfsShareDriver, self).do_setup(context) def _setup_via_manager(self, share_manager, share_manager_parent=None): gluster_manager = share_manager['manager'] # TODO(csaba): This should be refactored into proper dispatch to helper if self.nfs_helper == GlusterNFSHelper and not gluster_manager.path: # default behavior of NFS_EXPORT_VOL is as if it were 'on' export_vol = gluster_manager.get_vol_option( NFS_EXPORT_VOL, boolean=True) if export_vol is False: raise exception.GlusterfsException( _("Gluster-NFS with volume layout should be used " "with `nfs.export-volumes = on`")) setting = [NFS_RPC_AUTH_REJECT, '*'] else: # gluster-nfs export of the whole volume must be prohibited # to not to defeat access control setting = [NFS_EXPORT_VOL, False] gluster_manager.set_vol_option(*setting) return self.nfs_helper(self._execute, self.configuration, gluster_manager=gluster_manager).get_export( share_manager['share']) def check_for_setup_error(self): pass def _update_share_stats(self): """Retrieve stats info from the GlusterFS volume.""" data = dict( storage_protocol='NFS', vendor_name='Red Hat', share_backend_name=self.backend_name, reserved_percentage=self.configuration.reserved_share_percentage) super(GlusterfsShareDriver, self)._update_share_stats(data) def get_network_allocations_number(self): return 0 def _get_helper(self, gluster_mgr=None): """Choose a protocol specific helper class.""" helper_class = self.nfs_helper if (self.nfs_helper == GlusterNFSHelper and gluster_mgr and not gluster_mgr.path): helper_class = 
GlusterNFSVolHelper helper = helper_class(self._execute, self.configuration, gluster_manager=gluster_mgr) helper.init_helper() return helper def _allow_access_via_manager(self, gluster_mgr, context, share, access, share_server=None): """Allow access to the share.""" self._get_helper(gluster_mgr).allow_access('/', share, access) def _deny_access_via_manager(self, gluster_mgr, context, share, access, share_server=None): """Allow access to the share.""" self._get_helper(gluster_mgr).deny_access('/', share, access) class GlusterNFSHelper(ganesha.NASHelperBase): """Manage shares with Gluster-NFS server.""" def __init__(self, execute, config_object, **kwargs): self.gluster_manager = kwargs.pop('gluster_manager') super(GlusterNFSHelper, self).__init__(execute, config_object, **kwargs) def get_export(self, share): return self.gluster_manager.export def _get_export_dir_dict(self): """Get the export entries of shares in the GlusterFS volume.""" export_dir = self.gluster_manager.get_vol_option( NFS_EXPORT_DIR) edh = {} if export_dir: # see # https://github.com/gluster/glusterfs # /blob/aa19909/xlators/nfs/server/src/nfs.c#L1582 # regarding the format of nfs.export-dir edl = export_dir.split(',') # parsing export_dir into a dict of {dir: [hostpec,..]..} # format r = re.compile('\A/(.*)\((.*)\)\Z') for ed in edl: d, e = r.match(ed).groups() edh[d] = e.split('|') return edh def _manage_access(self, share_name, access_type, access_to, cbk): """Manage share access with cbk. Adjust the exports of the Gluster-NFS server using cbk. :param share_name: name of the share :type share_name: string :param access_type: type of access allowed in Manila :type access_type: string :param access_to: ip of the guest whose share access is managed :type access_to: string :param cbk: callback to adjust the exports of NFS server Following is the description of cbk(ddict, edir, host). :param ddict: association of shares with ips that have access to them :type ddict: dict :param edir: name of share i.e. 
export directory :type edir: string :param host: ip address derived from the access object :type host: string :returns: bool (cbk leaves ddict intact) or None (cbk modifies ddict) """ if access_type != 'ip': raise exception.InvalidShareAccess('only ip access type allowed') export_dir_dict = self._get_export_dir_dict() if cbk(export_dir_dict, share_name, access_to): return if export_dir_dict: export_dir_new = (",".join("/%s(%s)" % (d, "|".join(v)) for d, v in sorted(export_dir_dict.items()))) else: export_dir_new = None self.gluster_manager.set_vol_option(NFS_EXPORT_DIR, export_dir_new) def allow_access(self, base, share, access): """Allow access to a share.""" def cbk(ddict, edir, host): if edir not in ddict: ddict[edir] = [] if host in ddict[edir]: return True ddict[edir].append(host) path = self.gluster_manager.path self._manage_access(path[1:], access['access_type'], access['access_to'], cbk) def deny_access(self, base, share, access): """Deny access to a share.""" def cbk(ddict, edir, host): if edir not in ddict or host not in ddict[edir]: return True ddict[edir].remove(host) if not ddict[edir]: ddict.pop(edir) path = self.gluster_manager.path self._manage_access(path[1:], access['access_type'], access['access_to'], cbk) class GlusterNFSVolHelper(GlusterNFSHelper): """Manage shares with Gluster-NFS server, volume mapped variant.""" def _get_vol_exports(self): export_vol = self.gluster_manager.get_vol_option( NFS_RPC_AUTH_ALLOW) return export_vol.split(',') if export_vol else [] def _manage_access(self, access_type, access_to, cbk): """Manage share access with cbk. Adjust the exports of the Gluster-NFS server using cbk. :param access_type: type of access allowed in Manila :type access_type: string :param access_to: ip of the guest whose share access is managed :type access_to: string :param cbk: callback to adjust the exports of NFS server Following is the description of cbk(explist, host). 
:param explist: list of hosts that have access to the share :type explist: list :param host: ip address derived from the access object :type host: string :returns: bool (cbk leaves ddict intact) or None (cbk modifies ddict) """ if access_type != 'ip': raise exception.InvalidShareAccess('only ip access type allowed') export_vol_list = self._get_vol_exports() if cbk(export_vol_list, access_to): return if export_vol_list: argseq = ((NFS_RPC_AUTH_ALLOW, ','.join(export_vol_list)), (NFS_RPC_AUTH_REJECT, None)) else: argseq = ((NFS_RPC_AUTH_ALLOW, None), (NFS_RPC_AUTH_REJECT, '*')) for args in argseq: self.gluster_manager.set_vol_option(*args) def allow_access(self, base, share, access): """Allow access to a share.""" def cbk(explist, host): if host in explist: return True explist.append(host) self._manage_access(access['access_type'], access['access_to'], cbk) def deny_access(self, base, share, access): """Deny access to a share.""" def cbk(explist, host): if host not in explist: return True explist.remove(host) self._manage_access(access['access_type'], access['access_to'], cbk) class GaneshaNFSHelper(ganesha.GaneshaNASHelper): shared_data = {} def __init__(self, execute, config_object, **kwargs): self.gluster_manager = kwargs.pop('gluster_manager') if config_object.glusterfs_ganesha_server_ip: execute = ganesha_utils.SSHExecutor( config_object.glusterfs_ganesha_server_ip, 22, None, config_object.glusterfs_ganesha_server_username, password=config_object.glusterfs_ganesha_server_password, privatekey=config_object.glusterfs_path_to_private_key) else: execute = ganesha_utils.RootExecutor(execute) self.ganesha_host = config_object.glusterfs_ganesha_server_ip if not self.ganesha_host: self.ganesha_host = socket.gethostname() kwargs['tag'] = '-'.join(('GLUSTER', 'Ganesha', self.ganesha_host)) super(GaneshaNFSHelper, self).__init__(execute, config_object, **kwargs) def get_export(self, share): return ':/'.join((self.ganesha_host, share['name'] + "--")) def init_helper(self): @utils.synchronized(self.tag) def _init_helper(): if self.tag in self.shared_data: return True super(GaneshaNFSHelper, self).init_helper() self.shared_data[self.tag] = { 'ganesha': self.ganesha, 'export_template': self.export_template} return False if _init_helper(): tagdata = self.shared_data[self.tag] self.ganesha = tagdata['ganesha'] self.export_template = tagdata['export_template'] def _default_config_hook(self): """Callback to provide default export block.""" dconf = super(GaneshaNFSHelper, self)._default_config_hook() conf_dir = ganesha_utils.path_from(__file__, "conf") ganesha_utils.patch(dconf, self._load_conf_dir(conf_dir)) return dconf def _fsal_hook(self, base, share, access): """Callback to create FSAL subblock.""" return {"Hostname": self.gluster_manager.host, "Volume": self.gluster_manager.volume, "Volpath": self.gluster_manager.path} manila-2.0.0/manila/share/drivers/glusterfs/common.py0000664000567000056710000004077312701407112024022 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Common GlussterFS routines.""" import re import xml.etree.cElementTree as etree from oslo_config import cfg from oslo_log import log import six from manila import exception from manila.i18n import _ from manila.i18n import _LE from manila.i18n import _LW from manila.share.drivers.ganesha import utils as ganesha_utils LOG = log.getLogger(__name__) glusterfs_common_opts = [ cfg.StrOpt('glusterfs_server_password', secret=True, deprecated_name='glusterfs_native_server_password', help='Remote GlusterFS server node\'s login password. ' 'This is not required if ' '\'glusterfs_path_to_private_key\' is ' 'configured.'), cfg.StrOpt('glusterfs_path_to_private_key', deprecated_name='glusterfs_native_path_to_private_key', help='Path of Manila host\'s private SSH key file.'), ] CONF = cfg.CONF CONF.register_opts(glusterfs_common_opts) def _check_volume_presence(f): def wrapper(self, *args, **kwargs): if not self.components.get('volume'): raise exception.GlusterfsException( _("Gluster address does not have a volume component.")) return f(self, *args, **kwargs) return wrapper def volxml_get(xmlout, path, *default): """Extract a value by a path from XML.""" value = xmlout.find(path) if value is None: if default: return default[0] raise exception.InvalidShare( _('Xpath %s not found in volume query response XML') % path) return value.text class GlusterManager(object): """Interface with a GlusterFS volume.""" scheme = re.compile('\A(?:(?P[^:@/]+)@)?' '(?P[^:@/]+)' '(?::/(?P[^/]+)(?P/.*)?)?\Z') # See this about GlusterFS' convention for Boolean interpretation # of strings: # https://github.com/gluster/glusterfs/blob/v3.7.8/ # libglusterfs/src/common-utils.c#L1680-L1708 GLUSTERFS_TRUE_VALUES = ('ON', 'YES', 'TRUE', 'ENABLE', '1') GLUSTERFS_FALSE_VALUES = ('OFF', 'NO', 'FALSE', 'DISABLE', '0') @classmethod def parse(cls, address): """Parse address string into component dict.""" m = cls.scheme.search(address) if not m: raise exception.GlusterfsException( _('Invalid gluster address %s.') % address) return m.groupdict() def __getattr__(self, attr): if attr in self.components: return self.components[attr] raise AttributeError("'%(typ)s' object has no attribute '%(attr)s'" % {'typ': type(self).__name__, 'attr': attr}) def __init__(self, address, execf=None, path_to_private_key=None, remote_server_password=None, requires={}): """Initialize a GlusterManager instance. :param address: the Gluster URI (either string of [@][:/[/]] format or component dict with "user", "host", "volume", "path" keys). :param execf: executor function for management commands. :param path_to_private_key: path to private ssh key of remote server. :param remote_server_password: ssh password for remote server. :param requires: a dict mapping some of the component names to either True or False; having it specified, respectively, the presence or absence of the given component in the uri will be enforced. """ if isinstance(address, dict): tmp_addr = "" if address.get('user') is not None: tmp_addr = address.get('user') + '@' if address.get('host') is not None: tmp_addr += address.get('host') if address.get('volume') is not None: tmp_addr += ':/' + address.get('volume') if address.get('path') is not None: tmp_addr += address.get('path') self.components = self.parse(tmp_addr) # Verify that the original dictionary matches the parsed # dictionary. This will flag typos such as {'volume': 'vol/err'} # in the original dictionary as errors. 
Additionally, # extra keys will need to be flagged as an error. sanitized_address = {key: None for key in self.scheme.groupindex} sanitized_address.update(address) if sanitized_address != self.components: raise exception.GlusterfsException( _('Invalid gluster address %s.') % address) else: self.components = self.parse(address) for k, v in requires.items(): if v is None: continue if (self.components.get(k) is not None) != v: raise exception.GlusterfsException( _('Invalid gluster address %s.') % address) self.path_to_private_key = path_to_private_key self.remote_server_password = remote_server_password if execf: self.gluster_call = self.make_gluster_call(execf) @property def host_access(self): return '@'.join(filter(None, (self.user, self.host))) def _build_uri(self, base): u = base for sep, comp in ((':/', 'volume'), ('', 'path')): if self.components[comp] is None: break u = sep.join((u, self.components[comp])) return u @property def qualified(self): return self._build_uri(self.host_access) @property def export(self): if self.volume: return self._build_uri(self.host) def make_gluster_call(self, execf): """Execute a Gluster command locally or remotely.""" if self.user: gluster_execf = ganesha_utils.SSHExecutor( self.host, 22, None, self.user, password=self.remote_server_password, privatekey=self.path_to_private_key) else: gluster_execf = ganesha_utils.RootExecutor(execf) def _gluster_call(*args, **kwargs): logmsg = kwargs.pop('log', None) error_policy = kwargs.pop('error_policy', 'coerce') if (error_policy not in ('raw', 'coerce', 'suppress') and not isinstance(error_policy[0], int)): raise TypeError(_("undefined error_policy %s") % repr(error_policy)) try: return gluster_execf(*(('gluster',) + args), **kwargs) except exception.ProcessExecutionError as exc: if error_policy == 'raw': raise elif error_policy == 'coerce': pass elif (error_policy == 'suppress' or exc.exit_code in error_policy): return if logmsg: LOG.error(_LE("%s: GlusterFS instrumentation failed.") % logmsg) raise exception.GlusterfsException( _("GlusterFS management command '%(cmd)s' failed " "with details as follows:\n%(details)s.") % { 'cmd': ' '.join(args), 'details': exc}) return _gluster_call def xml_response_check(self, xmlout, command, countpath=None): """Sanity check for GlusterFS XML response.""" commandstr = ' '.join(command) ret = {} for e in 'opRet', 'opErrno': ret[e] = int(volxml_get(xmlout, e)) if ret == {'opRet': -1, 'opErrno': 0}: raise exception.GlusterfsException(_( 'GlusterFS command %(command)s on volume %(volume)s failed' ) % {'volume': self.volume, 'command': command}) if list(six.itervalues(ret)) != [0, 0]: errdct = {'volume': self.volume, 'command': commandstr, 'opErrstr': volxml_get(xmlout, 'opErrstr', None)} errdct.update(ret) raise exception.InvalidShare(_( 'GlusterFS command %(command)s on volume %(volume)s got ' 'unexpected response: ' 'opRet=%(opRet)s, opErrno=%(opErrno)s, opErrstr=%(opErrstr)s' ) % errdct) if not countpath: return count = volxml_get(xmlout, countpath) if count != '1': raise exception.InvalidShare( _('GlusterFS command %(command)s on volume %(volume)s got ' 'ambiguous response: ' '%(count)s records') % { 'volume': self.volume, 'command': commandstr, 'count': count}) def _get_vol_option_via_info(self, option): """Get the value of an option set on a GlusterFS volume via volinfo.""" args = ('--xml', 'volume', 'info', self.volume) out, err = self.gluster_call(*args, log=_LE("retrieving volume info")) if not out: raise exception.GlusterfsException( 'gluster volume info %s: no data 
received' % self.volume ) volxml = etree.fromstring(out) self.xml_response_check(volxml, args[1:], './volInfo/volumes/count') for e in volxml.findall(".//option"): o, v = (volxml_get(e, a) for a in ('name', 'value')) if o == option: return v @_check_volume_presence def _get_vol_user_option(self, useropt): """Get the value of an user option set on a GlusterFS volume.""" option = '.'.join(('user', useropt)) return self._get_vol_option_via_info(option) @_check_volume_presence def _get_vol_regular_option(self, option): """Get the value of a regular option set on a GlusterFS volume.""" args = ('--xml', 'volume', 'get', self.volume, option) out, err = self.gluster_call(*args, check_exit_code=False) if not out: # all input is valid, but the option has not been set # (nb. some options do come by a null value, but some # don't even have that, see eg. cluster.nufa) return try: optxml = etree.fromstring(out) except Exception: # non-xml output indicates that GlusterFS backend does not support # 'vol get', we fall back to 'vol info' based retrieval (glusterfs # < 3.7). return self._get_vol_option_via_info(option) self.xml_response_check(optxml, args[1:], './volGetopts/count') return volxml_get(optxml, './volGetopts/Value') def get_vol_option(self, option, boolean=False): """Get the value of an option set on a GlusterFS volume.""" useropt = re.sub('\Auser\.', '', option) if option == useropt: value = self._get_vol_regular_option(option) else: value = self._get_vol_user_option(useropt) if not boolean or value is None: return value if value.upper() in self.GLUSTERFS_TRUE_VALUES: return True if value.upper() in self.GLUSTERFS_FALSE_VALUES: return False raise exception.GlusterfsException(_( "GlusterFS volume option on volume %(volume)s: " "%(option)s=%(value)s cannot be interpreted as Boolean") % { 'volume': self.volume, 'option': option, 'value': value}) @_check_volume_presence def set_vol_option(self, option, value, ignore_failure=False): value = {True: self.GLUSTERFS_TRUE_VALUES[0], False: self.GLUSTERFS_FALSE_VALUES[0]}.get(value, value) if value is None: args = ('reset', (option,)) else: args = ('set', (option, value)) policy = (1,) if ignore_failure else 'coerce' self.gluster_call( 'volume', args[0], self.volume, *args[1], error_policy=policy) def get_gluster_version(self): """Retrieve GlusterFS version. :returns: version (as tuple of strings, example: ('3', '6', '0beta2')) """ out, err = self.gluster_call('--version', log=_LE("GlusterFS version query")) try: owords = out.split() if owords[0] != 'glusterfs': raise RuntimeError vers = owords[1].split('.') # provoke an exception if vers does not start with two numerals int(vers[0]) int(vers[1]) except Exception: raise exception.GlusterfsException( _("Cannot parse version info obtained from server " "%(server)s, version info: %(info)s") % {'server': self.host, 'info': out}) return vers def check_gluster_version(self, minvers): """Retrieve and check GlusterFS version. :param minvers: minimum version to require (given as tuple of integers, example: (3, 6)) """ vers = self.get_gluster_version() if numreduct(vers) < minvers: raise exception.GlusterfsException(_( "Unsupported GlusterFS version %(version)s on server " "%(server)s, minimum requirement: %(minvers)s") % { 'server': self.host, 'version': '.'.join(vers), 'minvers': '.'.join(six.text_type(c) for c in minvers)}) def numreduct(vers): """The numeric reduct of a tuple of strings. That is, applying an integer conversion map on the longest initial segment of vers which consists of numerals. 
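    For instance (illustrative only, with the tuple-of-strings form that
    get_gluster_version() returns):

        >>> numreduct(('3', '6', '0beta2'))
        (3, 6)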
""" numvers = [] for c in vers: try: numvers.append(int(c)) except ValueError: break return tuple(numvers) def _mount_gluster_vol(execute, gluster_export, mount_path, ensure=False): """Mount a GlusterFS volume at the specified mount path. :param execute: command exectution function :param gluster_export: GlusterFS export to mount :param mount_path: path to mount at :param ensure: boolean to allow remounting a volume with a warning """ execute('mkdir', '-p', mount_path) command = ['mount', '-t', 'glusterfs', gluster_export, mount_path] try: execute(*command, run_as_root=True) except exception.ProcessExecutionError as exc: if ensure and 'already mounted' in exc.stderr: LOG.warning(_LW("%s is already mounted."), gluster_export) else: raise exception.GlusterfsException( 'Unable to mount Gluster volume' ) def _umount_gluster_vol(execute, mount_path): """Unmount a GlusterFS volume at the specified mount path. :param execute: command exectution function :param mount_path: path where volume is mounted """ try: execute('umount', mount_path, run_as_root=True) except exception.ProcessExecutionError as exc: msg = (_("Unable to unmount gluster volume. " "mount_dir: %(mount_path)s, Error: %(error)s") % {'mount_path': mount_path, 'error': exc.stderr}) LOG.error(msg) raise exception.GlusterfsException(msg) def _restart_gluster_vol(gluster_mgr): """Restart a GlusterFS volume through its manager. :param gluster_mgr: GlusterManager instance """ # TODO(csaba): '--mode=script' ensures that the Gluster CLI runs in # script mode. This seems unnecessary as the Gluster CLI is # expected to run in non-interactive mode when the stdin is not # a terminal, as is the case below. But on testing, found the # behaviour of Gluster-CLI to be the contrary. Need to investigate # this odd-behaviour of Gluster-CLI. gluster_mgr.gluster_call( 'volume', 'stop', gluster_mgr.volume, '--mode=script', log=_LE("stopping GlusterFS volume %s") % gluster_mgr.volume) gluster_mgr.gluster_call( 'volume', 'start', gluster_mgr.volume, log=_LE("starting GlusterFS volume %s") % gluster_mgr.volume) manila-2.0.0/manila/share/drivers/glusterfs/conf/0000775000567000056710000000000012701407265023103 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/glusterfs/conf/10-glusterfs-export-template.conf0000664000567000056710000000020612701407107031327 0ustar jenkinsjenkins00000000000000EXPORT { FSAL { Name = GLUSTER; Hostname = @config; Volume = @config; Volpath = @runtime; } } manila-2.0.0/manila/share/drivers/glusterfs/layout_directory.py0000664000567000056710000002123412701407107026126 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""GlusterFS directory mapped share layout.""" import os from oslo_config import cfg from oslo_log import log import six from manila import exception from manila.i18n import _ from manila.i18n import _LE from manila.share.drivers.glusterfs import common from manila.share.drivers.glusterfs import layout LOG = log.getLogger(__name__) glusterfs_directory_mapped_opts = [ cfg.StrOpt('glusterfs_target', help='Specifies the GlusterFS volume to be mounted on the ' 'Manila host. It is of the form ' '[remoteuser@]:.'), cfg.StrOpt('glusterfs_mount_point_base', default='$state_path/mnt', help='Base directory containing mount points for Gluster ' 'volumes.'), ] CONF = cfg.CONF CONF.register_opts(glusterfs_directory_mapped_opts) class GlusterfsDirectoryMappedLayout(layout.GlusterfsShareLayoutBase): def __init__(self, driver, *args, **kwargs): super(GlusterfsDirectoryMappedLayout, self).__init__( driver, *args, **kwargs) self.configuration.append_config_values( common.glusterfs_common_opts) self.configuration.append_config_values( glusterfs_directory_mapped_opts) def _glustermanager(self, gluster_address): """Create GlusterManager object for gluster_address.""" return common.GlusterManager( gluster_address, self.driver._execute, self.configuration.glusterfs_path_to_private_key, self.configuration.glusterfs_server_password, requires={'volume': True}) def do_setup(self, context): """Prepares the backend and appropriate NAS helpers.""" if not self.configuration.glusterfs_target: raise exception.GlusterfsException( _('glusterfs_target configuration that specifies the GlusterFS' ' volume to be mounted on the Manila host is not set.')) self.gluster_manager = self._glustermanager( self.configuration.glusterfs_target) self.gluster_manager.check_gluster_version( self.driver.GLUSTERFS_VERSION_MIN) self._check_mount_glusterfs() # enable quota options of a GlusteFS volume to allow # creation of shares of specific size args = ('volume', 'quota', self.gluster_manager.volume, 'enable') try: self.gluster_manager.gluster_call(*args) except exception.GlusterfsException: if (self.gluster_manager. 
get_vol_option('features.quota')) != 'on': LOG.error(_LE("Error in tuning GlusterFS volume to enable " "creation of shares of specific size.")) raise self._ensure_gluster_vol_mounted() def _share_manager(self, share): comp_path = self.gluster_manager.components.copy() comp_path.update({'path': '/' + share['name']}) return self._glustermanager(comp_path) def _get_mount_point_for_gluster_vol(self): """Return mount point for the GlusterFS volume.""" return os.path.join(self.configuration.glusterfs_mount_point_base, self.gluster_manager.volume) def _ensure_gluster_vol_mounted(self): """Ensure GlusterFS volume is native-mounted on Manila host.""" mount_path = self._get_mount_point_for_gluster_vol() try: common._mount_gluster_vol(self.driver._execute, self.gluster_manager.export, mount_path, ensure=True) except exception.GlusterfsException: LOG.error(_LE('Could not mount the Gluster volume %s'), self.gluster_manager.volume) raise def _get_local_share_path(self, share): """Determine mount path of the GlusterFS volume in the Manila host.""" local_vol_path = self._get_mount_point_for_gluster_vol() if not os.access(local_vol_path, os.R_OK): raise exception.GlusterfsException('share path %s does not exist' % local_vol_path) return os.path.join(local_vol_path, share['name']) def _update_share_stats(self): """Retrieve stats info from the GlusterFS volume.""" # sanity check for gluster ctl mount smpb = os.stat(self.configuration.glusterfs_mount_point_base) smp = os.stat(self._get_mount_point_for_gluster_vol()) if smpb.st_dev == smp.st_dev: raise exception.GlusterfsException( _("GlusterFS control mount is not available") ) smpv = os.statvfs(self._get_mount_point_for_gluster_vol()) return {'total_capacity_gb': (smpv.f_blocks * smpv.f_frsize) >> 30, 'free_capacity_gb': (smpv.f_bavail * smpv.f_frsize) >> 30} def create_share(self, ctx, share, share_server=None): """Create a sub-directory/share in the GlusterFS volume.""" # probe into getting a NAS protocol helper for the share in order # to facilitate early detection of unsupported protocol type sizestr = six.text_type(share['size']) + 'GB' share_dir = '/' + share['name'] local_share_path = self._get_local_share_path(share) cmd = ['mkdir', local_share_path] # set hard limit quota on the sub-directory/share args = ('volume', 'quota', self.gluster_manager.volume, 'limit-usage', share_dir, sizestr) try: self.driver._execute(*cmd, run_as_root=True) self.gluster_manager.gluster_call(*args) except Exception as exc: if isinstance(exc, exception.ProcessExecutionError): exc = exception.GlusterfsException(exc) if isinstance(exc, exception.GlusterfsException): self._cleanup_create_share(local_share_path, share['name']) LOG.error(_LE('Unable to create share %s'), share['name']) raise exc comp_share = self.gluster_manager.components.copy() comp_share['path'] = '/' + share['name'] export_location = self.driver._setup_via_manager( {'share': share, 'manager': self._glustermanager(comp_share)}) return export_location def _cleanup_create_share(self, share_path, share_name): """Cleanup share that errored out during its creation.""" if os.path.exists(share_path): cmd = ['rm', '-rf', share_path] try: self.driver._execute(*cmd, run_as_root=True) except exception.ProcessExecutionError as exc: LOG.error(_LE('Cannot cleanup share, %s, that errored out ' 'during its creation, but exists in GlusterFS ' 'volume.'), share_name) raise exception.GlusterfsException(exc) def delete_share(self, context, share, share_server=None): """Remove a sub-directory/share from the GlusterFS volume.""" 
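        # An illustrative sketch (values hypothetical): with
        # glusterfs_mount_point_base resolving to '/opt/stack/data/manila/mnt',
        # a backing volume named 'testvol' and a share named 'share-42',
        # _get_local_share_path() yields
        #     /opt/stack/data/manila/mnt/testvol/share-42
        # and that is the directory removed below.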
local_share_path = self._get_local_share_path(share) cmd = ['rm', '-rf', local_share_path] try: self.driver._execute(*cmd, run_as_root=True) except exception.ProcessExecutionError: LOG.error(_LE('Unable to delete share %s'), share['name']) raise def ensure_share(self, context, share, share_server=None): pass def create_share_from_snapshot(self, context, share, snapshot, share_server=None): raise NotImplementedError def create_snapshot(self, context, snapshot, share_server=None): raise NotImplementedError def delete_snapshot(self, context, snapshot, share_server=None): raise NotImplementedError def manage_existing(self, share, driver_options): raise NotImplementedError def unmanage(self, share): raise NotImplementedError def extend_share(self, share, new_size, share_server=None): raise NotImplementedError def shrink_share(self, share, new_size, share_server=None): raise NotImplementedError manila-2.0.0/manila/share/drivers/glusterfs/layout.py0000664000567000056710000002070012701407107024037 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """GlusterFS share layouts. A share layout encapsulates a particular way of mapping GlusterFS entities to a share and utilizing them to back the share. """ import abc import errno from oslo_config import cfg from oslo_utils import importutils import six from manila import exception from manila.i18n import _ from manila.share import driver glusterfs_share_layout_opts = [ cfg.StrOpt( 'glusterfs_share_layout', help="Specifies GlusterFS share layout, that is, " "the method of associating backing GlusterFS " "resources to shares."), ] CONF = cfg.CONF CONF.register_opts(glusterfs_share_layout_opts) class GlusterfsShareDriverBase(driver.ShareDriver): LAYOUT_PREFIX = 'manila.share.drivers.glusterfs' supported_layouts = () supported_protocols = () GLUSTERFS_VERSION_MIN = (0, 0) def __init__(self, *args, **kwargs): super(GlusterfsShareDriverBase, self).__init__(*args, **kwargs) self.configuration.append_config_values( glusterfs_share_layout_opts) layout_name = self.configuration.glusterfs_share_layout if not layout_name: layout_name = self.supported_layouts[0] if layout_name not in self.supported_layouts: raise exception.GlusterfsException( _('driver %(driver)s does not support %(layout)s layout') % {'driver': type(self).__name__, 'layout': layout_name}) self.layout = importutils.import_object( '.'.join((self.LAYOUT_PREFIX, layout_name)), self, **kwargs) # we determine snapshot support in our own scope, as # 1) the calculation based on parent method # redefinition does not work for us, as actual # glusterfs driver classes are subclassed from # *this* class, not from driver.ShareDriver # and they don't need to redefine snapshot # methods for themselves; # 2) snapshot support depends on choice of layout. 
self._snapshots_are_supported = getattr(self.layout, '_snapshots_are_supported', False) def _setup_via_manager(self, share_mgr, share_mgr_parent=None): """Callback for layout's `create_share` and `create_share_from_snapshot` :param share_mgr: a {'share': , 'manager': } dict where is the share created in `create_share` or `create_share_from_snapshot` and is a GlusterManager instance representing the GlusterFS resource allocated for it. :param gluster_mgr_parent: a {'share': , 'manager': } dict where is the original share of the snapshot used in `create_share_from_snapshot` and is a GlusterManager instance representing the GlusterFS resource allocated for it. :returns: export location for share_mgr['share']. """ def allow_access(self, context, share, access, share_server=None): gluster_mgr = self.layout._share_manager(share) return self._allow_access_via_manager(gluster_mgr, context, share, access, share_server) def deny_access(self, context, share, access, share_server=None): gluster_mgr = self.layout._share_manager(share) return self._deny_access_via_manager(gluster_mgr, context, share, access, share_server) def _allow_access_via_manager(self, gluster_mgr, context, share, access, share_server): raise NotImplementedError() def _deny_access_via_manager(self, gluster_mgr, context, share, access, share_server): raise NotImplementedError() def do_setup(self, *a, **kw): return self.layout.do_setup(*a, **kw) @classmethod def _check_proto(cls, share): proto = share['share_proto'].upper() if proto not in cls.supported_protocols: msg = _("Share protocol %s is not supported.") % proto raise exception.ShareBackendException(msg=msg) def create_share(self, context, share, *a, **kw): self._check_proto(share) return self.layout.create_share(context, share, *a, **kw) def create_share_from_snapshot(self, context, share, *a, **kw): self._check_proto(share) return self.layout.create_share_from_snapshot(context, share, *a, **kw) def create_snapshot(self, *a, **kw): return self.layout.create_snapshot(*a, **kw) def delete_share(self, *a, **kw): return self.layout.delete_share(*a, **kw) def delete_snapshot(self, *a, **kw): return self.layout.delete_snapshot(*a, **kw) def ensure_share(self, *a, **kw): return self.layout.ensure_share(*a, **kw) def manage_existing(self, *a, **kw): return self.layout.manage_existing(*a, **kw) def unmanage(self, *a, **kw): return self.layout.unmanage(*a, **kw) def extend_share(self, *a, **kw): return self.layout.extend_share(*a, **kw) def shrink_share(self, *a, **kw): return self.layout.shrink_share(*a, **kw) def _update_share_stats(self, data={}): try: data.update(self.layout._update_share_stats()) except NotImplementedError: pass super(GlusterfsShareDriverBase, self)._update_share_stats(data) @six.add_metaclass(abc.ABCMeta) class GlusterfsShareLayoutBase(object): """Base class for share layouts.""" def __init__(self, driver, *args, **kwargs): self.driver = driver self.configuration = kwargs.get('configuration') def _check_mount_glusterfs(self): """Checks if mount.glusterfs(8) is available.""" try: self.driver._execute('mount.glusterfs', check_exit_code=False) except OSError as exc: if exc.errno == errno.ENOENT: raise exception.GlusterfsException( _('mount.glusterfs is not installed.')) else: raise @abc.abstractmethod def _share_manager(self, share): """Return GlusterManager object representing share's backend.""" @abc.abstractmethod def do_setup(self, context): """Any initialization the share driver does while starting.""" @abc.abstractmethod def create_share(self, context, share, 
share_server=None): """Is called to create share.""" @abc.abstractmethod def create_share_from_snapshot(self, context, share, snapshot, share_server=None): """Is called to create share from snapshot.""" @abc.abstractmethod def create_snapshot(self, context, snapshot, share_server=None): """Is called to create snapshot.""" @abc.abstractmethod def delete_share(self, context, share, share_server=None): """Is called to remove share.""" @abc.abstractmethod def delete_snapshot(self, context, snapshot, share_server=None): """Is called to remove snapshot.""" @abc.abstractmethod def ensure_share(self, context, share, share_server=None): """Invoked to ensure that share is exported.""" @abc.abstractmethod def manage_existing(self, share, driver_options): """Brings an existing share under Manila management.""" @abc.abstractmethod def unmanage(self, share): """Removes the specified share from Manila management.""" @abc.abstractmethod def extend_share(self, share, new_size, share_server=None): """Extends size of existing share.""" @abc.abstractmethod def shrink_share(self, share, new_size, share_server=None): """Shrinks size of existing share.""" def _update_share_stats(self): raise NotImplementedError() manila-2.0.0/manila/share/drivers/glusterfs/layout_volume.py0000664000567000056710000006320412701407112025430 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """GlusterFS volume mapped share layout.""" import os import random import re import shutil import string import tempfile import xml.etree.cElementTree as etree from oslo_config import cfg from oslo_log import log import six from manila import exception from manila.i18n import _ from manila.i18n import _LE from manila.i18n import _LI from manila.i18n import _LW from manila.share.drivers.glusterfs import common from manila.share.drivers.glusterfs import layout from manila import utils LOG = log.getLogger(__name__) glusterfs_volume_mapped_opts = [ cfg.ListOpt('glusterfs_servers', default=[], deprecated_name='glusterfs_targets', help='List of GlusterFS servers that can be used to create ' 'shares. Each GlusterFS server should be of the form ' '[remoteuser@], and they are assumed to ' 'belong to distinct Gluster clusters.'), cfg.StrOpt('glusterfs_volume_pattern', help='Regular expression template used to filter ' 'GlusterFS volumes for share creation. ' 'The regex template can optionally (ie. with support ' 'of the GlusterFS backend) contain the #{size} ' 'parameter which matches an integer (sequence of ' 'digits) in which case the value shall be interpreted as ' 'size of the volume in GB. Examples: ' '"manila-share-volume-\d+$", ' '"manila-share-volume-#{size}G-\d+$"; ' 'with matching volume names, respectively: ' '"manila-share-volume-12", "manila-share-volume-3G-13". 
' 'In latter example, the number that matches "#{size}", ' 'that is, 3, is an indication that the size of volume ' 'is 3G.'), ] CONF = cfg.CONF CONF.register_opts(glusterfs_volume_mapped_opts) # The dict specifying named parameters # that can be used with glusterfs_volume_pattern # in #{} format. # For each of them we give regex pattern it matches # and a transformer function ('trans') for the matched # string value. # Currently we handle only #{size}. PATTERN_DICT = {'size': {'pattern': '(?P\d+)', 'trans': int}} USER_MANILA_SHARE = 'user.manila-share' USER_CLONED_FROM = 'user.manila-cloned-from' UUID_RE = re.compile('\A[\da-f]{8}-([\da-f]{4}-){3}[\da-f]{12}\Z', re.I) class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase): _snapshots_are_supported = True def __init__(self, driver, *args, **kwargs): super(GlusterfsVolumeMappedLayout, self).__init__( driver, *args, **kwargs) self.gluster_used_vols = set() self.configuration.append_config_values( common.glusterfs_common_opts) self.configuration.append_config_values( glusterfs_volume_mapped_opts) self.gluster_nosnap_vols_dict = {} self.volume_pattern = self._compile_volume_pattern() self.volume_pattern_keys = self.volume_pattern.groupindex.keys() for srvaddr in self.configuration.glusterfs_servers: # format check for srvaddr self._glustermanager(srvaddr, False) self.glusterfs_versions = {} self.private_storage = kwargs.get('private_storage') def _compile_volume_pattern(self): """Compile a RegexObject from the config specified regex template. (cfg.glusterfs_volume_pattern) """ subdict = {} for key, val in PATTERN_DICT.items(): subdict[key] = val['pattern'] # Using templates with placeholder syntax #{} class CustomTemplate(string.Template): delimiter = '#' volume_pattern = CustomTemplate( self.configuration.glusterfs_volume_pattern).substitute( subdict) return re.compile(volume_pattern) def do_setup(self, context): """Setup the GlusterFS volumes.""" glusterfs_versions, exceptions = {}, {} for srvaddr in self.configuration.glusterfs_servers: try: glusterfs_versions[srvaddr] = self._glustermanager( srvaddr, False).get_gluster_version() except exception.GlusterfsException as exc: exceptions[srvaddr] = six.text_type(exc) if exceptions: for srvaddr, excmsg in exceptions.items(): LOG.error(_LE("'gluster version' failed on server " "%(server)s with: %(message)s"), {'server': srvaddr, 'message': excmsg}) raise exception.GlusterfsException(_( "'gluster version' failed on servers %s") % ( ','.join(exceptions.keys()))) notsupp_servers = [] for srvaddr, vers in glusterfs_versions.items(): if common.numreduct(vers) < self.driver.GLUSTERFS_VERSION_MIN: notsupp_servers.append(srvaddr) if notsupp_servers: gluster_version_min_str = '.'.join( six.text_type(c) for c in self.driver.GLUSTERFS_VERSION_MIN) for srvaddr in notsupp_servers: LOG.error(_LE("GlusterFS version %(version)s on server " "%(server)s is not supported, " "minimum requirement: %(minvers)s"), {'server': srvaddr, 'version': '.'.join(glusterfs_versions[srvaddr]), 'minvers': gluster_version_min_str}) raise exception.GlusterfsException(_( "Unsupported GlusterFS version on servers %(servers)s, " "minimum requirement: %(minvers)s") % { 'servers': ','.join(notsupp_servers), 'minvers': gluster_version_min_str}) self.glusterfs_versions = glusterfs_versions gluster_volumes_initial = set( self._fetch_gluster_volumes(filter_used=False)) if not gluster_volumes_initial: # No suitable volumes are found on the Gluster end. # Raise exception. 
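            # To illustrate with the examples from the option's help text:
            # with glusterfs_volume_pattern set to
            # 'manila-share-volume-#{size}G-\d+$', the '#{size}' placeholder
            # is substituted with a named group matching digits, so a volume
            # named 'manila-share-volume-3G-13' matches with size 3; hitting
            # this branch means no configured server had any matching volume.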
msg = (_("Gluster backend does not provide any volume " "matching pattern %s" ) % self.configuration.glusterfs_volume_pattern) LOG.error(msg) raise exception.GlusterfsException(msg) LOG.info(_LI("Found %d Gluster volumes allocated for Manila." ), len(gluster_volumes_initial)) self._check_mount_glusterfs() def _glustermanager(self, gluster_address, req_volume=True): """Create GlusterManager object for gluster_address.""" return common.GlusterManager( gluster_address, self.driver._execute, self.configuration.glusterfs_path_to_private_key, self.configuration.glusterfs_server_password, requires={'volume': req_volume}) def _share_manager(self, share): """Return GlusterManager object representing share's backend.""" gluster_address = self.private_storage.get(share['id'], 'volume') if gluster_address is None: return return self._glustermanager(gluster_address) def _fetch_gluster_volumes(self, filter_used=True): """Do a 'gluster volume list | grep '. Aggregate the results from all servers. Extract the named groups from the matching volume names using the specs given in PATTERN_DICT. Return a dict with keys of the form :/ and values being dicts that map names of named groups to their extracted value. """ volumes_dict = {} for srvaddr in self.configuration.glusterfs_servers: gluster_mgr = self._glustermanager(srvaddr, False) if gluster_mgr.user: logmsg = _LE("Retrieving volume list " "on host %s") % gluster_mgr.host else: logmsg = _LE("Retrieving volume list") out, err = gluster_mgr.gluster_call('volume', 'list', log=logmsg) for volname in out.split("\n"): patmatch = self.volume_pattern.match(volname) if not patmatch: continue comp_vol = gluster_mgr.components.copy() comp_vol.update({'volume': volname}) gluster_mgr_vol = self._glustermanager(comp_vol) if filter_used: vshr = gluster_mgr_vol.get_vol_option( USER_MANILA_SHARE) or '' if UUID_RE.search(vshr): continue pattern_dict = {} for key in self.volume_pattern_keys: keymatch = patmatch.group(key) if keymatch is None: pattern_dict[key] = None else: trans = PATTERN_DICT[key].get('trans', lambda x: x) pattern_dict[key] = trans(keymatch) volumes_dict[gluster_mgr_vol.qualified] = pattern_dict return volumes_dict @utils.synchronized("glusterfs_native", external=False) def _pop_gluster_vol(self, size=None): """Pick an unbound volume. Do a _fetch_gluster_volumes() first to get the complete list of usable volumes. Keep only the unbound ones (ones that are not yet used to back a share). If size is given, try to pick one which has a size specification (according to the 'size' named group of the volume pattern), and its size is greater-than-or-equal to the given size. Return the volume chosen (in :/ format). """ voldict = self._fetch_gluster_volumes() # calculate the set of unused volumes unused_vols = set(voldict) - self.gluster_used_vols if not unused_vols: # No volumes available for use as share. Warn user. LOG.warning(_LW("No unused gluster volumes available for use as " "share! Create share won't be supported unless " "existing shares are deleted or some gluster " "volumes are created with names matching " "'glusterfs_volume_pattern'.")) else: LOG.info(_LI("Number of gluster volumes in use: " "%(inuse-numvols)s. Number of gluster volumes " "available for use as share: %(unused-numvols)s"), {'inuse-numvols': len(self.gluster_used_vols), 'unused-numvols': len(unused_vols)}) # volmap is the data structure used to categorize and sort # the unused volumes. 
It's a nested dictionary of structure # {: } # where is either an integer or None, # is a dictionary of structure {: } # where is a host name (IP address), is a list # of volumes (gluster addresses). volmap = {None: {}} # if both caller has specified size and 'size' occurs as # a parameter in the volume pattern... if size and 'size' in self.volume_pattern_keys: # then this function is used to extract the # size value for a given volume from the voldict... get_volsize = lambda vol: voldict[vol]['size'] else: # else just use a stub. get_volsize = lambda vol: None for vol in unused_vols: # For each unused volume, we extract the # and values with which it can be inserted # into the volmap, and conditionally perform # the insertion (with the condition being: once # caller specified size and a size indication was # found in the volume name, we require that the # indicated size adheres to caller's spec). volsize = get_volsize(vol) if not volsize or volsize >= size: hostmap = volmap.get(volsize) if not hostmap: hostmap = {} volmap[volsize] = hostmap host = self._glustermanager(vol).host hostvols = hostmap.get(host) if not hostvols: hostvols = [] hostmap[host] = hostvols hostvols.append(vol) if len(volmap) > 1: # volmap has keys apart from the default None, # ie. volumes with sensible and adherent size # indication have been found. Then pick the smallest # of the size values. chosen_size = sorted(n for n in volmap.keys() if n)[0] else: chosen_size = None chosen_hostmap = volmap[chosen_size] if not chosen_hostmap: msg = (_("Couldn't find a free gluster volume to use.")) LOG.error(msg) raise exception.GlusterfsException(msg) # From the hosts we choose randomly to tend towards # even distribution of share backing volumes among # Gluster clusters. chosen_host = random.choice(list(chosen_hostmap.keys())) # Within a host's volumes, choose alphabetically first, # to make it predictable. vol = sorted(chosen_hostmap[chosen_host])[0] self.gluster_used_vols.add(vol) return vol @utils.synchronized("glusterfs_native", external=False) def _push_gluster_vol(self, exp_locn): try: self.gluster_used_vols.remove(exp_locn) except KeyError: msg = (_("Couldn't find the share in used list.")) LOG.error(msg) raise exception.GlusterfsException(msg) def _wipe_gluster_vol(self, gluster_mgr): # Create a temporary mount. gluster_export = gluster_mgr.export tmpdir = tempfile.mkdtemp() try: common._mount_gluster_vol(self.driver._execute, gluster_export, tmpdir) except exception.GlusterfsException: shutil.rmtree(tmpdir, ignore_errors=True) raise # Delete the contents of a GlusterFS volume that is temporarily # mounted. # From GlusterFS version 3.7, two directories, '.trashcan' at the root # of the GlusterFS volume and 'internal_op' within the '.trashcan' # directory, are internally created when a GlusterFS volume is started. # GlusterFS does not allow unlink(2) of the two directories. So do not # delete the paths of the two directories, but delete their contents # along with the rest of the contents of the volume. 
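        # Roughly, with the temporary mount at /tmp/tmpXYZ (a hypothetical
        # path), the command assembled below is, for GlusterFS >= 3.7:
        #     find /tmp/tmpXYZ -mindepth 1 \
        #         ! -path /tmp/tmpXYZ/.trashcan \
        #         ! -path /tmp/tmpXYZ/.trashcan/internal_op -delete
        # and, for older GlusterFS versions, simply:
        #     find /tmp/tmpXYZ -mindepth 1 -delete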
srvaddr = gluster_mgr.host_access if common.numreduct(self.glusterfs_versions[srvaddr]) < (3, 7): cmd = ['find', tmpdir, '-mindepth', '1', '-delete'] else: ignored_dirs = map(lambda x: os.path.join(tmpdir, *x), [('.trashcan', ), ('.trashcan', 'internal_op')]) ignored_dirs = list(ignored_dirs) cmd = ['find', tmpdir, '-mindepth', '1', '!', '-path', ignored_dirs[0], '!', '-path', ignored_dirs[1], '-delete'] try: self.driver._execute(*cmd, run_as_root=True) except exception.ProcessExecutionError as exc: msg = (_("Error trying to wipe gluster volume. " "gluster_export: %(export)s, Error: %(error)s") % {'export': gluster_export, 'error': exc.stderr}) LOG.error(msg) raise exception.GlusterfsException(msg) finally: # Unmount. common._umount_gluster_vol(self.driver._execute, tmpdir) shutil.rmtree(tmpdir, ignore_errors=True) def create_share(self, context, share, share_server=None): """Create a share using GlusterFS volume. 1 Manila share = 1 GlusterFS volume. Pick an unused GlusterFS volume for use as a share. """ try: vol = self._pop_gluster_vol(share['size']) except exception.GlusterfsException: msg = (_LE("Error creating share %(share_id)s"), {'share_id': share['id']}) LOG.error(msg) raise gmgr = self._glustermanager(vol) export = self.driver._setup_via_manager( {'share': share, 'manager': gmgr}) gmgr.set_vol_option(USER_MANILA_SHARE, share['id']) self.private_storage.update(share['id'], {'volume': vol}) # TODO(deepakcs): Enable quota and set it to the share size. # For native protocol, the export_location should be of the form: # server:/volname LOG.info(_LI("export_location sent back from create_share: %s"), export) return export def delete_share(self, context, share, share_server=None): """Delete a share on the GlusterFS volume. 1 Manila share = 1 GlusterFS volume. Put the gluster volume back in the available list. """ gmgr = self._share_manager(share) if not gmgr: # Share does not have a record in private storage. # It means create_share{,_from_snapshot} did not # succeed(*). In that case we should not obstruct # share deletion, so we just return doing nothing. # # (*) or we have a database corruption but then # basically does not matter what we do here return clone_of = gmgr.get_vol_option(USER_CLONED_FROM) or '' try: if UUID_RE.search(clone_of): # We take responsibility for the lifecycle # management of those volumes which were # created by us (as snapshot clones) ... gmgr.gluster_call('volume', 'delete', gmgr.volume) else: # ... for volumes that come from the pool, we return # them to the pool (after some purification rituals) self._wipe_gluster_vol(gmgr) gmgr.set_vol_option(USER_MANILA_SHARE, 'NONE') self._push_gluster_vol(gmgr.qualified) except exception.GlusterfsException: msg = (_LE("Error during delete_share request for " "share %(share_id)s"), {'share_id': share['id']}) LOG.error(msg) raise self.private_storage.delete(share['id']) # TODO(deepakcs): Disable quota. 
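    # A hedged sketch of what the helper below relies on: 'gluster snapshot
    # list <vol> --mode=script' is expected to print one backend snapshot
    # name per line, and exactly one of those names should contain the
    # Manila snapshot id as a substring; that name is picked as the backing
    # snapshot (any other count is treated as an error).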
    @staticmethod
    def _find_actual_backend_snapshot_name(gluster_mgr, snapshot):
        args = ('snapshot', 'list', gluster_mgr.volume, '--mode=script')
        out, err = gluster_mgr.gluster_call(
            *args,
            log=_LE("Retrieving snapshot list"))
        snapgrep = list(filter(lambda x: snapshot['id'] in x,
                               out.split("\n")))
        if len(snapgrep) != 1:
            msg = (_("Failed to identify backing GlusterFS object "
                     "for snapshot %(snap_id)s of share %(share_id)s: "
                     "a single candidate was expected, %(found)d were "
                     "found.") %
                   {'snap_id': snapshot['id'],
                    'share_id': snapshot['share_id'],
                    'found': len(snapgrep)})
            raise exception.GlusterfsException(msg)
        backend_snapshot_name = snapgrep[0]
        return backend_snapshot_name

    def create_share_from_snapshot(self, context, share, snapshot,
                                   share_server=None):
        old_gmgr = self._share_manager(snapshot['share_instance'])

        # The snapshot clone feature of the GlusterFS server, which is
        # essential to support this API, is available in GlusterFS server
        # versions 3.7 and higher. So do a version check.
        vers = self.glusterfs_versions[old_gmgr.host_access]
        minvers = (3, 7)
        if common.numreduct(vers) < minvers:
            minvers_str = '.'.join(six.text_type(c) for c in minvers)
            vers_str = '.'.join(vers)
            msg = (_("GlusterFS version %(version)s on server %(server)s "
                     "does not support creation of shares from snapshot. "
                     "Minimum requirement: %(minversion)s.") %
                   {'version': vers_str, 'server': old_gmgr.host,
                    'minversion': minvers_str})
            LOG.error(msg)
            raise exception.GlusterfsException(msg)

        # Clone the snapshot. The snapshot clone, a new GlusterFS volume,
        # will serve as a share.
        backend_snapshot_name = self._find_actual_backend_snapshot_name(
            old_gmgr, snapshot)
        volume = ''.join(['manila-', share['id']])
        args_tuple = (('snapshot', 'activate', backend_snapshot_name,
                       'force', '--mode=script'),
                      ('snapshot', 'clone', volume, backend_snapshot_name))
        for args in args_tuple:
            out, err = old_gmgr.gluster_call(
                *args,
                log=_LE("Creating share from snapshot"))

        # Get a manager for the new volume/share.
comp_vol = old_gmgr.components.copy() comp_vol.update({'volume': volume}) gmgr = self._glustermanager(comp_vol) export = self.driver._setup_via_manager( {'share': share, 'manager': gmgr}, {'share': snapshot['share_instance'], 'manager': old_gmgr}) argseq = (('set', [USER_CLONED_FROM, snapshot['share_id']]), ('set', [USER_MANILA_SHARE, share['id']]), ('start', [])) for op, opargs in argseq: args = ['volume', op, gmgr.volume] + opargs gmgr.gluster_call(*args, log=_LE("Creating share from snapshot")) self.gluster_used_vols.add(gmgr.qualified) self.private_storage.update(share['id'], {'volume': gmgr.qualified}) return export def create_snapshot(self, context, snapshot, share_server=None): """Creates a snapshot.""" gluster_mgr = self._share_manager(snapshot['share']) if gluster_mgr.qualified in self.gluster_nosnap_vols_dict: opret, operrno = -1, 0 operrstr = self.gluster_nosnap_vols_dict[gluster_mgr.qualified] else: args = ('--xml', 'snapshot', 'create', 'manila-' + snapshot['id'], gluster_mgr.volume) out, err = gluster_mgr.gluster_call( *args, log=_LE("Retrieving volume info")) if not out: raise exception.GlusterfsException( 'gluster volume info %s: no data received' % gluster_mgr.volume ) outxml = etree.fromstring(out) opret = int(common.volxml_get(outxml, 'opRet')) operrno = int(common.volxml_get(outxml, 'opErrno')) operrstr = common.volxml_get(outxml, 'opErrstr', None) if opret == -1: vers = self.glusterfs_versions[gluster_mgr.host_access] if common.numreduct(vers) > (3, 6): # This logic has not yet been implemented in GlusterFS 3.6 if operrno == 0: self.gluster_nosnap_vols_dict[ gluster_mgr.qualified] = operrstr msg = _("Share %(share_id)s does not support snapshots: " "%(errstr)s.") % {'share_id': snapshot['share_id'], 'errstr': operrstr} LOG.error(msg) raise exception.ShareSnapshotNotSupported(msg) raise exception.GlusterfsException( _("Creating snapshot for share %(share_id)s failed " "with %(errno)d: %(errstr)s") % { 'share_id': snapshot['share_id'], 'errno': operrno, 'errstr': operrstr}) def delete_snapshot(self, context, snapshot, share_server=None): """Deletes a snapshot.""" gluster_mgr = self._share_manager(snapshot['share']) backend_snapshot_name = self._find_actual_backend_snapshot_name( gluster_mgr, snapshot) args = ('--xml', 'snapshot', 'delete', backend_snapshot_name, '--mode=script') out, err = gluster_mgr.gluster_call( *args, log=_LE("Error deleting snapshot")) if not out: raise exception.GlusterfsException( _('gluster snapshot delete %s: no data received') % gluster_mgr.volume ) outxml = etree.fromstring(out) gluster_mgr.xml_response_check(outxml, args[1:]) def ensure_share(self, context, share, share_server=None): """Invoked to ensure that share is exported.""" gmgr = self._share_manager(share) self.gluster_used_vols.add(gmgr.qualified) gmgr.set_vol_option(USER_MANILA_SHARE, share['id']) # Debt... def manage_existing(self, share, driver_options): raise NotImplementedError() def unmanage(self, share): raise NotImplementedError() def extend_share(self, share, new_size, share_server=None): raise NotImplementedError() def shrink_share(self, share, new_size, share_server=None): raise NotImplementedError() manila-2.0.0/manila/share/drivers/glusterfs/glusterfs_native.py0000664000567000056710000002302512701407107026111 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ GlusterFS native protocol (glusterfs) driver for shares. Manila share is a GlusterFS volume. Unlike the generic driver, this does not use service VM approach. Instances directly talk with the GlusterFS backend storage pool. Instance use the 'glusterfs' protocol to mount the GlusterFS share. Access to the share is allowed via SSL Certificates. Only the instance which has the SSL trust established with the GlusterFS backend can mount and hence use the share. Supports working with multiple glusterfs volumes. """ import re from oslo_log import log from manila import exception from manila.i18n import _ from manila.i18n import _LW from manila.share import driver from manila.share.drivers.glusterfs import common from manila.share.drivers.glusterfs import layout from manila import utils LOG = log.getLogger(__name__) ACCESS_TYPE_CERT = 'cert' AUTH_SSL_ALLOW = 'auth.ssl-allow' CLIENT_SSL = 'client.ssl' NFS_EXPORT_VOL = 'nfs.export-volumes' SERVER_SSL = 'server.ssl' DYNAMIC_AUTH = 'server.dynamic-auth' class GlusterfsNativeShareDriver(driver.ExecuteMixin, layout.GlusterfsShareDriverBase): """GlusterFS native protocol (glusterfs) share driver. Executes commands relating to Shares. Supports working with multiple glusterfs volumes. API version history: 1.0 - Initial version. 1.1 - Support for working with multiple gluster volumes. """ GLUSTERFS_VERSION_MIN = (3, 6) supported_layouts = ('layout_volume.GlusterfsVolumeMappedLayout',) supported_protocols = ('GLUSTERFS',) def __init__(self, *args, **kwargs): super(GlusterfsNativeShareDriver, self).__init__( False, *args, **kwargs) self._helpers = None self.backend_name = self.configuration.safe_get( 'share_backend_name') or 'GlusterFS-Native' def _setup_via_manager(self, share_mgr, share_mgr_parent=None): # Enable gluster volumes for SSL access only. gluster_mgr = share_mgr['manager'] gluster_mgr_parent = (share_mgr_parent or {}).get('manager', None) ssl_allow_opt = (gluster_mgr_parent if gluster_mgr_parent else gluster_mgr).get_vol_option( AUTH_SSL_ALLOW) if not ssl_allow_opt: # Not having AUTH_SSL_ALLOW set is a problematic edge case. # - In GlusterFS 3.6, it implies that access is allowed to # none, including intra-service access, which causes # problems internally in GlusterFS # - In GlusterFS 3.7, it implies that access control is # disabled, which defeats the purpose of this driver -- # so to avoid these possibilities, we throw an error in this case. msg = (_("Option %(option)s is not defined on gluster volume. " "Volume: %(volname)s") % {'volname': gluster_mgr.volume, 'option': AUTH_SSL_ALLOW}) LOG.error(msg) raise exception.GlusterfsException(msg) gluster_actions = [] if gluster_mgr_parent: # The clone of the snapshot, a new volume, retains the authorized # access list of the snapshotted volume/share, which includes TLS # identities of the backend servers, Manila hosts and clients. # Retain the identities of the GlusterFS servers and Manila host, # and exclude those of the clients in the authorized access list of # the new volume. The TLS identities of GlusterFS servers are # determined as those that are prefixed by 'glusterfs-server'. 
# And the TLS identity of the Manila host is identified as the # one that has 'manila-host' as the prefix. # Wrt. GlusterFS' parsing of auth.ssl-allow, please see code from # https://github.com/gluster/glusterfs/blob/v3.6.2/ # xlators/protocol/auth/login/src/login.c#L80 # until end of gf_auth() function old_access_list = re.split('[ ,]', ssl_allow_opt) glusterfs_server_CN_pattern = '\Aglusterfs-server' manila_host_CN_pattern = '\Amanila-host' regex = re.compile( '%(pattern1)s|%(pattern2)s' % { 'pattern1': glusterfs_server_CN_pattern, 'pattern2': manila_host_CN_pattern}) access_to = ','.join(filter(regex.match, old_access_list)) gluster_actions.append((AUTH_SSL_ALLOW, access_to)) for option, value in ( (NFS_EXPORT_VOL, False), (CLIENT_SSL, True), (SERVER_SSL, True) ): gluster_actions.append((option, value)) for action in gluster_actions: gluster_mgr.set_vol_option(*action) gluster_mgr.set_vol_option(DYNAMIC_AUTH, True, ignore_failure=True) # SSL enablement requires a fresh volume start # to take effect if gluster_mgr_parent: # in this case the volume is not started # yet (will only be started after this func # returns), so we have nothing to do here pass else: common._restart_gluster_vol(gluster_mgr) return gluster_mgr.export @utils.synchronized("glusterfs_native_access", external=False) def _allow_access_via_manager(self, gluster_mgr, context, share, access, share_server=None): """Allow access to a share using certs. Add the SSL CN (Common Name) that's allowed to access the server. """ if access['access_type'] != ACCESS_TYPE_CERT: raise exception.InvalidShareAccess(_("Only 'cert' access type " "allowed")) ssl_allow_opt = gluster_mgr.get_vol_option(AUTH_SSL_ALLOW) # wrt. GlusterFS' parsing of auth.ssl-allow, please see code from # https://github.com/gluster/glusterfs/blob/v3.6.2/ # xlators/protocol/auth/login/src/login.c#L80 # until end of gf_auth() function ssl_allow = re.split('[ ,]', ssl_allow_opt) access_to = access['access_to'] if access_to in ssl_allow: LOG.warning(_LW("Access to %(share)s at %(export)s is already " "granted for %(access_to)s. GlusterFS volume " "options might have been changed externally."), {'share': share['id'], 'export': gluster_mgr.qualified, 'access_to': access_to}) return ssl_allow.append(access_to) ssl_allow_opt = ','.join(ssl_allow) gluster_mgr.set_vol_option(AUTH_SSL_ALLOW, ssl_allow_opt) @utils.synchronized("glusterfs_native_access", external=False) def _deny_access_via_manager(self, gluster_mgr, context, share, access, share_server=None): """Deny access to a share that's using cert based auth. Remove the SSL CN (Common Name) that's allowed to access the server. """ if access['access_type'] != ACCESS_TYPE_CERT: raise exception.InvalidShareAccess(_("Only 'cert' access type " "allowed for access " "removal.")) ssl_allow_opt = gluster_mgr.get_vol_option(AUTH_SSL_ALLOW) ssl_allow = re.split('[ ,]', ssl_allow_opt) access_to = access['access_to'] if access_to not in ssl_allow: LOG.warning(_LW("Access to %(share)s at %(export)s is already " "denied for %(access_to)s. 
GlusterFS volume " "options might have been changed externally."), {'share': share['id'], 'export': gluster_mgr.qualified, 'access_to': access_to}) return ssl_allow.remove(access_to) ssl_allow_opt = ','.join(ssl_allow) gluster_mgr.set_vol_option(AUTH_SSL_ALLOW, ssl_allow_opt) dynauth = gluster_mgr.get_vol_option(DYNAMIC_AUTH, boolean=True) if not dynauth: common._restart_gluster_vol(gluster_mgr) def _update_share_stats(self): """Send stats info for the GlusterFS volume.""" data = dict( share_backend_name=self.backend_name, vendor_name='Red Hat', driver_version='1.1', storage_protocol='glusterfs', reserved_percentage=self.configuration.reserved_share_percentage) # We don't use a service mount to get stats data. # Instead we use glusterfs quota feature and use that to limit # the share to its expected share['size']. # TODO(deepakcs): Change below once glusterfs supports volume # specific stats via the gluster cli. data['total_capacity_gb'] = 'unknown' data['free_capacity_gb'] = 'unknown' super(GlusterfsNativeShareDriver, self)._update_share_stats(data) def get_network_allocations_number(self): return 0 manila-2.0.0/manila/share/drivers/helpers.py0000664000567000056710000005176112701407107022161 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import re from oslo_log import log from manila.common import constants as const from manila import exception from manila.i18n import _ from manila.i18n import _LW from manila import utils LOG = log.getLogger(__name__) class NASHelperBase(object): """Interface to work with share.""" def __init__(self, execute, ssh_execute, config_object): self.configuration = config_object self._execute = execute self._ssh_exec = ssh_execute def init_helper(self, server): pass def create_export(self, server, share_name, recreate=False): """Create new export, delete old one if exists.""" raise NotImplementedError() def remove_export(self, server, share_name): """Remove export.""" raise NotImplementedError() def configure_access(self, server, share_name): """Configure server before allowing access.""" pass def update_access(self, server, share_name, access_rules, add_rules, delete_rules): """Update access rules for given share. This driver has two different behaviors according to parameters: 1. Recovery after error - 'access_rules' contains all access_rules, 'add_rules' and 'delete_rules' shall be empty. Previously existing access rules are cleared and then added back according to 'access_rules'. 2. Adding/Deleting of several access rules - 'access_rules' contains all access_rules, 'add_rules' and 'delete_rules' contain rules which should be added/deleted. Rules in 'access_rules' are ignored and only rules from 'add_rules' and 'delete_rules' are applied. :param server: None or Share server's backend details :param share_name: Share's path according to id. :param access_rules: All access rules for given share :param add_rules: Empty List or List of access rules which should be added. 
access_rules already contains these rules. :param delete_rules: Empty List or List of access rules which should be removed. access_rules doesn't contain these rules. """ raise NotImplementedError() @staticmethod def _verify_server_has_public_address(server): if 'public_address' not in server: raise exception.ManilaException( _("Can not get 'public_address' for generation of export.")) def get_exports_for_share(self, server, old_export_location): """Returns list of exports based on server info.""" raise NotImplementedError() def get_share_path_by_export_location(self, server, export_location): """Returns share path by its export location.""" raise NotImplementedError() def disable_access_for_maintenance(self, server, share_name): """Disables access to share to perform maintenance operations.""" def restore_access_after_maintenance(self, server, share_name): """Enables access to share after maintenance operations were done.""" @staticmethod def validate_access_rules(access_rules, allowed_types, allowed_levels): """Validates access rules according to access_type and access_level. :param access_rules: List of access rules to be validated. :param allowed_types: tuple of allowed type values. :param allowed_levels: tuple of allowed level values. """ for access in (access_rules or []): access_type = access['access_type'] access_level = access['access_level'] if access_type not in allowed_types: reason = _("Only %s access type allowed.") % ( ', '.join(tuple(["'%s'" % x for x in allowed_types]))) raise exception.InvalidShareAccess(reason=reason) if access_level not in allowed_levels: raise exception.InvalidShareAccessLevel(level=access_level) def _get_maintenance_file_path(self, share_name): return os.path.join(self.configuration.share_mount_path, "%s.maintenance" % share_name) def nfs_synchronized(f): def wrapped_func(self, *args, **kwargs): key = "nfs-%s" % args[0].get("lock_name", args[0]["instance_id"]) # NOTE(vponomaryov): 'external' lock is required for DHSS=False # mode of LVM and Generic drivers, that may have lots of # driver instances on single host. @utils.synchronized(key, external=True) def source_func(self, *args, **kwargs): return f(self, *args, **kwargs) return source_func(self, *args, **kwargs) return wrapped_func class NFSHelper(NASHelperBase): """Interface to work with share.""" def create_export(self, server, share_name, recreate=False): """Create new export, delete old one if exists.""" return ':'.join((server['public_address'], os.path.join( self.configuration.share_mount_path, share_name))) def init_helper(self, server): try: self._ssh_exec(server, ['sudo', 'exportfs']) except exception.ProcessExecutionError as e: if 'command not found' in e.stderr: raise exception.ManilaException( _('NFS server is not installed on %s') % server['instance_id']) LOG.error(e.stderr) def remove_export(self, server, share_name): """Remove export.""" def _get_parsed_access_to(self, access_to): netmask = utils.cidr_to_netmask(access_to) if netmask == '255.255.255.255': return access_to.split('/')[0] return access_to.split('/')[0] + '/' + netmask @nfs_synchronized def update_access(self, server, share_name, access_rules, add_rules, delete_rules): """Update access rules for given share. Please refer to base class for a more in-depth description. 
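For illustration only (hypothetical rule objects r1 and r2, both of access_type 'ip', and a hypothetical share name 'share-1'): a recovery call such as update_access(server, 'share-1', access_rules=[r1, r2], add_rules=[], delete_rules=[]) unexports everything and re-applies both rules, while an incremental call such as update_access(server, 'share-1', access_rules=[r1, r2], add_rules=[r2], delete_rules=[]) only adds the export for r2.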
""" local_path = os.path.join(self.configuration.share_mount_path, share_name) out, err = self._ssh_exec(server, ['sudo', 'exportfs']) # Recovery mode if not (add_rules or delete_rules): self.validate_access_rules( access_rules, ('ip',), (const.ACCESS_LEVEL_RO, const.ACCESS_LEVEL_RW)) hosts = self._get_host_list(out, local_path) for host in hosts: self._ssh_exec(server, ['sudo', 'exportfs', '-u', ':'.join((host, local_path))]) self._sync_nfs_temp_and_perm_files(server) for access in access_rules: rules_options = '%s,no_subtree_check' if access['access_level'] == const.ACCESS_LEVEL_RW: rules_options = ','.join((rules_options, 'no_root_squash')) self._ssh_exec( server, ['sudo', 'exportfs', '-o', rules_options % access['access_level'], ':'.join((self._get_parsed_access_to(access['access_to']), local_path))]) self._sync_nfs_temp_and_perm_files(server) # Adding/Deleting specific rules else: self.validate_access_rules( add_rules, ('ip',), (const.ACCESS_LEVEL_RO, const.ACCESS_LEVEL_RW)) for access in delete_rules: access['access_to'] = self._get_parsed_access_to( access['access_to']) try: self.validate_access_rules( [access], ('ip',), (const.ACCESS_LEVEL_RO, const.ACCESS_LEVEL_RW)) except (exception.InvalidShareAccess, exception.InvalidShareAccessLevel): LOG.warning(_LW( "Unsupported access level %(level)s or access type " "%(type)s, skipping removal of access rule to " "%(to)s.") % {'level': access['access_level'], 'type': access['access_type'], 'to': access['access_to']}) continue self._ssh_exec(server, ['sudo', 'exportfs', '-u', ':'.join((access['access_to'], local_path))]) if delete_rules: self._sync_nfs_temp_and_perm_files(server) for access in add_rules: access['access_to'] = self._get_parsed_access_to( access['access_to']) found_item = re.search( re.escape(local_path) + '[\s\n]*' + re.escape( access['access_to']), out) if found_item is not None: LOG.warning(_LW("Access rule %(type)s:%(to)s already " "exists for share %(name)s") % { 'to': access['access_to'], 'type': access['access_type'], 'name': share_name }) else: rules_options = '%s,no_subtree_check' if access['access_level'] == const.ACCESS_LEVEL_RW: rules_options = ','.join((rules_options, 'no_root_squash')) self._ssh_exec( server, ['sudo', 'exportfs', '-o', rules_options % access['access_level'], ':'.join((access['access_to'], local_path))]) if add_rules: self._sync_nfs_temp_and_perm_files(server) def _get_host_list(self, output, local_path): entries = [] output = output.replace('\n\t\t', ' ') lines = output.split('\n') for line in lines: items = line.split(' ') if local_path == items[0]: entries.append(items[1]) return entries def _sync_nfs_temp_and_perm_files(self, server): """Sync changes of exports with permanent NFS config file. This is required to ensure, that after share server reboot, exports still exist. 
""" sync_cmd = [ 'sudo', 'cp', const.NFS_EXPORTS_FILE_TEMP, const.NFS_EXPORTS_FILE ] self._ssh_exec(server, sync_cmd) self._ssh_exec(server, ['sudo', 'exportfs', '-a']) out, _ = self._ssh_exec( server, ['sudo', 'service', 'nfs-kernel-server', 'status'], check_exit_code=False) if "not" in out: self._ssh_exec( server, ['sudo', 'service', 'nfs-kernel-server', 'restart']) def get_exports_for_share(self, server, old_export_location): self._verify_server_has_public_address(server) path = old_export_location.split(':')[-1] return [':'.join((server['public_address'], path))] def get_share_path_by_export_location(self, server, export_location): return export_location.split(':')[-1] @nfs_synchronized def disable_access_for_maintenance(self, server, share_name): maintenance_file = self._get_maintenance_file_path(share_name) backup_exports = [ 'cat', const.NFS_EXPORTS_FILE, '| grep', share_name, '| sudo tee', maintenance_file ] self._ssh_exec(server, backup_exports) local_path = os.path.join(self.configuration.share_mount_path, share_name) self._ssh_exec(server, ['sudo', 'exportfs', '-u', local_path]) self._sync_nfs_temp_and_perm_files(server) @nfs_synchronized def restore_access_after_maintenance(self, server, share_name): maintenance_file = self._get_maintenance_file_path(share_name) restore_exports = [ 'cat', maintenance_file, '| sudo tee -a', const.NFS_EXPORTS_FILE, '&& sudo exportfs -r', '&& sudo rm -f', maintenance_file ] self._ssh_exec(server, restore_exports) class CIFSHelperIPAccess(NASHelperBase): """Manage shares in samba server by net conf tool. Class provides functionality to operate with CIFS shares. Samba server should be configured to use registry as configuration backend to allow dynamically share managements. This class allows to define access to shares by IPs with RW access level. 
""" def __init__(self, *args): super(CIFSHelperIPAccess, self).__init__(*args) self.export_format = '\\\\%s\\%s' self.parameters = { 'browseable': 'yes', '\"create mask\"': '0755', '\"hosts deny\"': '0.0.0.0/0', # deny all by default '\"hosts allow\"': '127.0.0.1', '\"read only\"': 'no', } def init_helper(self, server): # This is smoke check that we have required dependency self._ssh_exec(server, ['sudo', 'net', 'conf', 'list']) def create_export(self, server, share_name, recreate=False): """Create share at samba server.""" share_path = os.path.join(self.configuration.share_mount_path, share_name) create_cmd = [ 'sudo', 'net', 'conf', 'addshare', share_name, share_path, 'writeable=y', 'guest_ok=y', ] try: self._ssh_exec( server, ['sudo', 'net', 'conf', 'showshare', share_name, ]) except exception.ProcessExecutionError: # Share does not exist, create it try: self._ssh_exec(server, create_cmd) except Exception as child_e: msg = _("Could not create CIFS export %s.") % share_name LOG.exception(child_e) LOG.error(msg) raise exception.ManilaException(reason=msg) else: # Share exists if recreate: self._ssh_exec( server, ['sudo', 'net', 'conf', 'delshare', share_name, ]) try: self._ssh_exec(server, create_cmd) except Exception as e: msg = _("Could not create CIFS export %s.") % share_name LOG.exception(e) LOG.error(msg) raise exception.ManilaException(reason=msg) else: msg = _('Share section %s already defined.') % share_name raise exception.ShareBackendException(msg=msg) for param, value in self.parameters.items(): self._ssh_exec(server, ['sudo', 'net', 'conf', 'setparm', share_name, param, value]) return self.export_format % (server['public_address'], share_name) def remove_export(self, server, share_name): """Remove share definition from samba server.""" try: self._ssh_exec( server, ['sudo', 'net', 'conf', 'delshare', share_name]) except exception.ProcessExecutionError as e: LOG.warning(_LW("Caught error trying delete share: %(error)s, try" "ing delete it forcibly."), {'error': e.stderr}) self._ssh_exec(server, ['sudo', 'smbcontrol', 'all', 'close-share', share_name]) def update_access(self, server, share_name, access_rules, add_rules, delete_rules): """Update access rules for given share. Please refer to base class for a more in-depth description. For this specific implementation, add_rules and delete_rules parameters are not used. 
""" hosts = [] self.validate_access_rules( access_rules, ('ip',), (const.ACCESS_LEVEL_RW,)) for access in access_rules: hosts.append(access['access_to']) self._set_allow_hosts(server, hosts, share_name) def _get_allow_hosts(self, server, share_name): (out, _) = self._ssh_exec(server, ['sudo', 'net', 'conf', 'getparm', share_name, '\"hosts allow\"']) return out.split() def _set_allow_hosts(self, server, hosts, share_name): value = "\"" + ' '.join(hosts) + "\"" self._ssh_exec(server, ['sudo', 'net', 'conf', 'setparm', share_name, '\"hosts allow\"', value]) @staticmethod def _get_share_group_name_from_export_location(export_location): if '/' in export_location and '\\' in export_location: pass elif export_location.startswith('\\\\'): return export_location.split('\\')[-1] elif export_location.startswith('//'): return export_location.split('/')[-1] msg = _("Got incorrect CIFS export location '%s'.") % export_location raise exception.InvalidShare(reason=msg) def get_exports_for_share(self, server, old_export_location): self._verify_server_has_public_address(server) group_name = self._get_share_group_name_from_export_location( old_export_location) data = dict(ip=server['public_address'], share=group_name) return ['\\\\%(ip)s\\%(share)s' % data] def get_share_path_by_export_location(self, server, export_location): # Get name of group that contains share data on CIFS server group_name = self._get_share_group_name_from_export_location( export_location) # Get parameter 'path' from group that belongs to current share (out, __) = self._ssh_exec( server, ['sudo', 'net', 'conf', 'getparm', group_name, 'path']) # Remove special symbols from response and return path return out.strip() def disable_access_for_maintenance(self, server, share_name): maintenance_file = self._get_maintenance_file_path(share_name) allowed_hosts = " ".join(self._get_allow_hosts(server, share_name)) backup_exports = [ 'echo', "'%s'" % allowed_hosts, '| sudo tee', maintenance_file ] self._ssh_exec(server, backup_exports) self._set_allow_hosts(server, [], share_name) def restore_access_after_maintenance(self, server, share_name): maintenance_file = self._get_maintenance_file_path(share_name) (exports, __) = self._ssh_exec(server, ['cat', maintenance_file]) self._set_allow_hosts(server, exports.split(), share_name) self._ssh_exec(server, ['sudo rm -f', maintenance_file]) class CIFSHelperUserAccess(CIFSHelperIPAccess): """Manage shares in samba server by net conf tool. Class provides functionality to operate with CIFS shares. Samba server should be configured to use registry as configuration backend to allow dynamically share managements. This class allows to define access to shares by usernames with either RW or RO access levels. """ def __init__(self, *args): super(CIFSHelperUserAccess, self).__init__(*args) self.export_format = '//%s/%s' self.parameters = { 'browseable': 'yes', 'create mask': '0755', 'hosts allow': '0.0.0.0/0', 'read only': 'no', } def update_access(self, server, share_name, access_rules, add_rules, delete_rules): """Update access rules for given share. Please refer to base class for a more in-depth description. For this specific implementation, add_rules and delete_rules parameters are not used. 
""" all_users_rw = [] all_users_ro = [] self.validate_access_rules( access_rules, ('user',), (const.ACCESS_LEVEL_RO, const.ACCESS_LEVEL_RW)) for access in access_rules: if access['access_level'] == const.ACCESS_LEVEL_RW: all_users_rw.append(access['access_to']) else: all_users_ro.append(access['access_to']) self._set_valid_users( server, all_users_rw, share_name, const.ACCESS_LEVEL_RW) self._set_valid_users( server, all_users_ro, share_name, const.ACCESS_LEVEL_RO) def _get_conf_param(self, access_level): if access_level == const.ACCESS_LEVEL_RW: return 'valid users' else: return 'read list' def _set_valid_users(self, server, users, share_name, access_level): value = "\"" + ' '.join(users) + "\"" param = self._get_conf_param(access_level) self._ssh_exec(server, ['sudo', 'net', 'conf', 'setparm', share_name, param, value]) manila-2.0.0/manila/share/drivers/windows/0000775000567000056710000000000012701407265021632 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/windows/service_instance.py0000664000567000056710000003107512701407107025531 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Cloudbase Solutions SRL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import re from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log from manila import exception from manila.i18n import _, _LI, _LW from manila.share.drivers import service_instance from manila.share.drivers.windows import windows_utils from manila.share.drivers.windows import winrm_helper CONF = cfg.CONF LOG = log.getLogger(__name__) windows_share_server_opts = [ cfg.StrOpt( "winrm_cert_pem_path", default="~/.ssl/cert.pem", help="Path to the x509 certificate used for accessing the service" "instance."), cfg.StrOpt( "winrm_cert_key_pem_path", default="~/.ssl/key.pem", help="Path to the x509 certificate key."), cfg.BoolOpt( "winrm_use_cert_based_auth", default=False, help="Use x509 certificates in order to authenticate to the" "service instance.") ] CONF = cfg.CONF CONF.register_opts(windows_share_server_opts) class WindowsServiceInstanceManager(service_instance.ServiceInstanceManager): """"Manages Windows Nova instances.""" _INSTANCE_CONNECTION_PROTO = "WinRM" _CBS_INIT_RUN_PLUGIN_AFTER_REBOOT = 2 _CBS_INIT_WINRM_PLUGIN = "ConfigWinRMListenerPlugin" _DEFAULT_MINIMUM_PASS_LENGTH = 6 def __init__(self, driver_config=None, remote_execute=None): super(WindowsServiceInstanceManager, self).__init__( driver_config=driver_config) driver_config.append_config_values(windows_share_server_opts) self._use_cert_auth = self.get_config_option( "winrm_use_cert_based_auth") self._cert_pem_path = self.get_config_option( "winrm_cert_pem_path") self._cert_key_pem_path = self.get_config_option( "winrm_cert_key_pem_path") self._check_auth_mode() self._remote_execute = (remote_execute or winrm_helper.WinRMHelper( configuration=driver_config).execute) self._windows_utils = windows_utils.WindowsUtils( remote_execute=self._remote_execute) def _check_auth_mode(self): if 
self._use_cert_auth: if not (os.path.exists(self._cert_pem_path) and os.path.exists(self._cert_key_pem_path)): msg = _("Certificate based authentication was configured " "but one or more certificates are missing.") raise exception.ServiceInstanceException(msg) LOG.debug("Using certificate based authentication for " "service instances.") else: instance_password = self.get_config_option( "service_instance_password") if not self._check_password_complexity(instance_password): msg = _("The configured service instance password does not " "match the minimum complexity requirements. " "The password must contain at least %s characters. " "Also, it must contain at least one digit, " "one lower case and one upper case character.") raise exception.ServiceInstanceException( msg % self._DEFAULT_MINIMUM_PASS_LENGTH) LOG.debug("Using password based authentication for " "service instances.") def _get_auth_info(self): auth_info = {'use_cert_auth': self._use_cert_auth} if self._use_cert_auth: auth_info.update(cert_pem_path=self._cert_pem_path, cert_key_pem_path=self._cert_key_pem_path) return auth_info def get_common_server(self): data = super(WindowsServiceInstanceManager, self).get_common_server() data['backend_details'].update(self._get_auth_info()) return data def _get_new_instance_details(self, server): instance_details = super(WindowsServiceInstanceManager, self)._get_new_instance_details(server) instance_details.update(self._get_auth_info()) return instance_details def _check_password_complexity(self, password): # Make sure that the Windows complexity requirements are met: # http://technet.microsoft.com/en-us/library/cc786468(v=ws.10).aspx if len(password) < self._DEFAULT_MINIMUM_PASS_LENGTH: return False for r in ("[a-z]", "[A-Z]", "[0-9]"): if not re.search(r, password): return False return True def _test_server_connection(self, server): try: self._remote_execute(server, "whoami", retry=False) LOG.debug("Service VM %s is available via WinRM", server['ip']) return True except Exception as ex: LOG.debug("Server %(ip)s is not available via WinRM. " "Exception: %(ex)s ", dict(ip=server['ip'], ex=ex)) return False def _get_service_instance_create_kwargs(self): create_kwargs = {} if self._use_cert_auth: # At the moment, we pass the x509 certificate via user data. # We'll use keypairs instead as soon as the nova client will # support x509 certificates. with open(self._cert_pem_path, 'r') as f: cert_pem_data = f.read() create_kwargs['user_data'] = cert_pem_data else: # The admin password has to be specified via instance metadata in # order to be passed to the instance via the metadata service or # configdrive. 
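# A minimal standalone sketch (illustrative names, not part of the driver) of the
# branching implemented above: with certificate-based auth the PEM contents are
# passed in as user data, otherwise the admin password is handed over via
# instance metadata so it can reach the guest through the metadata service or
# config drive.
def build_service_instance_create_kwargs(use_cert_auth,
                                         cert_pem_path=None,
                                         admin_pass=None):
    create_kwargs = {}
    if use_cert_auth:
        # The x509 certificate is injected via user data.
        with open(cert_pem_path, 'r') as f:
            create_kwargs['user_data'] = f.read()
    else:
        # The password travels as instance metadata.
        create_kwargs['meta'] = {'admin_pass': admin_pass}
    return create_kwargs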
admin_pass = self.get_config_option("service_instance_password") create_kwargs['meta'] = {'admin_pass': admin_pass} return create_kwargs def set_up_service_instance(self, context, network_info): instance_details = super(WindowsServiceInstanceManager, self).set_up_service_instance(context, network_info) security_services = network_info['security_services'] security_service = self.get_valid_security_service(security_services) if security_service: self._setup_security_service(instance_details, security_service) instance_details['joined_domain'] = bool(security_service) return instance_details def _setup_security_service(self, server, security_service): domain = security_service['domain'] admin_username = security_service['user'] admin_password = security_service['password'] dns_ip = security_service['dns_ip'] self._windows_utils.set_dns_client_search_list(server, [domain]) if_index = self._windows_utils.get_interface_index_by_ip(server, server['ip']) self._windows_utils.set_dns_client_server_addresses(server, if_index, [dns_ip]) # Joining an AD domain will alter the WinRM Listener configuration. # Cloudbase-init is required to be running on the Windows service # instance, so we re-enable the plugin configuring the WinRM listener. # # TODO(lpetrut): add a config option so that we may rely on the AD # group policies taking care of the WinRM configuration. self._run_cloudbase_init_plugin_after_reboot( server, plugin_name=self._CBS_INIT_WINRM_PLUGIN) self._join_domain(server, domain, admin_username, admin_password) def _join_domain(self, server, domain, admin_username, admin_password): # As the WinRM configuration may be altered and existing connections # closed, we may not be able to retrieve the result of this operation. # Instead, we'll ensure that the instance actually joined the domain # after the reboot. try: self._windows_utils.join_domain(server, domain, admin_username, admin_password) except processutils.ProcessExecutionError: raise except Exception as exc: LOG.debug("Unexpected error while attempting to join domain " "%(domain)s. Verifying the result of the operation " "after instance reboot. Exception: %(exc)s", dict(domain=domain, exc=exc)) # We reboot the service instance using the Compute API so that # we can wait for it to become active. self.reboot_server(server, soft_reboot=True) self.wait_for_instance_to_be_active( server['instance_id'], timeout=self.max_time_to_build_instance) if not self._check_server_availability(server): raise exception.ServiceInstanceException( _('%(conn_proto)s connection has not been ' 'established to %(server)s in %(time)ss. Giving up.') % { 'conn_proto': self._INSTANCE_CONNECTION_PROTO, 'server': server['ip'], 'time': self.max_time_to_build_instance}) current_domain = self._windows_utils.get_current_domain(server) if current_domain != domain: err_msg = _("Failed to join domain %(requested_domain)s. " "Current domain: %(current_domain)s") raise exception.ServiceInstanceException( err_msg % dict(requested_domain=domain, current_domain=current_domain)) def get_valid_security_service(self, security_services): if not security_services: LOG.info(_LI("No security services provided.")) elif len(security_services) > 1: LOG.warning(_LW("Multiple security services provided. 
Only one " "security service of type 'active_directory' " "is supported.")) else: security_service = security_services[0] security_service_type = security_service['type'] if security_service_type == 'active_directory': return security_service else: LOG.warning(_LW("Only security services of type " "'active_directory' are supported. " "Retrieved security " "service type: %(sec_type)s."), {'sec_type': security_service_type}) return None def _run_cloudbase_init_plugin_after_reboot(self, server, plugin_name): cbs_init_reg_section = self._get_cbs_init_reg_section(server) plugin_key_path = "%(cbs_init_section)s\\%(instance_id)s\\Plugins" % { 'cbs_init_section': cbs_init_reg_section, 'instance_id': server['instance_id'] } self._windows_utils.set_win_reg_value( server, path=plugin_key_path, key=plugin_name, value=self._CBS_INIT_RUN_PLUGIN_AFTER_REBOOT) def _get_cbs_init_reg_section(self, server): base_path = 'hklm:\\SOFTWARE' cbs_section = 'Cloudbase Solutions\\Cloudbase-Init' for upper_section in ('', 'Wow6432Node'): cbs_init_section = self._windows_utils.normalize_path( os.path.join(base_path, upper_section, cbs_section)) try: self._windows_utils.get_win_reg_value( server, path=cbs_init_section) return cbs_init_section except processutils.ProcessExecutionError as ex: # The exit code will always be '1' in case of errors, so the # only way to determine the error type is checking stderr. if 'Cannot find path' in ex.stderr: continue else: raise raise exception.ServiceInstanceException( _("Could not retrieve Cloudbase Init registry section")) manila-2.0.0/manila/share/drivers/windows/__init__.py0000664000567000056710000000000012701407107023724 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/drivers/windows/windows_smb_driver.py0000664000567000056710000002060412701407107026107 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Cloudbase Solutions SRL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_log import log from oslo_utils import units from manila.common import constants as const from manila import exception from manila.i18n import _LW from manila.share import driver as base_driver from manila.share.drivers import generic from manila.share.drivers.windows import service_instance from manila.share.drivers.windows import windows_smb_helper from manila.share.drivers.windows import windows_utils from manila.share.drivers.windows import winrm_helper LOG = log.getLogger(__name__) class WindowsSMBDriver(generic.GenericShareDriver): # NOTE(lpetrut): The first partition will be reserved by the OS. 
_DEFAULT_SHARE_PARTITION = 2 def __init__(self, *args, **kwargs): super(WindowsSMBDriver, self).__init__(*args, **kwargs) self._remote_execute = winrm_helper.WinRMHelper( configuration=self.configuration).execute self._windows_utils = windows_utils.WindowsUtils( remote_execute=self._remote_execute) self._smb_helper = windows_smb_helper.WindowsSMBHelper( remote_execute=self._remote_execute, configuration=self.configuration) def _update_share_stats(self, data=None): base_driver.ShareDriver._update_share_stats( self, data=dict(storage_protocol="CIFS")) def _setup_service_instance_manager(self): self.service_instance_manager = ( service_instance.WindowsServiceInstanceManager( driver_config=self.configuration)) def _setup_helpers(self): self._helpers = {key: self._smb_helper for key in ("SMB", "CIFS")} def _teardown_server(self, server_details, security_services=None): security_service = ( self.service_instance_manager.get_valid_security_service( security_services)) if server_details.get('joined_domain') and security_service: try: self._windows_utils.unjoin_domain(server_details, security_service['user'], security_service['password']) except Exception as exc: LOG.warning(_LW("Failed to remove service instance " "%(instance_id)s from domain %(domain)s. " "Exception: %(exc)s."), dict(instance_id=server_details['instance_id'], domain=security_service['domain'], exc=exc)) super(WindowsSMBDriver, self)._teardown_server(server_details, security_services) def _format_device(self, server_details, volume): disk_number = self._get_disk_number(server_details, volume) self._windows_utils.initialize_disk(server_details, disk_number) self._windows_utils.create_partition(server_details, disk_number) self._windows_utils.format_partition( server_details, disk_number, self._DEFAULT_SHARE_PARTITION) def _mount_device(self, share, server_details, volume): mount_path = self._get_mount_path(share) if not self._is_device_mounted(mount_path, server_details, volume): disk_number = self._get_disk_number(server_details, volume) self._windows_utils.ensure_directory_exists(server_details, mount_path) self._ensure_disk_online_and_writable(server_details, disk_number) self._windows_utils.add_access_path(server_details, mount_path, disk_number, self._DEFAULT_SHARE_PARTITION) def _unmount_device(self, share, server_details): mount_path = self._get_mount_path(share) disk_number = self._windows_utils.get_disk_number_by_mount_path( server_details, mount_path) self._windows_utils.remove(server_details, mount_path, is_junction=True) if disk_number: self._windows_utils.set_disk_online_status( server_details, disk_number, online=False) def _resize_filesystem(self, server_details, volume, new_size=None): disk_number = self._get_disk_number(server_details, volume) self._ensure_disk_online_and_writable(server_details, disk_number) if not new_size: new_size_bytes = self._windows_utils.get_partition_maximum_size( server_details, disk_number, self._DEFAULT_SHARE_PARTITION) else: new_size_bytes = new_size * units.Gi self._windows_utils.resize_partition(server_details, new_size_bytes, disk_number, self._DEFAULT_SHARE_PARTITION) def _ensure_disk_online_and_writable(self, server_details, disk_number): self._windows_utils.update_disk(server_details, disk_number) self._windows_utils.set_disk_readonly_status( server_details, disk_number, readonly=False) self._windows_utils.set_disk_online_status( server_details, disk_number, online=True) def _get_mounted_share_size(self, mount_path, server_details): total_bytes = 
self._windows_utils.get_disk_space_by_path( server_details, mount_path)[0] return float(total_bytes) / units.Gi def _get_consumed_space(self, mount_path, server_details): total_bytes, free_bytes = self._windows_utils.get_disk_space_by_path( server_details, mount_path) return float(total_bytes - free_bytes) / units.Gi def _get_mount_path(self, share): mount_path = os.path.join(self.configuration.share_mount_path, share['name']) return self._windows_utils.normalize_path(mount_path) def _get_disk_number(self, server_details, volume): disk_number = self._windows_utils.get_disk_number_by_serial_number( server_details, volume['id']) if disk_number is None: LOG.debug("Could not identify the mounted disk by serial number " "using the volume id %(volume_id)s. Attempting to " "retrieve it by the volume mount point %(mountpoint)s.", dict(volume_id=volume['id'], mountpoint=volume['mountpoint'])) # Assumes the mount_point will be something like /dev/hdX mount_point = volume['mountpoint'] disk_number = ord(mount_point[-1]) - ord('a') return disk_number def _is_device_mounted(self, mount_path, server_details, volume=None): disk_number = self._windows_utils.get_disk_number_by_mount_path( server_details, mount_path) return disk_number is not None @generic.ensure_server def allow_access(self, context, share, access, share_server=None): """Allow access to the share.""" # NOTE(vponomaryov): use direct verification for case some additional # level is added. access_level = access['access_level'] if access_level not in (const.ACCESS_LEVEL_RW, const.ACCESS_LEVEL_RO): raise exception.InvalidShareAccessLevel(level=access_level) self._get_helper(share).allow_access( share_server['backend_details'], share['name'], access['access_type'], access['access_level'], access['access_to']) @generic.ensure_server def deny_access(self, context, share, access, share_server=None): """Deny access to the share.""" self._get_helper(share).deny_access( share_server['backend_details'], share['name'], access) manila-2.0.0/manila/share/drivers/windows/winrm_helper.py0000664000567000056710000001455312701407107024702 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Cloudbase Solutions SRL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
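# Illustrative standalone sketch (assumed helper name, not manila code) of the
# fallback used by WindowsSMBDriver._get_disk_number above: when a disk cannot
# be located by its volume serial number, the Nova mountpoint (e.g. '/dev/vdb')
# is mapped to a disk index by its trailing letter ('a' -> 0, 'b' -> 1, ...).
def disk_number_from_mountpoint(mount_point):
    return ord(mount_point[-1]) - ord('a')

# disk_number_from_mountpoint('/dev/vdb') == 1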
import base64 from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log from oslo_utils import importutils from oslo_utils import strutils import six from manila import exception from manila.i18n import _ from manila import utils LOG = log.getLogger(__name__) CONF = cfg.CONF winrm_opts = [ cfg.IntOpt( 'winrm_conn_timeout', default=60, help='WinRM connection timeout.'), cfg.IntOpt( 'winrm_operation_timeout', default=60, help='WinRM operation timeout.'), cfg.IntOpt( 'winrm_retry_count', default=3, help='WinRM retry count.'), cfg.IntOpt( 'winrm_retry_interval', default=5, help='WinRM retry interval in seconds'), ] CONF.register_opts(winrm_opts) DEFAULT_PORT_HTTP = 5985 DEFAULT_PORT_HTTPS = 5986 TRANSPORT_PLAINTEXT = 'plaintext' TRANSPORT_SSL = 'ssl' winrm = None def setup_winrm(): global winrm if not winrm: try: winrm = importutils.import_module('winrm') except ImportError: raise exception.ShareBackendException( _("PyWinrm is not installed")) class WinRMHelper(object): def __init__(self, configuration=None): if configuration: configuration.append_config_values(winrm_opts) self._config = configuration else: self._config = CONF setup_winrm() def _get_conn(self, server): auth = self._get_auth(server) conn = WinRMConnection( ip=server['ip'], conn_timeout=self._config.winrm_conn_timeout, operation_timeout=self._config.winrm_operation_timeout, **auth) return conn def execute(self, server, command, check_exit_code=True, retry=True): retries = self._config.winrm_retry_count if retry else 1 conn = self._get_conn(server) @utils.retry(exception=Exception, interval=self._config.winrm_retry_interval, retries=retries) def _execute(): parsed_cmd, sanitized_cmd = self._parse_command(command) LOG.debug("Executing command: %s", sanitized_cmd) (stdout, stderr, exit_code) = conn.execute(parsed_cmd) sanitized_stdout = strutils.mask_password(stdout) sanitized_stderr = strutils.mask_password(stderr) LOG.debug("Executed command: %(cmd)s. Stdout: %(stdout)s. " "Stderr: %(stderr)s. 
Exit code %(exit_code)s", dict(cmd=sanitized_cmd, stdout=sanitized_stdout, stderr=sanitized_stderr, exit_code=exit_code)) if check_exit_code and exit_code != 0: raise processutils.ProcessExecutionError( stdout=sanitized_stdout, stderr=sanitized_stderr, exit_code=exit_code, cmd=sanitized_cmd) return (stdout, stderr) return _execute() def _parse_command(self, command): if isinstance(command, list) or isinstance(command, tuple): command = " ".join([six.text_type(c) for c in command]) sanitized_cmd = strutils.mask_password(command) b64_command = base64.b64encode(command.encode("utf_16_le")) command = ("powershell.exe -ExecutionPolicy RemoteSigned " "-NonInteractive -EncodedCommand %s" % b64_command) return command, sanitized_cmd def _get_auth(self, server): auth = {'username': server['username']} if server['use_cert_auth']: auth['cert_pem_path'] = server['cert_pem_path'] auth['cert_key_pem_path'] = server['cert_key_pem_path'] else: auth['password'] = server['password'] return auth class WinRMConnection(object): _URL_TEMPLATE = '%(protocol)s://%(ip)s:%(port)s/wsman' def __init__(self, ip=None, port=None, use_ssl=False, transport=None, username=None, password=None, cert_pem_path=None, cert_key_pem_path=None, operation_timeout=None, conn_timeout=None): setup_winrm() use_cert = bool(cert_pem_path and cert_key_pem_path) transport = (TRANSPORT_SSL if use_cert else TRANSPORT_PLAINTEXT) _port = port or self._get_default_port(use_cert) _url = self._get_url(ip, _port, use_cert) self._conn = winrm.protocol.Protocol( endpoint=_url, transport=transport, username=username, password=password, cert_pem=cert_pem_path, cert_key_pem=cert_key_pem_path) self._conn.transport.timeout = conn_timeout self._conn.set_timeout(operation_timeout) def _get_default_port(self, use_ssl): port = (DEFAULT_PORT_HTTPS if use_ssl else DEFAULT_PORT_HTTP) return port def _get_url(self, ip, port, use_ssl): if not ip: err_msg = _("No IP provided.") raise exception.ShareBackendException(msg=err_msg) protocol = 'https' if use_ssl else 'http' return self._URL_TEMPLATE % {'protocol': protocol, 'ip': ip, 'port': port} def execute(self, cmd): shell_id = None cmd_id = None try: shell_id = self._conn.open_shell() cmd_id = self._conn.run_command(shell_id, cmd) (stdout, stderr, exit_code) = self._conn.get_command_output(shell_id, cmd_id) finally: if cmd_id: self._conn.cleanup_command(shell_id, cmd_id) if shell_id: self._conn.close_shell(shell_id) return (stdout, stderr, exit_code) manila-2.0.0/manila/share/drivers/windows/windows_smb_helper.py0000664000567000056710000001456512701407107026104 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Cloudbase Solutions SRL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
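# A minimal standalone sketch (illustrative helper, not part of the driver)
# mirroring WinRMHelper._parse_command above: PowerShell's -EncodedCommand flag
# expects the command base64-encoded over its UTF-16LE bytes, which lets an
# arbitrary command line be passed through WinRM without quoting issues.
import base64


def encode_ps_command(command):
    b64_command = base64.b64encode(command.encode('utf_16_le')).decode('ascii')
    return ('powershell.exe -ExecutionPolicy RemoteSigned '
            '-NonInteractive -EncodedCommand %s' % b64_command)

# encode_ps_command('Get-SmbShare') yields a single command string that can be
# executed through the remote WinRM shell.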
import os from oslo_log import log from manila.common import constants from manila import exception from manila.i18n import _, _LI from manila.share.drivers import helpers from manila.share.drivers.windows import windows_utils LOG = log.getLogger(__name__) class WindowsSMBHelper(helpers.NASHelperBase): _SHARE_ACCESS_RIGHT_MAP = { constants.ACCESS_LEVEL_RW: "Change", constants.ACCESS_LEVEL_RO: "Read"} _ICACLS_ACCESS_RIGHT_MAP = { constants.ACCESS_LEVEL_RW: 'M', constants.ACCESS_LEVEL_RO: 'R'} def __init__(self, remote_execute, configuration): self._remote_exec = remote_execute self.configuration = configuration self._windows_utils = windows_utils.WindowsUtils( remote_execute=remote_execute) def init_helper(self, server): self._remote_exec(server, "Get-SmbShare") def create_export(self, server, share_name, recreate=False): export_location = '\\\\%s\\%s' % (server['public_address'], share_name) if not self._share_exists(server, share_name): share_path = self._windows_utils.normalize_path( os.path.join(self.configuration.share_mount_path, share_name)) cmd = ['New-SmbShare', '-Name', share_name, '-Path', share_path] self._remote_exec(server, cmd) else: LOG.info(_LI("Skipping creating export %s as it already exists."), share_name) return export_location def remove_export(self, server, share_name): if self._share_exists(server, share_name): cmd = ['Remove-SmbShare', '-Name', share_name, "-Force"] self._remote_exec(server, cmd) else: LOG.debug("Skipping removing export %s as it does not exist.", share_name) def _get_volume_path_by_share_name(self, server, share_name): share_path = self._get_share_path_by_name(server, share_name) volume_path = self._windows_utils.get_volume_path_by_mount_path( server, share_path) return volume_path def allow_access(self, server, share_name, access_type, access_level, access_to): """Add access for share.""" if access_type != 'user': reason = _('Only user access type allowed.') raise exception.InvalidShareAccess(reason=reason) self._grant_share_access(server, share_name, access_level, access_to) self._grant_share_path_access(server, share_name, access_level, access_to) def _grant_share_access(self, server, share_name, access_level, access_to): access_right = self._SHARE_ACCESS_RIGHT_MAP[access_level] cmd = ["Grant-SmbShareAccess", "-Name", share_name, "-AccessRight", access_right, "-AccountName", access_to, "-Force"] self._remote_exec(server, cmd) self._refresh_acl(server, share_name) def _grant_share_path_access(self, server, share_name, access_level, access_to): # Set NTFS level permissions access_right = self._ICACLS_ACCESS_RIGHT_MAP[access_level] ace = '"%(access_to)s:(OI)(CI)%(access_right)s"' % dict( access_to=access_to, access_right=access_right) vol_path = self._get_volume_path_by_share_name(server, share_name) cmd = ["icacls", self._windows_utils.quote_string(vol_path), "/grant", ace, "/t", "/c"] self._remote_exec(server, cmd) def _refresh_acl(self, server, share_name): cmd = ['Set-SmbPathAcl', '-ShareName', share_name] self._remote_exec(server, cmd) def deny_access(self, server, share_name, access, force=False): access_to = access['access_to'] self._revoke_share_access(server, share_name, access_to) self._revoke_share_path_access(server, share_name, access_to) def _revoke_share_access(self, server, share_name, access_to): cmd = ['Revoke-SmbShareAccess', '-Name', share_name, '-AccountName', access_to, '-Force'] self._remote_exec(server, cmd) self._refresh_acl(server, share_name) def _revoke_share_path_access(self, server, share_name, access_to): vol_path = 
self._get_volume_path_by_share_name(server, share_name) cmd = ["icacls", self._windows_utils.quote_string(vol_path), "/remove", access_to, "/t", "/c"] self._remote_exec(server, cmd) def _get_share_name(self, export_location): return self._windows_utils.normalize_path( export_location).split('\\')[-1] def get_exports_for_share(self, server, old_export_location): share_name = self._get_share_name(old_export_location) data = dict(ip=server['public_address'], share_name=share_name) return ['\\\\%(ip)s\\%(share_name)s' % data] def _get_share_path_by_name(self, server, share_name, ignore_missing=False): cmd = ('Get-SmbShare -Name %s | ' 'Select-Object -ExpandProperty Path' % share_name) check_exit_code = not ignore_missing (share_path, err) = self._remote_exec(server, cmd, check_exit_code=check_exit_code) return share_path.strip() if share_path else None def get_share_path_by_export_location(self, server, export_location): share_name = self._get_share_name(export_location) return self._get_share_path_by_name(server, share_name) def _share_exists(self, server, share_name): share_path = self._get_share_path_by_name(server, share_name, ignore_missing=True) return bool(share_path) manila-2.0.0/manila/share/drivers/windows/windows_utils.py0000664000567000056710000002232012701407107025110 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Cloudbase Solutions SRL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
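# Illustrative standalone sketch (assumed helper name, not manila code) of how
# the NTFS ACE string used by WindowsSMBHelper._grant_share_path_access above
# is composed: (OI) object inherit and (CI) container inherit flags, followed
# by the icacls right ('M' modify for rw access, 'R' read for ro access).
def build_icacls_ace(account_name, access_level):
    icacls_right_map = {'rw': 'M', 'ro': 'R'}
    return '"%(account)s:(OI)(CI)%(right)s"' % {
        'account': account_name,
        'right': icacls_right_map[access_level]}

# build_icacls_ace('MANILA\\alice', 'rw') == '"MANILA\\alice:(OI)(CI)M"'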
import re from oslo_log import log from manila.i18n import _LI LOG = log.getLogger(__name__) class WindowsUtils(object): def __init__(self, remote_execute): self._remote_exec = remote_execute self._fsutil_total_space_regex = re.compile('of bytes *: ([0-9]*)') self._fsutil_free_space_regex = re.compile( 'of avail free bytes *: ([0-9]*)') def initialize_disk(self, server, disk_number): cmd = ["Initialize-Disk", "-Number", disk_number] self._remote_exec(server, cmd) def create_partition(self, server, disk_number): cmd = ["New-Partition", "-DiskNumber", disk_number, "-UseMaximumSize"] self._remote_exec(server, cmd) def format_partition(self, server, disk_number, partition_number): cmd = ("Get-Partition -DiskNumber %(disk_number)s " "-PartitionNumber %(partition_number)s | " "Format-Volume -FileSystem NTFS -Force -Confirm:$false" % { 'disk_number': disk_number, 'partition_number': partition_number, }) self._remote_exec(server, cmd) def add_access_path(self, server, mount_path, disk_number, partition_number): cmd = ["Add-PartitionAccessPath", "-DiskNumber", disk_number, "-PartitionNumber", partition_number, "-AccessPath", self.quote_string(mount_path)] self._remote_exec(server, cmd) def resize_partition(self, server, size_bytes, disk_number, partition_number): cmd = ['Resize-Partition', '-DiskNumber', disk_number, '-PartitionNumber', partition_number, '-Size', size_bytes] self._remote_exec(server, cmd) def get_disk_number_by_serial_number(self, server, serial_number): pattern = "%s*" % serial_number[:15] cmd = ("Get-Disk | " "Where-Object {$_.SerialNumber -like '%s'} | " "Select-Object -ExpandProperty Number" % pattern) (out, err) = self._remote_exec(server, cmd) return int(out) if (len(out) > 0) else None def get_disk_number_by_mount_path(self, server, mount_path): cmd = ('Get-Partition | ' 'Where-Object {$_.AccessPaths -contains "%s"} | ' 'Select-Object -ExpandProperty DiskNumber' % (mount_path + "\\")) (out, err) = self._remote_exec(server, cmd) return int(out) if (len(out) > 0) else None def get_volume_path_by_mount_path(self, server, mount_path): cmd = ('Get-Partition | ' 'Where-Object {$_.AccessPaths -contains "%s"} | ' 'Get-Volume | ' 'Select-Object -ExpandProperty Path' % (mount_path + "\\")) (out, err) = self._remote_exec(server, cmd) return out.strip() def get_disk_space_by_path(self, server, mount_path): cmd = ["fsutil", "volume", "diskfree", self.quote_string(mount_path)] (out, err) = self._remote_exec(server, cmd) total_bytes = int(self._fsutil_total_space_regex.findall(out)[0]) free_bytes = int(self._fsutil_free_space_regex.findall(out)[0]) return total_bytes, free_bytes def get_partition_maximum_size(self, server, disk_number, partition_number): cmd = ('Get-PartitionSupportedSize -DiskNumber %(disk_number)s ' '-PartitionNumber %(partition_number)s | ' 'Select-Object -ExpandProperty SizeMax' % dict(disk_number=disk_number, partition_number=partition_number)) (out, err) = self._remote_exec(server, cmd) max_bytes = int(out) return max_bytes def set_disk_online_status(self, server, disk_number, online=True): is_offline = int(not online) cmd = ["Set-Disk", "-Number", disk_number, "-IsOffline", is_offline] self._remote_exec(server, cmd) def set_disk_readonly_status(self, server, disk_number, readonly=False): cmd = ["Set-Disk", "-Number", disk_number, "-IsReadOnly", int(readonly)] self._remote_exec(server, cmd) def update_disk(self, server, disk_number): """Updates cached disk information.""" cmd = ["Update-Disk", disk_number] self._remote_exec(server, cmd) def join_domain(self, server, 
domain, admin_username, admin_password): # NOTE(lpetrut): An instance reboot is needed but this will be # performed using Nova so that the instance state can be # retrieved easier. LOG.info(_LI("Joining server %(ip)s to Active Directory " "domain %(domain)s"), dict(ip=server['ip'], domain=domain)) cmds = [ ('$password = "%s" | ' 'ConvertTo-SecureString -asPlainText -Force' % admin_password), ('$credential = ' 'New-Object System.Management.Automation.PSCredential(' '"%s", $password)' % admin_username), ('Add-Computer -DomainName "%s" -Credential $credential' % domain)] cmd = ";".join(cmds) self._remote_exec(server, cmd) def unjoin_domain(self, server, admin_username, admin_password, reboot=False): cmds = [ ('$password = "%s" | ' 'ConvertTo-SecureString -asPlainText -Force' % admin_password), ('$credential = ' 'New-Object System.Management.Automation.PSCredential(' '"%s", $password)' % admin_username), ('Remove-Computer -UnjoinDomaincredential $credential ' '-Passthru -Verbose -Force')] cmd = ";".join(cmds) self._remote_exec(server, cmd) def get_current_domain(self, server): cmd = "(Get-WmiObject Win32_ComputerSystem).Domain" (out, err) = self._remote_exec(server, cmd) return out.strip() def ensure_directory_exists(self, server, path): cmd = ["New-Item", "-ItemType", "Directory", "-Force", "-Path", self.quote_string(path)] self._remote_exec(server, cmd) def remove(self, server, path, force=True, recurse=False, is_junction=False): if self.path_exists(server, path): if is_junction: cmd = ('[System.IO.Directory]::Delete(' '%(path)s, %(recurse)d)' % dict(path=self.quote_string(path), recurse=recurse)) else: cmd = ["Remove-Item", "-Confirm:$false", "-Path", self.quote_string(path)] if force: cmd += ['-Force'] if recurse: cmd += ['-Recurse'] self._remote_exec(server, cmd) else: LOG.debug("Skipping deleting path %s as it does " "not exist.", path) def path_exists(self, server, path): cmd = ["Test-Path", path] (out, _) = self._remote_exec(server, cmd) return out.strip() == "True" def normalize_path(self, path): return path.replace('/', '\\') def get_interface_index_by_ip(self, server, ip): cmd = ('Get-NetIPAddress | ' 'Where-Object {$_.IPAddress -eq "%(ip)s"} | ' 'Select-Object -ExpandProperty InterfaceIndex' % dict(ip=ip)) (out, err) = self._remote_exec(server, cmd) if_index = int(out) return if_index def set_dns_client_search_list(self, server, search_list): src_list = ",".join(["'%s'" % domain for domain in search_list]) cmd = ["Set-DnsClientGlobalSetting", "-SuffixSearchList", "@(%s)" % src_list] self._remote_exec(server, cmd) def set_dns_client_server_addresses(self, server, if_index, dns_servers): dns_sv_list = ",".join(["'%s'" % dns_sv for dns_sv in dns_servers]) cmd = ["Set-DnsClientServerAddress", "-InterfaceIndex", if_index, "-ServerAddresses", "(%s)" % dns_sv_list] self._remote_exec(server, cmd) def set_win_reg_value(self, server, path, key, value): cmd = ['Set-ItemProperty', '-Path', self.quote_string(path), '-Name', key, '-Value', value] self._remote_exec(server, cmd) def get_win_reg_value(self, server, path, name=None): cmd = "Get-ItemProperty -Path %s" % self.quote_string(path) if name: cmd += " | Select-Object -ExpandProperty %s" % name return self._remote_exec(server, cmd, retry=False)[0] def quote_string(self, string): return '"%s"' % string manila-2.0.0/manila/share/manager.py0000664000567000056710000037075312701407112020454 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 NetApp Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """NAS share manager managers creating shares and access rights. **Related Flags** :share_driver: Used by :class:`ShareManager`. """ import copy import datetime from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils from oslo_service import periodic_task from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import strutils from oslo_utils import timeutils import six from manila.common import constants from manila import context from manila.data import rpcapi as data_rpcapi from manila import exception from manila.i18n import _ from manila.i18n import _LE from manila.i18n import _LI from manila.i18n import _LW from manila import manager from manila import quota from manila.share import access import manila.share.configuration from manila.share import drivers_private_data from manila.share import migration from manila.share import rpcapi as share_rpcapi from manila.share import share_types from manila.share import utils as share_utils from manila import utils LOG = log.getLogger(__name__) share_manager_opts = [ cfg.StrOpt('share_driver', default='manila.share.drivers.generic.GenericShareDriver', help='Driver to use for share creation.'), cfg.ListOpt('hook_drivers', default=[], help='Driver(s) to perform some additional actions before and ' 'after share driver actions and on a periodic basis. ' 'Default is [].', deprecated_group='DEFAULT'), cfg.BoolOpt('delete_share_server_with_last_share', default=False, help='Whether share servers will ' 'be deleted on deletion of the last share.'), cfg.BoolOpt('unmanage_remove_access_rules', default=False, help='If set to True, then manila will deny access and remove ' 'all access rules on share unmanage.' 'If set to False - nothing will be changed.'), cfg.BoolOpt('automatic_share_server_cleanup', default=True, help='If set to True, then Manila will delete all share ' 'servers which were unused more than specified time .' 'If set to False - automatic deletion of share servers ' 'will be disabled.', deprecated_group='DEFAULT'), cfg.IntOpt('unused_share_server_cleanup_interval', default=10, help='Unallocated share servers reclamation time interval ' '(minutes). Minimum value is 10 minutes, maximum is 60 ' 'minutes. The reclamation function is run every ' '10 minutes and delete share servers which were unused ' 'more than unused_share_server_cleanup_interval option ' 'defines. 
This value reflects the shortest time Manila ' 'will wait for a share server to go unutilized before ' 'deleting it.', deprecated_group='DEFAULT'), cfg.IntOpt('replica_state_update_interval', default=300, help='This value, specified in seconds, determines how often ' 'the share manager will poll for the health ' '(replica_state) of each replica instance.'), ] CONF = cfg.CONF CONF.register_opts(share_manager_opts) CONF.import_opt('periodic_hooks_interval', 'manila.share.hook') # Drivers that need to change module paths or class names can add their # old/new path here to maintain backward compatibility. MAPPING = { 'manila.share.drivers.netapp.cluster_mode.NetAppClusteredShareDriver': 'manila.share.drivers.netapp.common.NetAppDriver', 'manila.share.drivers.hp.hp_3par_driver.HP3ParShareDriver': 'manila.share.drivers.hpe.hpe_3par_driver.HPE3ParShareDriver', 'manila.share.drivers.glusterfs_native.GlusterfsNativeShareDriver': 'manila.share.drivers.glusterfs.glusterfs_native.' 'GlusterfsNativeShareDriver', } QUOTAS = quota.QUOTAS def locked_share_replica_operation(operation): """Lock decorator for share replica operations. Takes a named lock prior to executing the operation. The lock is named with the id of the share to which the replica belongs. Intended use: If a replica operation uses this decorator, it will block actions on all share replicas of the share until the named lock is free. This is used to protect concurrent operations on replicas of the same share e.g. promote ReplicaA while deleting ReplicaB, both belonging to the same share. """ def wrapped(*args, **kwargs): share_id = kwargs.get('share_id') @utils.synchronized("%s" % share_id, external=True) def locked_operation(*_args, **_kwargs): return operation(*_args, **_kwargs) return locked_operation(*args, **kwargs) return wrapped def add_hooks(f): def wrapped(self, *args, **kwargs): if not self.hooks: return f(self, *args, **kwargs) pre_hook_results = [] for hook in self.hooks: pre_hook_results.append( hook.execute_pre_hook( func_name=f.__name__, *args, **kwargs)) wrapped_func_results = f(self, *args, **kwargs) for i, hook in enumerate(self.hooks): hook.execute_post_hook( func_name=f.__name__, driver_action_results=wrapped_func_results, pre_hook_data=pre_hook_results[i], *args, **kwargs) return wrapped_func_results return wrapped class ShareManager(manager.SchedulerDependentManager): """Manages NAS storages.""" RPC_API_VERSION = '1.11' def __init__(self, share_driver=None, service_name=None, *args, **kwargs): """Load the driver from args, or from flags.""" self.configuration = manila.share.configuration.Configuration( share_manager_opts, config_group=service_name) self._verify_unused_share_server_cleanup_interval() super(ShareManager, self).__init__(service_name='share', *args, **kwargs) if not share_driver: share_driver = self.configuration.share_driver if share_driver in MAPPING: msg_args = {'old': share_driver, 'new': MAPPING[share_driver]} LOG.warning(_LW("Driver path %(old)s is deprecated, update your " "configuration to the new path %(new)s"), msg_args) share_driver = MAPPING[share_driver] ctxt = context.get_admin_context() private_storage = drivers_private_data.DriverPrivateData( context=ctxt, backend_host=self.host, config_group=self.configuration.config_group ) self.driver = importutils.import_object( share_driver, private_storage=private_storage, configuration=self.configuration, ) self.access_helper = access.ShareInstanceAccess(self.db, self.driver) self.hooks = [] self._init_hook_drivers() def _init_hook_drivers(self): # 
Try to initialize hook driver(s). hook_drivers = self.configuration.safe_get("hook_drivers") for hook_driver in hook_drivers: self.hooks.append( importutils.import_object( hook_driver, configuration=self.configuration, host=self.host, ) ) def _ensure_share_instance_has_pool(self, ctxt, share_instance): pool = share_utils.extract_host(share_instance['host'], 'pool') if pool is None: # No pool name encoded in host, so this is a legacy # share created before pool is introduced, ask # driver to provide pool info if it has such # knowledge and update the DB. try: pool = self.driver.get_pool(share_instance) except Exception as err: LOG.error(_LE("Failed to fetch pool name for share: " "%(share)s. Error: %(error)s."), {'share': share_instance['id'], 'error': err}) return if pool: new_host = share_utils.append_host( share_instance['host'], pool) self.db.share_update( ctxt, share_instance['id'], {'host': new_host}) return pool @add_hooks def init_host(self): """Initialization for a standalone service.""" ctxt = context.get_admin_context() try: self.driver.do_setup(ctxt) self.driver.check_for_setup_error() except Exception as e: LOG.exception( _LE("Error encountered during initialization of driver " "'%(name)s' on '%(host)s' host. %(exc)s"), { "name": self.driver.__class__.__name__, "host": self.host, "exc": e, } ) self.driver.initialized = False # we don't want to continue since we failed # to initialize the driver correctly. return else: self.driver.initialized = True share_instances = self.db.share_instances_get_all_by_host(ctxt, self.host) LOG.debug("Re-exporting %s shares", len(share_instances)) for share_instance in share_instances: share_ref = self.db.share_get(ctxt, share_instance['share_id']) if share_ref.is_busy: LOG.info( _LI("Share instance %(id)s: skipping export, " "because it is busy with an active task: %(task)s."), {'id': share_instance['id'], 'task': share_ref['task_state']}, ) continue if share_instance['status'] != constants.STATUS_AVAILABLE: LOG.info( _LI("Share instance %(id)s: skipping export, " "because it has '%(status)s' status."), {'id': share_instance['id'], 'status': share_instance['status']}, ) continue self._ensure_share_instance_has_pool(ctxt, share_instance) share_server = self._get_share_server(ctxt, share_instance) share_instance = self.db.share_instance_get( ctxt, share_instance['id'], with_share_data=True) try: export_locations = self.driver.ensure_share( ctxt, share_instance, share_server=share_server) except Exception as e: LOG.error( _LE("Caught exception trying ensure share '%(s_id)s'. " "Exception: \n%(e)s."), {'s_id': share_instance['id'], 'e': six.text_type(e)}, ) continue if export_locations: self.db.share_export_locations_update( ctxt, share_instance['id'], export_locations) if share_instance['access_rules_status'] == ( constants.STATUS_OUT_OF_SYNC): try: self.access_helper.update_access_rules( ctxt, share_instance['id'], share_server=share_server) except Exception as e: LOG.error( _LE("Unexpected error occurred while updating access " "rules for share instance %(s_id)s. " "Exception: \n%(e)s."), {'s_id': share_instance['id'], 'e': six.text_type(e)}, ) self.publish_service_capabilities(ctxt) def _provide_share_server_for_share(self, context, share_network_id, share_instance, snapshot=None, consistency_group=None): """Gets or creates share_server and updates share with its id. Active share_server can be deleted if there are no dependent shares on it. 
So we need avoid possibility to delete share_server in time gap between reaching active state for share_server and setting up share_server_id for share. It is possible, for example, with first share creation, which starts share_server creation. For this purpose used shared lock between this method and the one with deletion of share_server. :param context: Current context :param share_network_id: Share network where existing share server should be found or created. If share_network_id is None method use share_network_id from provided snapshot. :param share_instance: Share Instance model :param snapshot: Optional -- Snapshot model :returns: dict, dict -- first value is share_server, that has been chosen for share schedule. Second value is share updated with share_server_id. """ if not (share_network_id or snapshot): msg = _("'share_network_id' parameter or 'snapshot'" " should be provided. ") raise ValueError(msg) parent_share_server = None def error(msg, *args): LOG.error(msg, *args) self.db.share_instance_update(context, share_instance['id'], {'status': constants.STATUS_ERROR}) if snapshot: parent_share_server_id = ( snapshot['share']['instance']['share_server_id']) try: parent_share_server = self.db.share_server_get( context, parent_share_server_id) except exception.ShareServerNotFound: with excutils.save_and_reraise_exception(): error(_LE("Parent share server %s does not exist."), parent_share_server_id) if parent_share_server['status'] != constants.STATUS_ACTIVE: error_params = { 'id': parent_share_server_id, 'status': parent_share_server['status'], } error(_LE("Parent share server %(id)s has invalid status " "'%(status)s'."), error_params) raise exception.InvalidShareServer( share_server_id=parent_share_server ) if parent_share_server and not share_network_id: share_network_id = parent_share_server['share_network_id'] def get_available_share_servers(): if parent_share_server: return [parent_share_server] else: return ( self.db.share_server_get_all_by_host_and_share_net_valid( context, self.host, share_network_id) ) @utils.synchronized("share_manager_%s" % share_network_id, external=True) def _provide_share_server_for_share(): try: available_share_servers = get_available_share_servers() except exception.ShareServerNotFound: available_share_servers = None compatible_share_server = None if available_share_servers: try: compatible_share_server = ( self.driver.choose_share_server_compatible_with_share( context, available_share_servers, share_instance, snapshot=snapshot.instance if snapshot else None, consistency_group=consistency_group ) ) except Exception as e: with excutils.save_and_reraise_exception(): error(_LE("Cannot choose compatible share server: %s"), e) if not compatible_share_server: compatible_share_server = self.db.share_server_create( context, { 'host': self.host, 'share_network_id': share_network_id, 'status': constants.STATUS_CREATING } ) msg = ("Using share_server %(share_server)s for share instance" " %(share_instance_id)s") LOG.debug(msg, { 'share_server': compatible_share_server['id'], 'share_instance_id': share_instance['id'] }) share_instance_ref = self.db.share_instance_update( context, share_instance['id'], {'share_server_id': compatible_share_server['id']}, with_share_data=True ) if compatible_share_server['status'] == constants.STATUS_CREATING: # Create share server on backend with data from db. 
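# The docstring above explains why get-or-create of a share server and
# deletion of an unused share server must serialize on a lock named after
# the share network. The standalone sketch below is illustrative only and
# does not reuse manila's utils.synchronized; it shows the same pattern
# with plain threading locks. All names here (_NETWORK_LOCKS,
# get_or_create_server, delete_unused_server) are hypothetical.
import threading

_REGISTRY_GUARD = threading.Lock()
_NETWORK_LOCKS = {}   # hypothetical: share_network_id -> Lock
_SERVERS = {}         # hypothetical in-memory stand-in for the servers table


def _lock_for(share_network_id):
    # Lazily create exactly one lock object per share network.
    with _REGISTRY_GUARD:
        return _NETWORK_LOCKS.setdefault(share_network_id, threading.Lock())


def get_or_create_server(share_network_id):
    # Creation and deletion take the same per-network lock, so a server
    # cannot disappear in the gap between reaching ACTIVE state and being
    # attached to a new share.
    with _lock_for(share_network_id):
        server = _SERVERS.get(share_network_id)
        if server is None:
            server = {'share_network_id': share_network_id,
                      'status': 'CREATING', 'share_count': 0}
            _SERVERS[share_network_id] = server
            server['status'] = 'ACTIVE'   # stands in for _setup_server()
        server['share_count'] += 1
        return server


def delete_unused_server(share_network_id):
    with _lock_for(share_network_id):
        server = _SERVERS.get(share_network_id)
        if server is not None and server['share_count'] == 0:
            del _SERVERS[share_network_id]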
compatible_share_server = self._setup_server( context, compatible_share_server) LOG.info(_LI("Share server created successfully.")) else: LOG.info(_LI("Used preexisting share server " "'%(share_server_id)s'"), {'share_server_id': compatible_share_server['id']}) return compatible_share_server, share_instance_ref return _provide_share_server_for_share() def _provide_share_server_for_cg(self, context, share_network_id, cg_ref, cgsnapshot=None): """Gets or creates share_server and updates share with its id. Active share_server can be deleted if there are no dependent shares on it. So we need avoid possibility to delete share_server in time gap between reaching active state for share_server and setting up share_server_id for share. It is possible, for example, with first share creation, which starts share_server creation. For this purpose used shared lock between this method and the one with deletion of share_server. :param context: Current context :param share_network_id: Share network where existing share server should be found or created. If share_network_id is None method use share_network_id from provided snapshot. :param cg_ref: Consistency Group model :param cgsnapshot: Optional -- CGSnapshot model :returns: dict, dict -- first value is share_server, that has been chosen for consistency group schedule. Second value is consistency group updated with share_server_id. """ if not (share_network_id or cgsnapshot): msg = _("'share_network_id' parameter or 'snapshot'" " should be provided. ") raise exception.InvalidInput(reason=msg) def error(msg, *args): LOG.error(msg, *args) self.db.consistency_group_update( context, cg_ref['id'], {'status': constants.STATUS_ERROR}) @utils.synchronized("share_manager_%s" % share_network_id, external=True) def _provide_share_server_for_cg(): try: available_share_servers = ( self.db.share_server_get_all_by_host_and_share_net_valid( context, self.host, share_network_id)) except exception.ShareServerNotFound: available_share_servers = None compatible_share_server = None if available_share_servers: try: compatible_share_server = ( self.driver.choose_share_server_compatible_with_cg( context, available_share_servers, cg_ref, cgsnapshot=cgsnapshot ) ) except Exception as e: with excutils.save_and_reraise_exception(): error(_LE("Cannot choose compatible share-server: %s"), e) if not compatible_share_server: compatible_share_server = self.db.share_server_create( context, { 'host': self.host, 'share_network_id': share_network_id, 'status': constants.STATUS_CREATING } ) msg = ("Using share_server %(share_server)s for consistency " "group %(cg_id)s") LOG.debug(msg, { 'share_server': compatible_share_server['id'], 'cg_id': cg_ref['id'] }) updated_cg = self.db.consistency_group_update( context, cg_ref['id'], {'share_server_id': compatible_share_server['id']}, ) if compatible_share_server['status'] == constants.STATUS_CREATING: # Create share server on backend with data from db. 
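# Both _provide_share_server_for_share() and _provide_share_server_for_cg()
# follow the same "reuse a compatible server or create one in CREATING
# state" flow shown above. A minimal, hypothetical sketch of that decision:
# is_compatible is a simplified per-server predicate standing in for the
# driver's choose_share_server_compatible_with_* call, and
# new_server_factory stands in for db.share_server_create.
def choose_or_create_server(available_servers, is_compatible,
                            new_server_factory):
    for server in available_servers or []:
        if is_compatible(server):
            return server, False          # reused an existing share server
    return new_server_factory(), True     # caller still has to set it up


# Example usage under the assumptions above:
existing = [{'id': 'a', 'status': 'ACTIVE'}]
server, created = choose_or_create_server(
    existing,
    is_compatible=lambda s: s['status'] == 'ACTIVE',
    new_server_factory=lambda: {'id': 'b', 'status': 'CREATING'})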
compatible_share_server = self._setup_server( context, compatible_share_server) LOG.info(_LI("Share server created successfully.")) else: LOG.info(_LI("Used preexisting share server " "'%(share_server_id)s'"), {'share_server_id': compatible_share_server['id']}) return compatible_share_server, updated_cg return _provide_share_server_for_cg() def _get_share_server(self, context, share_instance): if share_instance['share_server_id']: return self.db.share_server_get( context, share_instance['share_server_id']) else: return None @utils.require_driver_initialized def migration_get_info(self, context, share_instance_id): share_instance = self.db.share_instance_get( context, share_instance_id, with_share_data=True) share_server = None if share_instance.get('share_server_id'): share_server = self.db.share_server_get( context, share_instance['share_server_id']) return self.driver.migration_get_info(context, share_instance, share_server) @utils.require_driver_initialized def migration_get_driver_info(self, context, share_instance_id): share_instance = self.db.share_instance_get( context, share_instance_id, with_share_data=True) share_server = None if share_instance.get('share_server_id'): share_server = self.db.share_server_get( context, share_instance['share_server_id']) return self.driver.migration_get_driver_info(context, share_instance, share_server) @utils.require_driver_initialized def migration_start(self, context, share_id, host, force_host_copy, notify=True): """Migrates a share from current host to another host.""" LOG.debug("Entered migration_start method for share %s.", share_id) self.db.share_update( context, share_id, {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS}) rpcapi = share_rpcapi.ShareAPI() share_ref = self.db.share_get(context, share_id) share_instance = self._get_share_instance(context, share_ref) moved = False self.db.share_instance_update(context, share_instance['id'], {'status': constants.STATUS_MIGRATING}) if not force_host_copy: try: dest_driver_migration_info = rpcapi.migration_get_driver_info( context, share_instance) share_server = self._get_share_server(context.elevated(), share_instance) LOG.debug("Calling driver migration for share %s.", share_id) self.db.share_update( context, share_id, {'task_state': ( constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS)}) moved, model_update = self.driver.migration_start( context, share_instance, share_server, host, dest_driver_migration_info, notify) if moved and not notify: self.db.share_update( context, share_ref['id'], {'task_state': constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE}) # NOTE(ganso): Here we are allowing the driver to perform # changes even if it has not performed migration. While this # scenario may not be valid, I do not think it should be # forcefully prevented. if model_update: self.db.share_instance_update( context, share_instance['id'], model_update) except Exception as e: msg = six.text_type(e) LOG.exception(msg) LOG.warning(_LW("Driver did not migrate share %s. 
Proceeding " "with generic migration approach.") % share_id) if not moved: try: LOG.debug("Starting generic migration " "for share %s.", share_id) self._migration_start_generic(context, share_ref, share_instance, host, notify) except Exception: msg = _("Generic migration failed for share %s.") % share_id LOG.exception(msg) self.db.share_update( context, share_id, {'task_state': constants.TASK_STATE_MIGRATION_ERROR}) self.db.share_instance_update( context, share_instance['id'], {'status': constants.STATUS_AVAILABLE}) raise exception.ShareMigrationFailed(reason=msg) def _migration_start_generic(self, context, share, share_instance, host, notify): rpcapi = share_rpcapi.ShareAPI() helper = migration.ShareMigrationHelper(context, self.db, share) share_server = self._get_share_server(context.elevated(), share_instance) readonly_support = self.driver.configuration.safe_get( 'migration_readonly_rules_support') helper.change_to_read_only(share_instance, share_server, readonly_support, self.driver) try: new_share_instance = helper.create_instance_and_wait( share, share_instance, host) self.db.share_instance_update( context, new_share_instance['id'], {'status': constants.STATUS_MIGRATING_TO}) except Exception: msg = _("Failed to create instance on destination " "backend during migration of share %s.") % share['id'] LOG.exception(msg) helper.cleanup_access_rules(share_instance, share_server, self.driver) raise exception.ShareMigrationFailed(reason=msg) ignore_list = self.driver.configuration.safe_get( 'migration_ignore_files') data_rpc = data_rpcapi.DataAPI() try: src_migration_info = self.driver.migration_get_info( context, share_instance, share_server) dest_migration_info = rpcapi.migration_get_info( context, new_share_instance) LOG.debug("Time to start copying in migration" " for share %s.", share['id']) data_rpc.migration_start( context, share['id'], ignore_list, share_instance['id'], new_share_instance['id'], src_migration_info, dest_migration_info, notify) except Exception: msg = _("Failed to obtain migration info from backends or" " invoking Data Service for migration of " "share %s.") % share['id'] LOG.exception(msg) helper.cleanup_new_instance(new_share_instance) helper.cleanup_access_rules(share_instance, share_server, self.driver) raise exception.ShareMigrationFailed(reason=msg) @utils.require_driver_initialized def migration_complete(self, context, share_id, share_instance_id, new_share_instance_id): LOG.info(_LI("Received request to finish Share Migration for " "share %s."), share_id) share_ref = self.db.share_get(context, share_id) if share_ref['task_state'] == ( constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE): rpcapi = share_rpcapi.ShareAPI() share_instance = self._get_share_instance(context, share_ref) share_server = self._get_share_server(context, share_instance) try: dest_driver_migration_info = rpcapi.migration_get_driver_info( context, share_instance) model_update = self.driver.migration_complete( context, share_instance, share_server, dest_driver_migration_info) if model_update: self.db.share_instance_update( context, share_instance['id'], model_update) self.db.share_update( context, share_id, {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS}) except Exception: msg = _("Driver migration completion failed for" " share %s.") % share_id LOG.exception(msg) self.db.share_update( context, share_id, {'task_state': constants.TASK_STATE_MIGRATION_ERROR}) raise exception.ShareMigrationFailed(reason=msg) else: try: self._migration_complete( context, share_ref, share_instance_id, 
new_share_instance_id) except Exception: msg = _("Generic migration completion failed for" " share %s.") % share_id LOG.exception(msg) self.db.share_update( context, share_id, {'task_state': constants.TASK_STATE_MIGRATION_ERROR}) self.db.share_instance_update( context, share_instance_id, {'status': constants.STATUS_AVAILABLE}) raise exception.ShareMigrationFailed(reason=msg) def _migration_complete(self, context, share_ref, share_instance_id, new_share_instance_id): share_instance = self.db.share_instance_get( context, share_instance_id, with_share_data=True) new_share_instance = self.db.share_instance_get( context, new_share_instance_id, with_share_data=True) share_server = self._get_share_server(context, share_instance) helper = migration.ShareMigrationHelper(context, self.db, share_ref) task_state = share_ref['task_state'] if task_state in (constants.TASK_STATE_DATA_COPYING_ERROR, constants.TASK_STATE_DATA_COPYING_CANCELLED): msg = _("Data copy of generic migration for share %s has not " "completed successfully.") % share_ref['id'] LOG.warning(msg) helper.cleanup_new_instance(new_share_instance) helper.cleanup_access_rules(share_instance, share_server, self.driver) if task_state == constants.TASK_STATE_DATA_COPYING_CANCELLED: self.db.share_instance_update( context, share_instance_id, {'status': constants.STATUS_AVAILABLE}) self.db.share_update( context, share_ref['id'], {'task_state': constants.TASK_STATE_MIGRATION_CANCELLED}) LOG.info(_LI("Share Migration for share %s" " was cancelled."), share_ref['id']) return else: raise exception.ShareMigrationFailed(reason=msg) elif task_state != constants.TASK_STATE_DATA_COPYING_COMPLETED: msg = _("Data copy for migration of share %s not completed" " yet.") % share_ref['id'] LOG.error(msg) raise exception.ShareMigrationFailed(reason=msg) try: helper.apply_new_access_rules(new_share_instance) except Exception: msg = _("Failed to apply new access rules during migration " "of share %s.") % share_ref['id'] LOG.exception(msg) helper.cleanup_new_instance(new_share_instance) helper.cleanup_access_rules(share_instance, share_server, self.driver) raise exception.ShareMigrationFailed(reason=msg) self.db.share_update( context, share_ref['id'], {'task_state': constants.TASK_STATE_MIGRATION_COMPLETING}) self.db.share_instance_update(context, new_share_instance_id, {'status': constants.STATUS_AVAILABLE}) self.db.share_instance_update(context, share_instance_id, {'status': constants.STATUS_INACTIVE}) helper.delete_instance_and_wait(share_instance) self.db.share_update( context, share_ref['id'], {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS}) LOG.info(_LI("Share Migration for share %s" " completed successfully."), share_ref['id']) @utils.require_driver_initialized def migration_cancel(self, context, share_id): share_ref = self.db.share_get(context, share_id) # Confirm that it is driver migration scenario if share_ref['task_state'] == ( constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS): share_server = None if share_ref.instance.get('share_server_id'): share_server = self.db.share_server_get( context, share_ref.instance['share_server_id']) share_rpc = share_rpcapi.ShareAPI() driver_migration_info = share_rpc.migration_get_driver_info( context, share_ref.instance) self.driver.migration_cancel( context, share_ref.instance, share_server, driver_migration_info) else: msg = _("Driver is not performing migration for" " share %s") % share_id raise exception.InvalidShare(reason=msg) @utils.require_driver_initialized def migration_get_progress(self, context, 
share_id): share_ref = self.db.share_get(context, share_id) # Confirm that it is driver migration scenario if share_ref['task_state'] == ( constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS): share_server = None if share_ref.instance.get('share_server_id'): share_server = self.db.share_server_get( context, share_ref.instance['share_server_id']) share_rpc = share_rpcapi.ShareAPI() driver_migration_info = share_rpc.migration_get_driver_info( context, share_ref.instance) return self.driver.migration_get_progress( context, share_ref.instance, share_server, driver_migration_info) else: msg = _("Driver is not performing migration for" " share %s") % share_id raise exception.InvalidShare(reason=msg) def _get_share_instance(self, context, share): if isinstance(share, six.string_types): id = share else: id = share.instance['id'] return self.db.share_instance_get(context, id, with_share_data=True) @add_hooks @utils.require_driver_initialized def create_share_instance(self, context, share_instance_id, request_spec=None, filter_properties=None, snapshot_id=None): """Creates a share instance.""" context = context.elevated() share_instance = self._get_share_instance(context, share_instance_id) share_network_id = share_instance.get('share_network_id', None) if not share_instance['availability_zone']: share_instance = self.db.share_instance_update( context, share_instance_id, {'availability_zone': CONF.storage_availability_zone}, with_share_data=True ) if share_network_id and not self.driver.driver_handles_share_servers: self.db.share_instance_update( context, share_instance_id, {'status': constants.STATUS_ERROR}) raise exception.ManilaException(_( "Creation of share instance %s failed: driver does not expect " "share-network to be provided with current " "configuration.") % share_instance_id) if snapshot_id is not None: snapshot_ref = self.db.share_snapshot_get(context, snapshot_id) parent_share_server_id = ( snapshot_ref['share']['instance']['share_server_id']) else: snapshot_ref = None parent_share_server_id = None consistency_group_ref = None if share_instance.get('consistency_group_id'): consistency_group_ref = self.db.consistency_group_get( context, share_instance['consistency_group_id']) if share_network_id or parent_share_server_id: try: share_server, share_instance = ( self._provide_share_server_for_share( context, share_network_id, share_instance, snapshot=snapshot_ref, consistency_group=consistency_group_ref ) ) except Exception: with excutils.save_and_reraise_exception(): error = _LE("Creation of share instance %s failed: " "failed to get share server.") LOG.error(error, share_instance_id) self.db.share_instance_update( context, share_instance_id, {'status': constants.STATUS_ERROR} ) else: share_server = None try: if snapshot_ref: export_locations = self.driver.create_share_from_snapshot( context, share_instance, snapshot_ref.instance, share_server=share_server) else: export_locations = self.driver.create_share( context, share_instance, share_server=share_server) self.db.share_export_locations_update( context, share_instance['id'], export_locations) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error(_LE("Share instance %s failed on creation."), share_instance_id) detail_data = getattr(e, 'detail_data', {}) def get_export_location(details): if not isinstance(details, dict): return None return details.get('export_locations', details.get('export_location')) export_locations = get_export_location(detail_data) if export_locations: self.db.share_export_locations_update( 
context, share_instance['id'], export_locations) else: LOG.warning(_LW('Share instance information in exception ' 'can not be written to db because it ' 'contains %s and it is not a dictionary.'), detail_data) self.db.share_instance_update( context, share_instance_id, {'status': constants.STATUS_ERROR} ) else: LOG.info(_LI("Share instance %s created successfully."), share_instance_id) share = self.db.share_get(context, share_instance['share_id']) updates = { 'status': constants.STATUS_AVAILABLE, 'launched_at': timeutils.utcnow(), } if share.get('replication_type'): updates['replica_state'] = constants.REPLICA_STATE_ACTIVE self.db.share_instance_update(context, share_instance_id, updates) def _update_share_replica_access_rules_state(self, context, share_replica_id, state): """Update the access_rules_status for the share replica.""" self.db.share_instance_update_access_status( context, share_replica_id, state) def _get_replica_snapshots_for_snapshot(self, context, snapshot_id, active_replica_id, share_replica_id, with_share_data=True): """Return dict of snapshot instances of active and replica instances. This method returns a dict of snapshot instances for snapshot referred to by snapshot_id. The dict contains the snapshot instance pertaining to the 'active' replica and the snapshot instance pertaining to the replica referred to by share_replica_id. """ filters = { 'snapshot_ids': snapshot_id, 'share_instance_ids': (share_replica_id, active_replica_id), } instance_list = self.db.share_snapshot_instance_get_all_with_filters( context, filters, with_share_data=with_share_data) snapshots = { 'active_replica_snapshot': self._get_snapshot_instance_dict( context, list(filter(lambda x: x['share_instance_id'] == active_replica_id, instance_list))[0]), 'share_replica_snapshot': self._get_snapshot_instance_dict( context, list(filter(lambda x: x['share_instance_id'] == share_replica_id, instance_list))[0]), } return snapshots @add_hooks @utils.require_driver_initialized @locked_share_replica_operation def create_share_replica(self, context, share_replica_id, share_id=None, request_spec=None, filter_properties=None): """Create a share replica.""" context = context.elevated() share_replica = self.db.share_replica_get( context, share_replica_id, with_share_data=True, with_share_server=True) if not share_replica['availability_zone']: share_replica = self.db.share_replica_update( context, share_replica['id'], {'availability_zone': CONF.storage_availability_zone}, with_share_data=True ) _active_replica = ( self.db.share_replicas_get_available_active_replica( context, share_replica['share_id'], with_share_data=True, with_share_server=True)) if not _active_replica: self.db.share_replica_update( context, share_replica['id'], {'status': constants.STATUS_ERROR, 'replica_state': constants.STATUS_ERROR}) msg = _("An 'active' replica must exist in 'available' " "state to create a new replica for share %s.") raise exception.ReplicationException( reason=msg % share_replica['share_id']) # We need the share_network_id in case of # driver_handles_share_server=True share_network_id = share_replica.get('share_network_id', None) if (share_network_id and not self.driver.driver_handles_share_servers): self.db.share_replica_update( context, share_replica['id'], {'status': constants.STATUS_ERROR, 'replica_state': constants.STATUS_ERROR}) raise exception.InvalidDriverMode( "Driver does not expect share-network to be provided " "with current configuration.") if share_network_id: try: share_server, share_replica = ( 
self._provide_share_server_for_share( context, share_network_id, share_replica) ) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed to get share server " "for share replica creation.")) self.db.share_replica_update( context, share_replica['id'], {'status': constants.STATUS_ERROR, 'replica_state': constants.STATUS_ERROR}) else: share_server = None # Map the existing access rules for the share to # the replica in the DB. share_access_rules = self.db.share_instance_access_copy( context, share_replica['share_id'], share_replica['id']) # Get snapshots for the share. share_snapshots = self.db.share_snapshot_get_all_for_share( context, share_id) # Get the required data for snapshots that have 'aggregate_status' # set to 'available'. available_share_snapshots = [ self._get_replica_snapshots_for_snapshot( context, x['id'], _active_replica['id'], share_replica_id) for x in share_snapshots if x['aggregate_status'] == constants.STATUS_AVAILABLE] replica_list = ( self.db.share_replicas_get_all_by_share( context, share_replica['share_id'], with_share_data=True, with_share_server=True) ) replica_list = [self._get_share_replica_dict(context, r) for r in replica_list] share_replica = self._get_share_replica_dict(context, share_replica) try: replica_ref = self.driver.create_replica( context, replica_list, share_replica, share_access_rules, available_share_snapshots, share_server=share_server) or {} except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Share replica %s failed on creation."), share_replica['id']) self.db.share_replica_update( context, share_replica['id'], {'status': constants.STATUS_ERROR, 'replica_state': constants.STATUS_ERROR}) self._update_share_replica_access_rules_state( context, share_replica['id'], constants.STATUS_ERROR) if replica_ref.get('export_locations'): if isinstance(replica_ref.get('export_locations'), list): self.db.share_export_locations_update( context, share_replica['id'], replica_ref.get('export_locations')) else: msg = _LW('Invalid export locations passed to the share ' 'manager.') LOG.warning(msg) if replica_ref.get('replica_state'): self.db.share_replica_update( context, share_replica['id'], {'status': constants.STATUS_AVAILABLE, 'replica_state': replica_ref.get('replica_state')}) if replica_ref.get('access_rules_status'): self._update_share_replica_access_rules_state( context, share_replica['id'], replica_ref.get('access_rules_status')) else: self._update_share_replica_access_rules_state( context, share_replica['id'], constants.STATUS_ACTIVE) LOG.info(_LI("Share replica %s created successfully."), share_replica['id']) @add_hooks @utils.require_driver_initialized @locked_share_replica_operation def delete_share_replica(self, context, share_replica_id, share_id=None, force=False): """Delete a share replica.""" context = context.elevated() share_replica = self.db.share_replica_get( context, share_replica_id, with_share_data=True, with_share_server=True) # Grab all the snapshot instances that belong to this replica. 
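# create_share_replica() above pairs, for every 'available' snapshot of the
# share, the snapshot instance that lives on the 'active' replica with the
# one that lives on the new replica (_get_replica_snapshots_for_snapshot).
# A minimal sketch of that pairing, assuming plain dicts carrying a
# 'share_instance_id' key; the helper name is hypothetical.
def pair_replica_snapshots(snapshot_instances, active_replica_id, replica_id):
    by_instance = {si['share_instance_id']: si for si in snapshot_instances}
    return {
        'active_replica_snapshot': by_instance[active_replica_id],
        'share_replica_snapshot': by_instance[replica_id],
    }


# Example usage under those assumptions:
instances = [{'id': 's1', 'share_instance_id': 'active'},
             {'id': 's2', 'share_instance_id': 'replica'}]
pairs = pair_replica_snapshots(instances, 'active', 'replica')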
replica_snapshots = ( self.db.share_snapshot_instance_get_all_with_filters( context, {'share_instance_ids': share_replica_id}, with_share_data=True) ) replica_list = ( self.db.share_replicas_get_all_by_share( context, share_replica['share_id'], with_share_data=True, with_share_server=True) ) replica_list = [self._get_share_replica_dict(context, r) for r in replica_list] replica_snapshots = [self._get_snapshot_instance_dict(context, s) for s in replica_snapshots] share_server = self._get_share_server(context, share_replica) share_replica = self._get_share_replica_dict(context, share_replica) try: self.access_helper.update_access_rules( context, share_replica_id, delete_rules="all", share_server=share_server ) except Exception: with excutils.save_and_reraise_exception() as exc_context: # Set status to 'error' from 'deleting' since # access_rules_status has been set to 'error'. self.db.share_replica_update( context, share_replica['id'], {'status': constants.STATUS_ERROR}) if force: msg = _("The driver was unable to delete access rules " "for the replica: %s. Will attempt to delete " "the replica anyway.") LOG.exception(msg % share_replica['id']) exc_context.reraise = False try: self.driver.delete_replica( context, replica_list, replica_snapshots, share_replica, share_server=share_server) except Exception: with excutils.save_and_reraise_exception() as exc_context: if force: msg = _("The driver was unable to delete the share " "replica: %s on the backend. Since " "this operation is forced, the replica will be " "deleted from Manila's database. A cleanup on " "the backend may be necessary.") LOG.exception(msg, share_replica['id']) exc_context.reraise = False else: self.db.share_replica_update( context, share_replica['id'], {'status': constants.STATUS_ERROR_DELETING, 'replica_state': constants.STATUS_ERROR}) for replica_snapshot in replica_snapshots: self.db.share_snapshot_instance_delete( context, replica_snapshot['id']) self.db.share_replica_delete(context, share_replica['id']) LOG.info(_LI("Share replica %s deleted successfully."), share_replica['id']) @add_hooks @utils.require_driver_initialized @locked_share_replica_operation def promote_share_replica(self, context, share_replica_id, share_id=None): """Promote a share replica to active state.""" context = context.elevated() share_replica = self.db.share_replica_get( context, share_replica_id, with_share_data=True, with_share_server=True) share_server = self._get_share_server(context, share_replica) # Get list of all replicas for share replica_list = ( self.db.share_replicas_get_all_by_share( context, share_replica['share_id'], with_share_data=True, with_share_server=True) ) try: old_active_replica = list(filter( lambda r: ( r['replica_state'] == constants.REPLICA_STATE_ACTIVE), replica_list))[0] except IndexError: self.db.share_replica_update( context, share_replica['id'], {'status': constants.STATUS_AVAILABLE}) msg = _("Share %(share)s has no replica with 'replica_state' " "set to %(state)s. 
Promoting %(replica)s is not " "possible.") raise exception.ReplicationException( reason=msg % {'share': share_replica['share_id'], 'state': constants.REPLICA_STATE_ACTIVE, 'replica': share_replica['id']}) access_rules = self.db.share_access_get_all_for_share( context, share_replica['share_id']) replica_list = [self._get_share_replica_dict(context, r) for r in replica_list] share_replica = self._get_share_replica_dict(context, share_replica) try: updated_replica_list = ( self.driver.promote_replica( context, replica_list, share_replica, access_rules, share_server=share_server) ) except Exception: with excutils.save_and_reraise_exception(): # (NOTE) gouthamr: If the driver throws an exception at # this stage, there is a good chance that the replicas are # somehow altered on the backend. We loop through the # replicas and set their 'status's to 'error' and # leave the 'replica_state' unchanged. This also changes the # 'status' of the replica that failed to promote to 'error' as # before this operation. The backend may choose to update # the actual replica_state during the replica_monitoring # stage. updates = {'status': constants.STATUS_ERROR} for replica_ref in replica_list: self.db.share_replica_update( context, replica_ref['id'], updates) # Set any 'creating' snapshots on the currently active replica to # 'error' since we cannot guarantee they will finish 'creating'. active_replica_snapshot_instances = ( self.db.share_snapshot_instance_get_all_with_filters( context, {'share_instance_ids': share_replica['id']}) ) for instance in active_replica_snapshot_instances: if instance['status'] in (constants.STATUS_CREATING, constants.STATUS_DELETING): msg = _LI("The replica snapshot instance %(instance)s was " "in %(state)s. Since it was not in %(available)s " "state when the replica was promoted, it will be " "set to %(error)s.") payload = { 'instance': instance['id'], 'state': instance['status'], 'available': constants.STATUS_AVAILABLE, 'error': constants.STATUS_ERROR, } LOG.info(msg, payload) self.db.share_snapshot_instance_update( context, instance['id'], {'status': constants.STATUS_ERROR}) if not updated_replica_list: self.db.share_replica_update( context, share_replica['id'], {'status': constants.STATUS_AVAILABLE, 'replica_state': constants.REPLICA_STATE_ACTIVE}) self.db.share_replica_update( context, old_active_replica['id'], {'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC}) else: for updated_replica in updated_replica_list: updated_export_locs = updated_replica.get( 'export_locations') if(updated_export_locs is not None and isinstance(updated_export_locs, list)): self.db.share_export_locations_update( context, updated_replica['id'], updated_export_locs) updated_replica_state = updated_replica.get( 'replica_state') updates = {} # Change the promoted replica's status from 'available' to # 'replication_change'. 
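# After the driver's promote_replica() call, the code below walks the
# returned replica models and derives per-replica DB updates: the promoted
# replica goes back to 'available', any replica reported in 'error' gets an
# error status, and replica_state is taken from the driver when provided.
# A hypothetical helper condensing that bookkeeping (status strings are
# illustrative stand-ins for the constants module):
def promote_updates(updated_replicas, promoted_id):
    db_updates = {}
    for replica in updated_replicas:
        updates = {}
        if replica['id'] == promoted_id:
            updates['status'] = 'available'
        state = replica.get('replica_state')
        if state == 'error':
            updates['status'] = 'error'
        if state is not None:
            updates['replica_state'] = state
        if updates:
            db_updates[replica['id']] = updates
    return db_updates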
if updated_replica['id'] == share_replica['id']: updates['status'] = constants.STATUS_AVAILABLE if updated_replica_state == constants.STATUS_ERROR: updates['status'] = constants.STATUS_ERROR if updated_replica_state is not None: updates['replica_state'] = updated_replica_state if updates: self.db.share_replica_update( context, updated_replica['id'], updates) if updated_replica.get('access_rules_status'): self._update_share_replica_access_rules_state( context, share_replica['id'], updated_replica.get('access_rules_status')) LOG.info(_LI("Share replica %s: promoted to active state " "successfully."), share_replica['id']) @periodic_task.periodic_task(spacing=CONF.replica_state_update_interval) @utils.require_driver_initialized def periodic_share_replica_update(self, context): LOG.debug("Updating status of share replica instances.") replicas = self.db.share_replicas_get_all(context, with_share_data=True) # Filter only non-active replicas belonging to this backend def qualified_replica(r): return (share_utils.extract_host(r['host']) == share_utils.extract_host(self.host)) replicas = list(filter(lambda x: qualified_replica(x), replicas)) for replica in replicas: self._share_replica_update( context, replica, share_id=replica['share_id']) @add_hooks @utils.require_driver_initialized def update_share_replica(self, context, share_replica_id, share_id=None): """Initiated by the force_update API.""" share_replica = self.db.share_replica_get( context, share_replica_id, with_share_data=True, with_share_server=True) self._share_replica_update(context, share_replica, share_id=share_id) @locked_share_replica_operation def _share_replica_update(self, context, share_replica, share_id=None): share_server = self._get_share_server(context, share_replica) # Re-grab the replica: try: share_replica = self.db.share_replica_get( context, share_replica['id'], with_share_data=True, with_share_server=True) except exception.ShareReplicaNotFound: # Replica may have been deleted, nothing to do here return # We don't poll for replicas that are busy in some operation, # or if they are the 'active' instance. if (share_replica['status'] in constants.TRANSITIONAL_STATUSES or share_replica['replica_state'] == constants.REPLICA_STATE_ACTIVE): return access_rules = self.db.share_access_get_all_for_share( context, share_replica['share_id']) LOG.debug("Updating status of share share_replica %s: ", share_replica['id']) replica_list = ( self.db.share_replicas_get_all_by_share( context, share_replica['share_id'], with_share_data=True, with_share_server=True) ) _active_replica = [x for x in replica_list if x['replica_state'] == constants.REPLICA_STATE_ACTIVE][0] # Get snapshots for the share. share_snapshots = self.db.share_snapshot_get_all_for_share( context, share_id) # Get the required data for snapshots that have 'aggregate_status' # set to 'available'. 
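# The periodic replica tasks above only poll replicas that live on this
# backend, are not the 'active' replica, and are not busy in a transitional
# status. A simplified sketch of that filter; backend_of() is a stand-in
# for share_utils.extract_host() and the status strings are illustrative.
def backend_of(host):
    # 'host@backend#pool' -> 'host@backend'
    return host.split('#')[0]


def replicas_to_poll(replicas, manager_host,
                     transitional=('creating', 'deleting'),
                     active_state='active'):
    return [r for r in replicas
            if backend_of(r['host']) == backend_of(manager_host)
            and r['status'] not in transitional
            and r['replica_state'] != active_state]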
available_share_snapshots = [ self._get_replica_snapshots_for_snapshot( context, x['id'], _active_replica['id'], share_replica['id']) for x in share_snapshots if x['aggregate_status'] == constants.STATUS_AVAILABLE] replica_list = [self._get_share_replica_dict(context, r) for r in replica_list] share_replica = self._get_share_replica_dict(context, share_replica) try: replica_state = self.driver.update_replica_state( context, replica_list, share_replica, access_rules, available_share_snapshots, share_server=share_server) except Exception: msg = _LE("Driver error when updating replica " "state for replica %s.") LOG.exception(msg, share_replica['id']) self.db.share_replica_update( context, share_replica['id'], {'replica_state': constants.STATUS_ERROR, 'status': constants.STATUS_ERROR}) return if replica_state in (constants.REPLICA_STATE_IN_SYNC, constants.REPLICA_STATE_OUT_OF_SYNC, constants.STATUS_ERROR): self.db.share_replica_update(context, share_replica['id'], {'replica_state': replica_state}) elif replica_state: msg = (_LW("Replica %(id)s cannot be set to %(state)s " "through update call.") % {'id': share_replica['id'], 'state': replica_state}) LOG.warning(msg) @add_hooks @utils.require_driver_initialized def manage_share(self, context, share_id, driver_options): context = context.elevated() share_ref = self.db.share_get(context, share_id) share_instance = self._get_share_instance(context, share_ref) project_id = share_ref['project_id'] try: if self.driver.driver_handles_share_servers: msg = _("Manage share is not supported for " "driver_handles_share_servers=True mode.") raise exception.InvalidDriverMode(driver_mode=msg) driver_mode = share_types.get_share_type_extra_specs( share_instance['share_type_id'], constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS) if strutils.bool_from_string(driver_mode): msg = _("%(mode)s != False") % { 'mode': constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS } raise exception.ManageExistingShareTypeMismatch(reason=msg) share_update = ( self.driver.manage_existing(share_instance, driver_options) or {} ) if not share_update.get('size'): msg = _("Driver cannot calculate share size.") raise exception.InvalidShare(reason=msg) self._update_quota_usages(context, project_id, { "shares": 1, "gigabytes": share_update['size'], }) share_update.update({ 'status': constants.STATUS_AVAILABLE, 'launched_at': timeutils.utcnow(), 'availability_zone': CONF.storage_availability_zone, }) # NOTE(vponomaryov): we should keep only those export locations # that driver has calculated to avoid incompatibilities with one # provided by user. if 'export_locations' in share_update: self.db.share_export_locations_update( context, share_instance['id'], share_update.pop('export_locations'), delete=True) self.db.share_update(context, share_id, share_update) except Exception: # NOTE(vponomaryov): set size as 1 because design expects size # to be set, it also will allow us to handle delete/unmanage # operations properly with this errored share according to quotas. 
self.db.share_update( context, share_id, {'status': constants.STATUS_MANAGE_ERROR, 'size': 1}) raise @add_hooks @utils.require_driver_initialized def manage_snapshot(self, context, snapshot_id, driver_options): if self.driver.driver_handles_share_servers: msg = _("Manage snapshot is not supported for " "driver_handles_share_servers=True mode.") # NOTE(vponomaryov): set size as 1 because design expects size # to be set, it also will allow us to handle delete/unmanage # operations properly with this errored snapshot according to # quotas. self.db.share_snapshot_update( context, snapshot_id, {'status': constants.STATUS_MANAGE_ERROR, 'size': 1}) raise exception.InvalidDriverMode(driver_mode=msg) context = context.elevated() snapshot_ref = self.db.share_snapshot_get(context, snapshot_id) share_server = self._get_share_server(context, snapshot_ref['share']) if share_server: msg = _("Manage snapshot is not supported for " "share snapshots with share servers.") # NOTE(vponomaryov): set size as 1 because design expects size # to be set, it also will allow us to handle delete/unmanage # operations properly with this errored snapshot according to # quotas. self.db.share_snapshot_update( context, snapshot_id, {'status': constants.STATUS_MANAGE_ERROR, 'size': 1}) raise exception.InvalidShareSnapshot(reason=msg) snapshot_instance = self.db.share_snapshot_instance_get( context, snapshot_ref.instance['id'], with_share_data=True ) project_id = snapshot_ref['project_id'] try: snapshot_update = ( self.driver.manage_existing_snapshot( snapshot_instance, driver_options) or {} ) if not snapshot_update.get('size'): snapshot_update['size'] = snapshot_ref['share']['size'] LOG.warning(_LI("Cannot get the size of the snapshot " "%(snapshot_id)s. Using the size of " "the share instead."), {'snapshot_id': snapshot_id}) self._update_quota_usages(context, project_id, { "snapshots": 1, "snapshot_gigabytes": snapshot_update['size'], }) snapshot_update.update({ 'status': constants.STATUS_AVAILABLE, 'progress': '100%', }) snapshot_update.pop('id', None) self.db.share_snapshot_update(context, snapshot_id, snapshot_update) except Exception: # NOTE(vponomaryov): set size as 1 because design expects size # to be set, it also will allow us to handle delete/unmanage # operations properly with this errored snapshot according to # quotas. 
self.db.share_snapshot_update( context, snapshot_id, {'status': constants.STATUS_MANAGE_ERROR, 'size': 1}) raise def _update_quota_usages(self, context, project_id, usages): user_id = context.user_id for resource, usage in usages.items(): try: current_usage = self.db.quota_usage_get( context, project_id, resource, user_id) self.db.quota_usage_update( context, project_id, user_id, resource, in_use=current_usage['in_use'] + usage) except exception.QuotaUsageNotFound: self.db.quota_usage_create(context, project_id, user_id, resource, usage) @add_hooks @utils.require_driver_initialized def unmanage_share(self, context, share_id): context = context.elevated() share_ref = self.db.share_get(context, share_id) share_instance = self._get_share_instance(context, share_ref) share_server = self._get_share_server(context, share_instance) project_id = share_ref['project_id'] def share_manage_set_error_status(msg, exception): status = {'status': constants.STATUS_UNMANAGE_ERROR} self.db.share_update(context, share_id, status) LOG.error(msg, six.text_type(exception)) try: if self.driver.driver_handles_share_servers: msg = _("Unmanage share is not supported for " "driver_handles_share_servers=True mode.") raise exception.InvalidShare(reason=msg) if share_server: msg = _("Unmanage share is not supported for " "shares with share servers.") raise exception.InvalidShare(reason=msg) self.driver.unmanage(share_instance) except exception.InvalidShare as e: share_manage_set_error_status( _LE("Share can not be unmanaged: %s."), e) return try: reservations = QUOTAS.reserve(context, project_id=project_id, shares=-1, gigabytes=-share_ref['size']) QUOTAS.commit(context, reservations, project_id=project_id) except Exception as e: # Note(imalinovskiy): # Quota reservation errors here are not fatal, because # unmanage is administrator API and he/she could update user # quota usages later if it's required. 
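# The note above applies to both unmanage_share() and unmanage_snapshot():
# quota bookkeeping is best effort, so reservation errors are logged and
# swallowed instead of failing the administrative operation. A hypothetical
# wrapper showing that pattern (quotas stands in for the QUOTAS engine):
import logging

_SKETCH_LOG = logging.getLogger(__name__)


def release_quota_best_effort(quotas, context, project_id, **deltas):
    try:
        reservations = quotas.reserve(context, project_id=project_id,
                                      **deltas)
        quotas.commit(context, reservations, project_id=project_id)
    except Exception as exc:
        _SKETCH_LOG.warning("Failed to update quota usages: %s", exc)


# e.g. release_quota_best_effort(QUOTAS, context, project_id,
#                                shares=-1, gigabytes=-share_size)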
LOG.warning(_LW("Failed to update quota usages: %s."), six.text_type(e)) if self.configuration.safe_get('unmanage_remove_access_rules'): try: self.access_helper.update_access_rules( context, share_instance['id'], delete_rules="all", share_server=share_server ) except Exception as e: share_manage_set_error_status( _LE("Can not remove access rules of share: %s."), e) return self.db.share_instance_delete(context, share_instance['id']) LOG.info(_LI("Share %s: unmanaged successfully."), share_id) @add_hooks @utils.require_driver_initialized def unmanage_snapshot(self, context, snapshot_id): status = {'status': constants.STATUS_UNMANAGE_ERROR} if self.driver.driver_handles_share_servers: msg = _("Unmanage snapshot is not supported for " "driver_handles_share_servers=True mode.") self.db.share_snapshot_update(context, snapshot_id, status) LOG.error(_LE("Share snapshot cannot be unmanaged: %s."), msg) return context = context.elevated() snapshot_ref = self.db.share_snapshot_get(context, snapshot_id) share_server = self._get_share_server(context, snapshot_ref['share']) snapshot_instance = self.db.share_snapshot_instance_get( context, snapshot_ref.instance['id'], with_share_data=True ) project_id = snapshot_ref['project_id'] if share_server: msg = _("Unmanage snapshot is not supported for " "share snapshots with share servers.") self.db.share_snapshot_update(context, snapshot_id, status) LOG.error(_LE("Share snapshot cannot be unmanaged: %s."), msg) return try: self.driver.unmanage_snapshot(snapshot_instance) except exception.UnmanageInvalidShareSnapshot as e: self.db.share_snapshot_update(context, snapshot_id, status) LOG.error(_LE("Share snapshot cannot be unmanaged: %s."), e) return try: reservations = QUOTAS.reserve( context, project_id=project_id, snapshots=-1, snapshot_gigabytes=-snapshot_ref['size']) QUOTAS.commit(context, reservations, project_id=project_id) except Exception as e: # Note(imalinovskiy): # Quota reservation errors here are not fatal, because # unmanage is administrator API and he/she could update user # quota usages later if it's required. LOG.warning(_LW("Failed to update quota usages: %s."), e) self.db.share_snapshot_instance_delete( context, snapshot_instance['id']) @add_hooks @utils.require_driver_initialized def delete_share_instance(self, context, share_instance_id, force=False): """Delete a share instance.""" context = context.elevated() share_instance = self._get_share_instance(context, share_instance_id) share_server = self._get_share_server(context, share_instance) try: self.access_helper.update_access_rules( context, share_instance_id, delete_rules="all", share_server=share_server ) except exception.ShareResourceNotFound: LOG.warning(_LW("Share instance %s does not exist in the " "backend."), share_instance_id) except Exception: with excutils.save_and_reraise_exception() as exc_context: if force: msg = _LE("The driver was unable to delete access rules " "for the instance: %s. 
Will attempt to delete " "the instance anyway.") LOG.error(msg, share_instance_id) exc_context.reraise = False else: self.db.share_instance_update( context, share_instance_id, {'status': constants.STATUS_ERROR_DELETING}) try: self.driver.delete_share(context, share_instance, share_server=share_server) except exception.ShareResourceNotFound: LOG.warning(_LW("Share instance %s does not exist in the " "backend."), share_instance_id) except Exception: with excutils.save_and_reraise_exception() as exc_context: if force: msg = _LE("The driver was unable to delete the share " "instance: %s on the backend. Since this " "operation is forced, the instance will be " "deleted from Manila's database. A cleanup on " "the backend may be necessary.") LOG.error(msg, share_instance_id) exc_context.reraise = False else: self.db.share_instance_update( context, share_instance_id, {'status': constants.STATUS_ERROR_DELETING}) self.db.share_instance_delete(context, share_instance_id) LOG.info(_LI("Share instance %s: deleted successfully."), share_instance_id) if CONF.delete_share_server_with_last_share: share_server = self._get_share_server(context, share_instance) if share_server and len(share_server.share_instances) == 0: LOG.debug("Scheduled deletion of share-server " "with id '%s' automatically by " "deletion of last share.", share_server['id']) self.delete_share_server(context, share_server) @periodic_task.periodic_task(spacing=600) @utils.require_driver_initialized def delete_free_share_servers(self, ctxt): if not (self.driver.driver_handles_share_servers and self.configuration.automatic_share_server_cleanup): return LOG.info(_LI("Check for unused share servers to delete.")) updated_before = timeutils.utcnow() - datetime.timedelta( minutes=self.configuration.unused_share_server_cleanup_interval) servers = self.db.share_server_get_all_unused_deletable(ctxt, self.host, updated_before) for server in servers: self.delete_share_server(ctxt, server) @add_hooks @utils.require_driver_initialized def create_snapshot(self, context, share_id, snapshot_id): """Create snapshot for share.""" snapshot_ref = self.db.share_snapshot_get(context, snapshot_id) share_server = self._get_share_server( context, snapshot_ref['share']['instance']) snapshot_instance = self.db.share_snapshot_instance_get( context, snapshot_ref.instance['id'], with_share_data=True ) snapshot_instance_id = snapshot_instance['id'] try: model_update = self.driver.create_snapshot( context, snapshot_instance, share_server=share_server) if model_update: self.db.share_snapshot_instance_update( context, snapshot_instance_id, model_update) except Exception: with excutils.save_and_reraise_exception(): self.db.share_snapshot_instance_update( context, snapshot_instance_id, {'status': constants.STATUS_ERROR}) self.db.share_snapshot_instance_update( context, snapshot_instance_id, {'status': constants.STATUS_AVAILABLE, 'progress': '100%'} ) return snapshot_id @add_hooks @utils.require_driver_initialized def delete_snapshot(self, context, snapshot_id): """Delete share snapshot.""" context = context.elevated() snapshot_ref = self.db.share_snapshot_get(context, snapshot_id) share_server = self._get_share_server( context, snapshot_ref['share']['instance']) snapshot_instance = self.db.share_snapshot_instance_get( context, snapshot_ref.instance['id'], with_share_data=True ) snapshot_instance_id = snapshot_instance['id'] if context.project_id != snapshot_ref['project_id']: project_id = snapshot_ref['project_id'] else: project_id = context.project_id try: 
self.driver.delete_snapshot(context, snapshot_instance, share_server=share_server) except exception.ShareSnapshotIsBusy: self.db.share_snapshot_instance_update( context, snapshot_instance_id, {'status': constants.STATUS_AVAILABLE}) except Exception: with excutils.save_and_reraise_exception(): self.db.share_snapshot_instance_update( context, snapshot_instance_id, {'status': constants.STATUS_ERROR_DELETING}) else: self.db.share_snapshot_instance_delete( context, snapshot_instance_id) try: reservations = QUOTAS.reserve( context, project_id=project_id, snapshots=-1, snapshot_gigabytes=-snapshot_ref['size'], user_id=snapshot_ref['user_id']) except Exception: reservations = None LOG.exception(_LE("Failed to update usages deleting snapshot")) if reservations: QUOTAS.commit(context, reservations, project_id=project_id, user_id=snapshot_ref['user_id']) @add_hooks @utils.require_driver_initialized @locked_share_replica_operation def create_replicated_snapshot(self, context, snapshot_id, share_id=None): """Create a snapshot for a replicated share.""" # Grab the snapshot and replica information from the DB. snapshot = self.db.share_snapshot_get(context, snapshot_id) share_server = self._get_share_server(context, snapshot['share']) replica_snapshots = ( self.db.share_snapshot_instance_get_all_with_filters( context, {'snapshot_ids': snapshot['id']}, with_share_data=True) ) replica_list = ( self.db.share_replicas_get_all_by_share( context, share_id, with_share_data=True, with_share_server=True) ) # Make primitives to pass the information to the driver. replica_list = [self._get_share_replica_dict(context, r) for r in replica_list] replica_snapshots = [self._get_snapshot_instance_dict(context, s) for s in replica_snapshots] updated_instances = [] try: updated_instances = self.driver.create_replicated_snapshot( context, replica_list, replica_snapshots, share_server=share_server) or [] except Exception: with excutils.save_and_reraise_exception(): for instance in replica_snapshots: self.db.share_snapshot_instance_update( context, instance['id'], {'status': constants.STATUS_ERROR}) for instance in updated_instances: if instance['status'] == constants.STATUS_AVAILABLE: instance.update({'progress': '100%'}) self.db.share_snapshot_instance_update( context, instance['id'], instance) @add_hooks @utils.require_driver_initialized @locked_share_replica_operation def delete_replicated_snapshot(self, context, snapshot_id, share_id=None, force=False): """Delete a snapshot from a replicated share.""" # Grab the replica and snapshot information from the DB. snapshot = self.db.share_snapshot_get(context, snapshot_id) share_server = self._get_share_server(context, snapshot['share']) replica_snapshots = ( self.db.share_snapshot_instance_get_all_with_filters( context, {'snapshot_ids': snapshot['id']}, with_share_data=True) ) replica_list = ( self.db.share_replicas_get_all_by_share( context, share_id, with_share_data=True, with_share_server=True) ) replica_list = [self._get_share_replica_dict(context, r) for r in replica_list] replica_snapshots = [self._get_snapshot_instance_dict(context, s) for s in replica_snapshots] deleted_instances = [] updated_instances = [] db_force_delete_msg = _('The driver was unable to delete some or all ' 'of the share replica snapshots on the ' 'backend/s. 
Since this operation is forced, ' 'the replica snapshots will be deleted from ' 'Manila.') try: updated_instances = self.driver.delete_replicated_snapshot( context, replica_list, replica_snapshots, share_server=share_server) or [] except Exception: with excutils.save_and_reraise_exception() as e: if force: # Can delete all instances if forced. deleted_instances = replica_snapshots LOG.exception(db_force_delete_msg) e.reraise = False else: for instance in replica_snapshots: self.db.share_snapshot_instance_update( context, instance['id'], {'status': constants.STATUS_ERROR_DELETING}) if not deleted_instances: if force: # Ignore model updates on 'force' delete. LOG.warning(db_force_delete_msg) deleted_instances = replica_snapshots else: deleted_instances = list(filter( lambda x: x['status'] == constants.STATUS_DELETED, updated_instances)) updated_instances = list(filter( lambda x: x['status'] != constants.STATUS_DELETED, updated_instances)) for instance in deleted_instances: self.db.share_snapshot_instance_delete(context, instance['id']) for instance in updated_instances: self.db.share_snapshot_instance_update( context, instance['id'], instance) @periodic_task.periodic_task(spacing=CONF.replica_state_update_interval) @utils.require_driver_initialized def periodic_share_replica_snapshot_update(self, context): LOG.debug("Updating status of share replica snapshots.") transitional_statuses = (constants.STATUS_CREATING, constants.STATUS_DELETING) replicas = self.db.share_replicas_get_all(context, with_share_data=True) def qualified_replica(r): # Filter non-active replicas belonging to this backend return (share_utils.extract_host(r['host']) == share_utils.extract_host(self.host) and r['replica_state'] != constants.REPLICA_STATE_ACTIVE) host_replicas = list(filter( lambda x: qualified_replica(x), replicas)) transitional_replica_snapshots = [] # Get snapshot instances for each replica that are in 'creating' or # 'deleting' states. 
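# delete_replicated_snapshot() above splits the driver's answer in two:
# when the delete is forced, every snapshot instance is purged from the
# database regardless of what the driver reported; otherwise only instances
# the driver marked 'deleted' are purged and the rest receive their model
# updates. A small sketch of that partitioning (status string illustrative):
def partition_after_delete(updated_instances, all_instances, forced):
    if forced:
        return list(all_instances), []
    deleted = [i for i in updated_instances if i['status'] == 'deleted']
    updated = [i for i in updated_instances if i['status'] != 'deleted']
    return deleted, updated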
for replica in host_replicas: filters = { 'share_instance_ids': replica['id'], 'statuses': transitional_statuses, } replica_snapshots = ( self.db.share_snapshot_instance_get_all_with_filters( context, filters, with_share_data=True) ) transitional_replica_snapshots.extend(replica_snapshots) for replica_snapshot in transitional_replica_snapshots: replica_snapshots = ( self.db.share_snapshot_instance_get_all_with_filters( context, {'snapshot_ids': replica_snapshot['snapshot_id']}) ) share_id = replica_snapshot['share']['share_id'] self._update_replica_snapshot( context, replica_snapshot, replica_snapshots=replica_snapshots, share_id=share_id) @locked_share_replica_operation def _update_replica_snapshot(self, context, replica_snapshot, replica_snapshots=None, share_id=None): # Re-grab the replica: try: share_replica = self.db.share_replica_get( context, replica_snapshot['share_instance_id'], with_share_data=True, with_share_server=True) replica_snapshot = self.db.share_snapshot_instance_get( context, replica_snapshot['id'], with_share_data=True) except exception.NotFound: # Replica may have been deleted, try to cleanup the snapshot # instance try: self.db.share_snapshot_instance_delete( context, replica_snapshot['id']) except exception.ShareSnapshotInstanceNotFound: # snapshot instance has been deleted, nothing to do here pass return msg_payload = { 'snapshot_instance': replica_snapshot['id'], 'replica': share_replica['id'], } LOG.debug("Updating status of replica snapshot %(snapshot_instance)s: " "on replica: %(replica)s", msg_payload) # Grab all the replica and snapshot information. replica_list = ( self.db.share_replicas_get_all_by_share( context, share_replica['share_id'], with_share_data=True, with_share_server=True) ) replica_list = [self._get_share_replica_dict(context, r) for r in replica_list] replica_snapshots = replica_snapshots or [] # Convert data to primitives to send to the driver. 
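# Before calling into the driver, DB rows are converted to plain primitives
# (see _get_share_replica_dict and _get_snapshot_instance_dict used above).
# Their bodies are not shown here, so the sketch below is only an assumed
# shape: copy a whitelist of fields into a plain dict so drivers never hold
# live DB objects. The field names are assumptions for illustration.
def to_primitive(row, fields):
    return {name: row.get(name) for name in fields}


SNAPSHOT_INSTANCE_FIELDS = ('id', 'snapshot_id', 'status', 'progress',
                            'share_instance_id', 'provider_location')
# e.g. primitive = to_primitive(db_row, SNAPSHOT_INSTANCE_FIELDS)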
replica_snapshots = [self._get_snapshot_instance_dict(context, s) for s in replica_snapshots] replica_snapshot = self._get_snapshot_instance_dict( context, replica_snapshot) share_replica = self._get_share_replica_dict(context, share_replica) share_server = share_replica['share_server'] snapshot_update = None try: snapshot_update = self.driver.update_replicated_snapshot( context, replica_list, share_replica, replica_snapshots, replica_snapshot, share_server=share_server) or {} except exception.SnapshotResourceNotFound: if replica_snapshot['status'] == constants.STATUS_DELETING: LOG.info(_LI('Snapshot %(snapshot_instance)s on replica ' '%(replica)s has been deleted.'), msg_payload) self.db.share_snapshot_instance_delete( context, replica_snapshot['id']) else: LOG.exception(_LE("Replica snapshot %s was not found on " "the backend."), replica_snapshot['id']) self.db.share_snapshot_instance_update( context, replica_snapshot['id'], {'status': constants.STATUS_ERROR}) except Exception: LOG.exception(_LE("Driver error while updating replica snapshot: " "%s"), replica_snapshot['id']) self.db.share_snapshot_instance_update( context, replica_snapshot['id'], {'status': constants.STATUS_ERROR}) if snapshot_update: snapshot_status = snapshot_update.get('status') if snapshot_status == constants.STATUS_AVAILABLE: snapshot_update['progress'] = '100%' self.db.share_snapshot_instance_update( context, replica_snapshot['id'], snapshot_update) @add_hooks @utils.require_driver_initialized def allow_access(self, context, share_instance_id, access_rules): """Allow access to some share instance.""" share_instance = self._get_share_instance(context, share_instance_id) status = share_instance['access_rules_status'] if status not in (constants.STATUS_UPDATING, constants.STATUS_UPDATING_MULTIPLE, constants.STATUS_ACTIVE): add_rules = [self.db.share_access_get(context, rule_id) for rule_id in access_rules] share_server = self._get_share_server(context, share_instance) return self.access_helper.update_access_rules( context, share_instance_id, add_rules=add_rules, share_server=share_server ) @add_hooks @utils.require_driver_initialized def deny_access(self, context, share_instance_id, access_rules): """Deny access to some share.""" delete_rules = [self.db.share_access_get(context, rule_id) for rule_id in access_rules] share_instance = self._get_share_instance(context, share_instance_id) share_server = self._get_share_server(context, share_instance) return self.access_helper.update_access_rules( context, share_instance_id, delete_rules=delete_rules, share_server=share_server ) @periodic_task.periodic_task(spacing=CONF.periodic_interval) @utils.require_driver_initialized def _report_driver_status(self, context): LOG.info(_LI('Updating share status')) share_stats = self.driver.get_share_stats(refresh=True) if not share_stats: return if self.driver.driver_handles_share_servers: share_stats['server_pools_mapping'] = ( self._get_servers_pool_mapping(context) ) self.update_service_capabilities(share_stats) @periodic_task.periodic_task(spacing=CONF.periodic_hooks_interval) @utils.require_driver_initialized def _execute_periodic_hook(self, context): """Executes periodic-based hooks.""" # TODO(vponomaryov): add also access rules and share servers share_instances = ( self.db.share_instances_get_all_by_host( context=context, host=self.host)) periodic_hook_data = self.driver.get_periodic_hook_data( context=context, share_instances=share_instances) for hook in self.hooks: hook.execute_periodic_hook( context=context, 
periodic_hook_data=periodic_hook_data) def _get_servers_pool_mapping(self, context): """Get info about relationships between pools and share_servers.""" share_servers = self.db.share_server_get_all_by_host(context, self.host) return {server['id']: self.driver.get_share_server_pools(server) for server in share_servers} @add_hooks @utils.require_driver_initialized def publish_service_capabilities(self, context): """Collect driver status and then publish it.""" self._report_driver_status(context) self._publish_service_capabilities(context) def _form_server_setup_info(self, context, share_server, share_network): # Network info is used by driver for setting up share server # and getting server info on share creation. network_allocations = self.db.network_allocations_get_for_share_server( context, share_server['id'], label='user') admin_network_allocations = ( self.db.network_allocations_get_for_share_server( context, share_server['id'], label='admin')) # NOTE(vponomaryov): following network_info fields are deprecated: # 'segmentation_id', 'cidr' and 'network_type'. # And they should be used from network allocations directly. # They should be removed right after no one uses them. network_info = { 'server_id': share_server['id'], 'segmentation_id': share_network['segmentation_id'], 'cidr': share_network['cidr'], 'neutron_net_id': share_network['neutron_net_id'], 'neutron_subnet_id': share_network['neutron_subnet_id'], 'nova_net_id': share_network['nova_net_id'], 'security_services': share_network['security_services'], 'network_allocations': network_allocations, 'admin_network_allocations': admin_network_allocations, 'backend_details': share_server.get('backend_details'), 'network_type': share_network['network_type'], } return network_info def _setup_server(self, context, share_server, metadata=None): try: share_network = self.db.share_network_get( context, share_server['share_network_id']) self.driver.allocate_network(context, share_server, share_network) self.driver.allocate_admin_network(context, share_server) # Get share_network again in case it was updated. share_network = self.db.share_network_get( context, share_server['share_network_id']) network_info = self._form_server_setup_info( context, share_server, share_network) self._validate_segmentation_id(network_info) # NOTE(vponomaryov): Save security services data to share server # details table to remove dependency from share network after # creation operation. It will allow us to delete share server and # share network separately without dependency on each other. 
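# For illustration only (hypothetical values): the loop below writes one
# backend-details row per security service type, serialized to JSON, e.g.
#     key:   'security_service_ldap'
#     value: jsonutils.dumps({'name': 'ldap1', 'domain': 'example.com',
#                             'server': '10.0.0.10', 'dns_ip': '10.0.0.2',
#                             'user': 'admin', 'type': 'ldap',
#                             'password': '***'})
# which is what later allows the share server to be torn down without
# depending on the share network.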
for security_service in network_info['security_services']: ss_type = security_service['type'] data = { 'name': security_service['name'], 'domain': security_service['domain'], 'server': security_service['server'], 'dns_ip': security_service['dns_ip'], 'user': security_service['user'], 'type': ss_type, 'password': security_service['password'], } self.db.share_server_backend_details_set( context, share_server['id'], {'security_service_' + ss_type: jsonutils.dumps(data)}) server_info = self.driver.setup_server( network_info, metadata=metadata) if server_info and isinstance(server_info, dict): self.db.share_server_backend_details_set( context, share_server['id'], server_info) return self.db.share_server_update( context, share_server['id'], {'status': constants.STATUS_ACTIVE}) except Exception as e: with excutils.save_and_reraise_exception(): details = getattr(e, "detail_data", {}) if isinstance(details, dict): server_details = details.get("server_details", {}) if not isinstance(server_details, dict): LOG.debug( ("Cannot save non-dict data (%(data)s) " "provided as 'server details' of " "failed share server '%(server)s'."), {"server": share_server["id"], "data": server_details}) else: invalid_details = [] for key, value in server_details.items(): try: self.db.share_server_backend_details_set( context, share_server['id'], {key: value}) except Exception: invalid_details.append("%(key)s: %(value)s" % { 'key': six.text_type(key), 'value': six.text_type(value) }) if invalid_details: LOG.debug( ("Following server details " "cannot be written to db : %s"), six.text_type("\n".join(invalid_details))) else: LOG.debug( ("Cannot save non-dict data (%(data)s) provided as " "'detail data' of failed share server '%(server)s'."), {"server": share_server["id"], "data": details}) self.db.share_server_update( context, share_server['id'], {'status': constants.STATUS_ERROR}) self.driver.deallocate_network(context, share_server['id']) def _validate_segmentation_id(self, network_info): """Raises exception if the segmentation type is incorrect.""" if (network_info['network_type'] in (None, 'flat') and network_info['segmentation_id']): msg = _('A segmentation ID %(vlan_id)s was specified but can not ' 'be used with a network of type %(seg_type)s; the ' 'segmentation ID option must be omitted or set to 0') raise exception.NetworkBadConfigurationException( reason=msg % {'vlan_id': network_info['segmentation_id'], 'seg_type': network_info['network_type']}) elif (network_info['network_type'] == 'vlan' and (network_info['segmentation_id'] is None or int(network_info['segmentation_id']) > 4094 or int(network_info['segmentation_id']) < 1)): msg = _('A segmentation ID %s was specified but is not valid for ' 'a VLAN network type; the segmentation ID must be an ' 'integer value in the range of [1,4094]') raise exception.NetworkBadConfigurationException( reason=msg % network_info['segmentation_id']) elif (network_info['network_type'] == 'vxlan' and (network_info['segmentation_id'] is None or int(network_info['segmentation_id']) > 16777215 or int(network_info['segmentation_id']) < 1)): msg = _('A segmentation ID %s was specified but is not valid for ' 'a VXLAN network type; the segmentation ID must be an ' 'integer value in the range of [1,16777215]') raise exception.NetworkBadConfigurationException( reason=msg % network_info['segmentation_id']) elif (network_info['network_type'] == 'gre' and (network_info['segmentation_id'] is None or int(network_info['segmentation_id']) > 4294967295 or int(network_info['segmentation_id']) < 1)): 
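# Illustration only (hypothetical values): the checks in this method
# accept, for example,
#     {'network_type': 'vlan', 'segmentation_id': 100}      # 1..4094
#     {'network_type': 'vxlan', 'segmentation_id': 5000}    # 1..16777215
# and reject
#     {'network_type': 'flat', 'segmentation_id': 42}       # must be unset/0
#     {'network_type': 'gre', 'segmentation_id': 0}         # 1..4294967295
# by raising NetworkBadConfigurationException.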
msg = _('A segmentation ID %s was specified but is not valid for ' 'a GRE network type; the segmentation ID must be an ' 'integer value in the range of [1, 4294967295]') raise exception.NetworkBadConfigurationException( reason=msg % network_info['segmentation_id']) @add_hooks @utils.require_driver_initialized def delete_share_server(self, context, share_server): @utils.synchronized( "share_manager_%s" % share_server['share_network_id']) def _teardown_server(): # NOTE(vponomaryov): Verify that there are no dependent shares. # Without this verification we can hit an exception in the following # case: the share-server-delete API was called after share creation was # scheduled and share_server reached ACTIVE status, but before the # update of the share_server_id field for the share. If so, after the # lock release this method starts executing when the number of # dependent shares has changed. server_id = share_server['id'] shares = self.db.share_instances_get_all_by_share_server( context, server_id) if shares: raise exception.ShareServerInUse(share_server_id=server_id) server_details = share_server['backend_details'] self.db.share_server_update(context, server_id, {'status': constants.STATUS_DELETING}) try: LOG.debug("Deleting share server '%s'", server_id) security_services = [] for ss_name in constants.SECURITY_SERVICES_ALLOWED_TYPES: ss = server_details.get('security_service_' + ss_name) if ss: security_services.append(jsonutils.loads(ss)) self.driver.teardown_server( server_details=server_details, security_services=security_services) except Exception: with excutils.save_and_reraise_exception(): LOG.error( _LE("Share server '%s' failed on deletion."), server_id) self.db.share_server_update( context, server_id, {'status': constants.STATUS_ERROR}) else: self.db.share_server_delete(context, share_server['id']) _teardown_server() LOG.info( _LI("Share server '%s' has been deleted successfully."), share_server['id']) self.driver.deallocate_network(context, share_server['id']) def _verify_unused_share_server_cleanup_interval(self): if not 10 <= self.configuration.\ unused_share_server_cleanup_interval <= 60: raise exception.InvalidParameterValue( "Option unused_share_server_cleanup_interval should be " "between 10 minutes and 1 hour.") @add_hooks @utils.require_driver_initialized def extend_share(self, context, share_id, new_size, reservations): context = context.elevated() share = self.db.share_get(context, share_id) share_instance = self._get_share_instance(context, share) share_server = self._get_share_server(context, share_instance) project_id = share['project_id'] user_id = share['user_id'] try: self.driver.extend_share( share_instance, new_size, share_server=share_server) except Exception as e: LOG.exception(_LE("Extend share failed."), resource=share) try: self.db.share_update( context, share['id'], {'status': constants.STATUS_EXTENDING_ERROR} ) raise exception.ShareExtendingError( reason=six.text_type(e), share_id=share_id) finally: QUOTAS.rollback(context, reservations, project_id=project_id, user_id=user_id) # we give the user_id of the share, to update the quota usage # for the user who created the share, because on share delete # only this quota will be decreased QUOTAS.commit(context, reservations, project_id=project_id, user_id=user_id) share_update = { 'size': int(new_size), # NOTE(u_glide): translation to lower case should be removed in # a row with usage of upper case of share statuses in all places 'status': constants.STATUS_AVAILABLE.lower() } share = self.db.share_update(context, share['id'], share_update) 
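# Illustration only, not part of the original flow: the 'reservations'
# committed above are expected to be created by the caller before this
# RPC, conceptually the same way shrink_share reserves the size delta
# below, e.g. (hypothetical):
#     reservations = QUOTAS.reserve(context, project_id=project_id,
#                                   user_id=user_id,
#                                   gigabytes=new_size - share['size'])
# On driver failure the reservations are rolled back instead of committed.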
LOG.info(_LI("Extend share completed successfully."), resource=share) @add_hooks @utils.require_driver_initialized def shrink_share(self, context, share_id, new_size): context = context.elevated() share = self.db.share_get(context, share_id) share_instance = self._get_share_instance(context, share) share_server = self._get_share_server(context, share_instance) project_id = share['project_id'] user_id = share['user_id'] new_size = int(new_size) def error_occurred(exc, msg, status=constants.STATUS_SHRINKING_ERROR): LOG.exception(msg, resource=share) self.db.share_update(context, share['id'], {'status': status}) raise exception.ShareShrinkingError( reason=six.text_type(exc), share_id=share_id) reservations = None try: size_decrease = int(share['size']) - new_size # we give the user_id of the share, to update the quota usage # for the user, who created the share, because on share delete # only this quota will be decreased reservations = QUOTAS.reserve(context, project_id=project_id, user_id=user_id, gigabytes=-size_decrease) except Exception as e: error_occurred( e, _LE("Failed to update quota on share shrinking.")) try: self.driver.shrink_share( share_instance, new_size, share_server=share_server) # NOTE(u_glide): Replace following except block by error notification # when Manila has such mechanism. It's possible because drivers # shouldn't shrink share when this validation error occurs. except Exception as e: if isinstance(e, exception.ShareShrinkingPossibleDataLoss): msg = _LE("Shrink share failed due to possible data loss.") status = constants.STATUS_SHRINKING_POSSIBLE_DATA_LOSS_ERROR error_params = {'msg': msg, 'status': status} else: error_params = {'msg': _LE("Shrink share failed.")} try: error_occurred(e, **error_params) finally: QUOTAS.rollback(context, reservations, project_id=project_id, user_id=user_id) QUOTAS.commit(context, reservations, project_id=project_id, user_id=user_id) share_update = { 'size': new_size, 'status': constants.STATUS_AVAILABLE } share = self.db.share_update(context, share['id'], share_update) LOG.info(_LI("Shrink share completed successfully."), resource=share) @utils.require_driver_initialized def create_consistency_group(self, context, cg_id): context = context.elevated() group_ref = self.db.consistency_group_get(context, cg_id) group_ref['host'] = self.host shares = self.db.share_instances_get_all_by_consistency_group_id( context, cg_id) source_cgsnapshot_id = group_ref.get("source_cgsnapshot_id") snap_ref = None parent_share_server_id = None if source_cgsnapshot_id: snap_ref = self.db.cgsnapshot_get(context, source_cgsnapshot_id) for member in snap_ref['cgsnapshot_members']: member['share'] = self.db.share_instance_get( context, member['share_instance_id'], with_share_data=True) member['share_id'] = member['share_instance_id'] if 'consistency_group' in snap_ref: parent_share_server_id = snap_ref['consistency_group'][ 'share_server_id'] status = constants.STATUS_AVAILABLE model_update = False share_network_id = group_ref.get('share_network_id', None) share_server = None if parent_share_server_id and self.driver.driver_handles_share_servers: share_server = self.db.share_server_get(context, parent_share_server_id) share_network_id = share_server['share_network_id'] if share_network_id and not self.driver.driver_handles_share_servers: self.db.consistency_group_update( context, cg_id, {'status': constants.STATUS_ERROR}) msg = _("Driver does not expect share-network to be provided " "with current configuration.") raise exception.InvalidInput(reason=msg) if not 
share_server and share_network_id: try: share_server, group_ref = self._provide_share_server_for_cg( context, share_network_id, group_ref, cgsnapshot=snap_ref ) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed to get share server" " for consistency group creation.")) self.db.consistency_group_update( context, cg_id, {'status': constants.STATUS_ERROR}) try: # TODO(ameade): Add notification for create.start LOG.info(_LI("Consistency group %s: creating"), cg_id) model_update, share_update_list = None, None group_ref['shares'] = shares if snap_ref: model_update, share_update_list = ( self.driver.create_consistency_group_from_cgsnapshot( context, group_ref, snap_ref, share_server=share_server)) else: model_update = self.driver.create_consistency_group( context, group_ref, share_server=share_server) if model_update: group_ref = self.db.consistency_group_update(context, group_ref['id'], model_update) if share_update_list: for share in share_update_list: values = copy.deepcopy(share) values.pop('id') export_locations = values.pop('export_locations') self.db.share_instance_update(context, share['id'], values) self.db.share_export_locations_update(context, share['id'], export_locations) except Exception: with excutils.save_and_reraise_exception(): self.db.consistency_group_update( context, group_ref['id'], {'status': constants.STATUS_ERROR}) for share in shares: self.db.share_instance_update( context, share['id'], {'status': constants.STATUS_ERROR}) LOG.error(_LE("Consistency group %s: create failed"), cg_id) now = timeutils.utcnow() for share in shares: self.db.share_instance_update( context, share['id'], {'status': constants.STATUS_AVAILABLE}) self.db.consistency_group_update(context, group_ref['id'], {'status': status, 'created_at': now}) LOG.info(_LI("Consistency group %s: created successfully"), cg_id) # TODO(ameade): Add notification for create.end return group_ref['id'] @utils.require_driver_initialized def delete_consistency_group(self, context, cg_id): context = context.elevated() group_ref = self.db.consistency_group_get(context, cg_id) group_ref['host'] = self.host group_ref['shares'] = ( self.db.share_instances_get_all_by_consistency_group_id( context, cg_id)) model_update = False # TODO(ameade): Add notification for delete.start try: LOG.info(_LI("Consistency group %s: deleting"), cg_id) share_server = None if group_ref.get('share_server_id'): share_server = self.db.share_server_get( context, group_ref['share_server_id']) model_update = self.driver.delete_consistency_group( context, group_ref, share_server=share_server) if model_update: group_ref = self.db.consistency_group_update( context, group_ref['id'], model_update) except Exception: with excutils.save_and_reraise_exception(): self.db.consistency_group_update( context, group_ref['id'], {'status': constants.STATUS_ERROR}) LOG.error(_LE("Consistency group %s: delete failed"), group_ref['id']) self.db.consistency_group_destroy(context, cg_id) LOG.info(_LI("Consistency group %s: deleted successfully"), cg_id) # TODO(ameade): Add notification for delete.end @utils.require_driver_initialized def create_cgsnapshot(self, context, cgsnapshot_id): context = context.elevated() snap_ref = self.db.cgsnapshot_get(context, cgsnapshot_id) for member in snap_ref['cgsnapshot_members']: member['share'] = self.db.share_instance_get( context, member['share_instance_id'], with_share_data=True) member['share_id'] = member['share_instance_id'] status = constants.STATUS_AVAILABLE snapshot_update = False try: 
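# Illustration only (hypothetical values): the driver call below is
# expected to return a (snapshot_update, member_update_list) tuple, e.g.
#     ({'status': 'available'},
#      [{'id': '6813e06b-...', 'status': 'available'}])
# Either element may be None/empty; member updates are folded into
# 'cgsnapshot_members' before the DB update.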
LOG.info(_LI("Consistency group snapshot %s: creating"), cgsnapshot_id) share_server = None if snap_ref['consistency_group'].get('share_server_id'): share_server = self.db.share_server_get( context, snap_ref['consistency_group']['share_server_id']) snapshot_update, member_update_list = ( self.driver.create_cgsnapshot(context, snap_ref, share_server=share_server)) if member_update_list: snapshot_update = snapshot_update or {} snapshot_update['cgsnapshot_members'] = [] for update in (member_update_list or []): snapshot_update['cgsnapshot_members'].append(update) if snapshot_update: snap_ref = self.db.cgsnapshot_update( context, snap_ref['id'], snapshot_update) except Exception: with excutils.save_and_reraise_exception(): self.db.cgsnapshot_update( context, snap_ref['id'], {'status': constants.STATUS_ERROR}) LOG.error(_LE("Consistency group snapshot %s: create failed"), cgsnapshot_id) now = timeutils.utcnow() for member in (snap_ref.get('cgsnapshot_members') or []): update = {'status': status, 'created_at': now} self.db.cgsnapshot_member_update(context, member['id'], update) self.db.cgsnapshot_update(context, snap_ref['id'], {'status': status, 'created_at': now}) LOG.info(_LI("Consistency group snapshot %s: created successfully"), cgsnapshot_id) return snap_ref['id'] @utils.require_driver_initialized def delete_cgsnapshot(self, context, cgsnapshot_id): context = context.elevated() snap_ref = self.db.cgsnapshot_get(context, cgsnapshot_id) for member in snap_ref['cgsnapshot_members']: member['share'] = self.db.share_instance_get( context, member['share_instance_id'], with_share_data=True) member['share_id'] = member['share_instance_id'] snapshot_update = False try: LOG.info(_LI("Consistency group snapshot %s: deleting"), cgsnapshot_id) share_server = None if snap_ref['consistency_group'].get('share_server_id'): share_server = self.db.share_server_get( context, snap_ref['consistency_group']['share_server_id']) snapshot_update, member_update_list = ( self.driver.delete_cgsnapshot(context, snap_ref, share_server=share_server)) if member_update_list: snapshot_update = snapshot_update or {} snapshot_update['cgsnapshot_members'] = [] for update in (member_update_list or []): snapshot_update['cgsnapshot_members'].append(update) if snapshot_update: snap_ref = self.db.cgsnapshot_update( context, snap_ref['id'], snapshot_update) except Exception: with excutils.save_and_reraise_exception(): self.db.cgsnapshot_update( context, snap_ref['id'], {'status': constants.STATUS_ERROR}) LOG.error(_LE("Consistency group snapshot %s: delete failed"), snap_ref['name']) self.db.cgsnapshot_destroy(context, cgsnapshot_id) LOG.info(_LI("Consistency group snapshot %s: deleted successfully"), cgsnapshot_id) def _get_share_replica_dict(self, context, share_replica): # TODO(gouthamr): remove method when the db layer returns primitives share_replica_ref = { 'id': share_replica.get('id'), 'share_id': share_replica.get('share_id'), 'host': share_replica.get('host'), 'status': share_replica.get('status'), 'replica_state': share_replica.get('replica_state'), 'availability_zone_id': share_replica.get('availability_zone_id'), 'export_locations': share_replica.get('export_locations') or [], 'share_network_id': share_replica.get('share_network_id'), 'share_server_id': share_replica.get('share_server_id'), 'deleted': share_replica.get('deleted'), 'terminated_at': share_replica.get('terminated_at'), 'launched_at': share_replica.get('launched_at'), 'scheduled_at': share_replica.get('scheduled_at'), 'share_server': 
self._get_share_server(context, share_replica), 'access_rules_status': share_replica.get('access_rules_status'), # Share details 'user_id': share_replica.get('user_id'), 'project_id': share_replica.get('project_id'), 'size': share_replica.get('size'), 'display_name': share_replica.get('display_name'), 'display_description': share_replica.get('display_description'), 'snapshot_id': share_replica.get('snapshot_id'), 'share_proto': share_replica.get('share_proto'), 'share_type_id': share_replica.get('share_type_id'), 'is_public': share_replica.get('is_public'), 'consistency_group_id': share_replica.get('consistency_group_id'), 'source_cgsnapshot_member_id': share_replica.get( 'source_cgsnapshot_member_id'), } return share_replica_ref def _get_snapshot_instance_dict(self, context, snapshot_instance): # TODO(gouthamr): remove method when the db layer returns primitives snapshot_instance_ref = { 'name': snapshot_instance.get('name'), 'share_id': snapshot_instance.get('share_id'), 'share_name': snapshot_instance.get('share_name'), 'status': snapshot_instance.get('status'), 'id': snapshot_instance.get('id'), 'deleted': snapshot_instance.get('deleted') or False, 'created_at': snapshot_instance.get('created_at'), 'share': snapshot_instance.get('share'), 'updated_at': snapshot_instance.get('updated_at'), 'share_instance_id': snapshot_instance.get('share_instance_id'), 'snapshot_id': snapshot_instance.get('snapshot_id'), 'progress': snapshot_instance.get('progress'), 'deleted_at': snapshot_instance.get('deleted_at'), 'provider_location': snapshot_instance.get('provider_location'), } return snapshot_instance_ref manila-2.0.0/manila/share/access.py0000664000567000056710000001512412701407107020273 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log import six from manila.common import constants from manila.i18n import _LI LOG = log.getLogger(__name__) class ShareInstanceAccess(object): def __init__(self, db, driver): self.db = db self.driver = driver def update_access_rules(self, context, share_instance_id, add_rules=None, delete_rules=None, share_server=None): """Update access rules in driver and database for given share instance. :param context: current context :param share_instance_id: Id of the share instance model :param add_rules: list with ShareAccessMapping models or None - rules which should be added :param delete_rules: list with ShareAccessMapping models, "all", None - rules which should be deleted. If "all" is provided - all rules will be deleted. 
:param share_server: Share server model or None """ self.db.share_instance_update_access_status( context, share_instance_id, constants.STATUS_UPDATING ) share_instance = self.db.share_instance_get( context, share_instance_id, with_share_data=True) add_rules = add_rules or [] delete_rules = delete_rules or [] remove_rules = None if six.text_type(delete_rules).lower() == "all": # NOTE(ganso): if we are deleting an instance or clearing all # the rules, we want to remove only the ones related # to this instance. delete_rules = self.db.share_access_get_all_for_instance( context, share_instance['id']) rules = [] else: rules = self.db.share_access_get_all_for_instance( context, share_instance['id']) if delete_rules: delete_ids = [rule['id'] for rule in delete_rules] rules = list(filter(lambda r: r['id'] not in delete_ids, rules)) # NOTE(ganso): trigger maintenance mode if share_instance['access_rules_status'] == ( constants.STATUS_ERROR): remove_rules = delete_rules delete_rules = [] try: try: self.driver.update_access( context, share_instance, rules, add_rules=add_rules, delete_rules=delete_rules, share_server=share_server ) except NotImplementedError: # NOTE(u_glide): Fallback to legacy allow_access/deny_access # for drivers without update_access() method support self._update_access_fallback(add_rules, context, delete_rules, remove_rules, share_instance, share_server) except Exception: self.db.share_instance_update_access_status( context, share_instance['id'], constants.STATUS_ERROR) raise # NOTE(ganso): remove rules after maintenance is complete if remove_rules: delete_rules = remove_rules self._remove_access_rules(context, delete_rules, share_instance['id']) share_instance = self.db.share_instance_get(context, share_instance_id, with_share_data=True) if self._check_needs_refresh(context, rules, share_instance): self.update_access_rules(context, share_instance_id, share_server=share_server) else: self.db.share_instance_update_access_status( context, share_instance['id'], constants.STATUS_ACTIVE ) LOG.info(_LI("Access rules were successfully applied for " "share instance: %s"), share_instance['id']) def _check_needs_refresh(self, context, rules, share_instance): rule_ids = set([rule['id'] for rule in rules]) queried_rules = self.db.share_access_get_all_for_instance( context, share_instance['id']) queried_ids = set([rule['id'] for rule in queried_rules]) access_rules_status = share_instance['access_rules_status'] return (access_rules_status == constants.STATUS_UPDATING_MULTIPLE or rule_ids != queried_ids) def _update_access_fallback(self, add_rules, context, delete_rules, remove_rules, share_instance, share_server): for rule in add_rules: LOG.info( _LI("Applying access rule '%(rule)s' for share " "instance '%(instance)s'"), {'rule': rule['id'], 'instance': share_instance['id']} ) self.driver.allow_access( context, share_instance, rule, share_server=share_server ) # NOTE(ganso): Fallback mode temporary compatibility workaround if remove_rules: delete_rules = remove_rules for rule in delete_rules: LOG.info( _LI("Denying access rule '%(rule)s' from share " "instance '%(instance)s'"), {'rule': rule['id'], 'instance': share_instance['id']} ) self.driver.deny_access( context, share_instance, rule, share_server=share_server ) def _remove_access_rules(self, context, access_rules, share_instance_id): if not access_rules: return for rule in access_rules: access_mapping = self.db.share_instance_access_get( context, rule['id'], share_instance_id) self.db.share_instance_access_delete(context, 
access_mapping['id']) manila-2.0.0/manila/share/driver.py0000664000567000056710000022664412701407112020334 0ustar jenkinsjenkins00000000000000# Copyright 2012 NetApp # Copyright 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Drivers for shares. """ import time from oslo_config import cfg from oslo_log import log from manila import exception from manila.i18n import _, _LE from manila import network from manila import utils LOG = log.getLogger(__name__) share_opts = [ # NOTE(rushiagr): Reasonable to define this option at only one place. cfg.IntOpt( 'num_shell_tries', default=3, help='Number of times to attempt to run flakey shell commands.'), cfg.IntOpt( 'reserved_share_percentage', default=0, help='The percentage of backend capacity reserved.'), cfg.StrOpt( 'share_backend_name', help='The backend name for a given driver implementation.'), cfg.StrOpt( 'network_config_group', help="Name of the configuration group in the Manila conf file " "to look for network config options." "If not set, the share backend's config group will be used." "If an option is not found within provided group, then" "'DEFAULT' group will be used for search of option."), cfg.BoolOpt( 'driver_handles_share_servers', help="There are two possible approaches for share drivers in Manila. " "First is when share driver is able to handle share-servers and " "second when not. Drivers can support either both or only one " "of these approaches. So, set this opt to True if share driver " "is able to handle share servers and it is desired mode else set " "False. It is set to None by default to make this choice " "intentional."), cfg.FloatOpt( 'max_over_subscription_ratio', default=20.0, help='Float representation of the over subscription ratio ' 'when thin provisioning is involved. Default ratio is ' '20.0, meaning provisioned capacity can be 20 times ' 'the total physical capacity. If the ratio is 10.5, it ' 'means provisioned capacity can be 10.5 times the ' 'total physical capacity. A ratio of 1.0 means ' 'provisioned capacity cannot exceed the total physical ' 'capacity. A ratio lower than 1.0 is invalid.'), cfg.ListOpt( 'migration_ignore_files', default=['lost+found'], help="List of files and folders to be ignored when migrating shares. " "Items should be names (not including any path)."), cfg.StrOpt( 'share_mount_template', default='mount -vt %(proto)s %(export)s %(path)s', help="The template for mounting shares for this backend. Must specify " "the executable with all necessary parameters for the protocol " "supported. 'proto' template element may not be required if " "included in the command. 'export' and 'path' template elements " "are required. It is advisable to separate different commands " "per backend."), cfg.StrOpt( 'share_unmount_template', default='umount -v %(path)s', help="The template for unmounting shares for this backend. Must " "specify the executable with all necessary parameters for the " "protocol supported. 'path' template element is required. 
It is " "advisable to separate different commands per backend."), cfg.BoolOpt( 'migration_readonly_rules_support', default=True, deprecated_name='migration_readonly_support', help="Specify whether read only access rule mode is supported in this " "backend."), cfg.StrOpt( "admin_network_config_group", help="If share driver requires to setup admin network for share, then " "define network plugin config options in some separate config " "group and set its name here. Used only with another " "option 'driver_handles_share_servers' set to 'True'."), # Replication option/s cfg.StrOpt( "replication_domain", default=None, help="A string specifying the replication domain that the backend " "belongs to. This option needs to be specified the same in the " "configuration sections of all backends that support " "replication between each other. If this option is not " "specified in the group, it means that replication is not " "enabled on the backend."), ] ssh_opts = [ cfg.IntOpt( 'ssh_conn_timeout', default=60, help='Backend server SSH connection timeout.'), cfg.IntOpt( 'ssh_min_pool_conn', default=1, help='Minimum number of connections in the SSH pool.'), cfg.IntOpt( 'ssh_max_pool_conn', default=10, help='Maximum number of connections in the SSH pool.'), ] ganesha_opts = [ cfg.StrOpt('ganesha_config_dir', default='/etc/ganesha', help='Directory where Ganesha config files are stored.'), cfg.StrOpt('ganesha_config_path', default='$ganesha_config_dir/ganesha.conf', help='Path to main Ganesha config file.'), cfg.StrOpt('ganesha_nfs_export_options', default='maxread = 65536, prefread = 65536', help='Options to use when exporting a share using ganesha ' 'NFS server. Note that these defaults can be overridden ' 'when a share is created by passing metadata with key ' 'name export_options. Also note the complete set of ' 'default ganesha export options is specified in ' 'ganesha_utils. (GPFS only.)'), cfg.StrOpt('ganesha_service_name', default='ganesha.nfsd', help='Name of the ganesha nfs service.'), cfg.StrOpt('ganesha_db_path', default='$state_path/manila-ganesha.db', help='Location of Ganesha database file. ' '(Ganesha module only.)'), cfg.StrOpt('ganesha_export_dir', default='$ganesha_config_dir/export.d', help='Path to directory containing Ganesha export ' 'configuration. (Ganesha module only.)'), cfg.StrOpt('ganesha_export_template_dir', default='/etc/manila/ganesha-export-templ.d', help='Path to directory containing Ganesha export ' 'block templates. (Ganesha module only.)'), ] CONF = cfg.CONF CONF.register_opts(share_opts) CONF.register_opts(ssh_opts) CONF.register_opts(ganesha_opts) class ExecuteMixin(object): """Provides an executable functionality to a driver class.""" def init_execute_mixin(self, *args, **kwargs): if self.configuration: self.configuration.append_config_values(ssh_opts) self.set_execute(kwargs.pop('execute', utils.execute)) def set_execute(self, execute): self._execute = execute def _try_execute(self, *command, **kwargs): # NOTE(vish): Volume commands can partially fail due to timing, but # running them a second time on failure will usually # recover nicely. tries = 0 while True: try: self._execute(*command, **kwargs) return True except exception.ProcessExecutionError: tries += 1 if tries >= self.configuration.num_shell_tries: raise LOG.exception(_LE("Recovering from a failed execute. 
" "Try number %s"), tries) time.sleep(tries ** 2) class GaneshaMixin(object): """Augment derived classes with Ganesha configuration.""" def init_ganesha_mixin(self, *args, **kwargs): if self.configuration: self.configuration.append_config_values(ganesha_opts) class ShareDriver(object): """Class defines interface of NAS driver.""" def __init__(self, driver_handles_share_servers, *args, **kwargs): """Implements base functionality for share drivers. :param driver_handles_share_servers: expected boolean value or tuple/list/set of boolean values. There are two possible approaches for share drivers in Manila. First is when share driver is able to handle share-servers and second when not. Drivers can support either both (indicated by a tuple/set/list with (True, False)) or only one of these approaches. So, it is allowed to be 'True' when share driver does support handling of share servers and allowed to be 'False' when it does support usage of unhandled share-servers that are not tracked by Manila. Share drivers are allowed to work only in one of two possible driver modes, that is why only one should be chosen. :param config_opts: tuple, list or set of config option lists that should be registered in driver's configuration right after this attribute is created. Useful for usage with mixin classes. """ super(ShareDriver, self).__init__() self.configuration = kwargs.get('configuration', None) self.initialized = False self._stats = {} self.pools = [] if self.configuration: self.configuration.append_config_values(share_opts) network_config_group = (self.configuration.network_config_group or self.configuration.config_group) admin_network_config_group = ( self.configuration.admin_network_config_group) else: network_config_group = None admin_network_config_group = ( CONF.admin_network_config_group) self._verify_share_server_handling(driver_handles_share_servers) if self.driver_handles_share_servers: # Enable common network self.network_api = network.API( config_group_name=network_config_group) # Enable admin network if admin_network_config_group: self._admin_network_api = network.API( config_group_name=admin_network_config_group, label='admin') for config_opt_set in kwargs.get('config_opts', []): self.configuration.append_config_values(config_opt_set) if hasattr(self, 'init_execute_mixin'): # Instance with 'ExecuteMixin' self.init_execute_mixin(*args, **kwargs) # pylint: disable=E1101 if hasattr(self, 'init_ganesha_mixin'): # Instance with 'GaneshaMixin' self.init_ganesha_mixin(*args, **kwargs) # pylint: disable=E1101 @property def admin_network_api(self): if hasattr(self, '_admin_network_api'): return self._admin_network_api @property def driver_handles_share_servers(self): if self.configuration: return self.configuration.safe_get('driver_handles_share_servers') return CONF.driver_handles_share_servers @property def replication_domain(self): if self.configuration: return self.configuration.safe_get('replication_domain') return CONF.replication_domain def _verify_share_server_handling(self, driver_handles_share_servers): """Verifies driver_handles_share_servers and given configuration.""" if not isinstance(self.driver_handles_share_servers, bool): raise exception.ManilaException( "Config opt 'driver_handles_share_servers' has improper " "value - '%s'. Please define it as boolean." 
% self.driver_handles_share_servers) elif isinstance(driver_handles_share_servers, bool): driver_handles_share_servers = [driver_handles_share_servers] elif not isinstance(driver_handles_share_servers, (tuple, list, set)): raise exception.ManilaException( "Improper data provided for 'driver_handles_share_servers' - " "%s" % driver_handles_share_servers) if any(not isinstance(v, bool) for v in driver_handles_share_servers): raise exception.ManilaException( "Provided wrong data: %s" % driver_handles_share_servers) if (self.driver_handles_share_servers not in driver_handles_share_servers): raise exception.ManilaException( "Driver does not support mode 'driver_handles_share_servers=" "%(actual)s'. It can be used only with value '%(allowed)s'." % {'actual': self.driver_handles_share_servers, 'allowed': driver_handles_share_servers}) def migration_start(self, context, share_ref, share_server, host, dest_driver_migration_info, notify): """Is called to perform 1st phase of driver migration of a given share. Driver should implement this method if willing to perform migration in an optimized way, useful for when driver understands destination backend. :param context: The 'context.RequestContext' object for the request. :param share_ref: Reference to the share being migrated. :param share_server: Share server model or None. :param host: Destination host and its capabilities. :param dest_driver_migration_info: Migration information provided by destination host. :param notify: whether the migration should complete or wait for 2nd phase call. Driver may throw exception when validating this parameter, exception if does not support 1-phase or 2-phase approach. :returns: Boolean value indicating if driver migration succeeded. :returns: Dictionary containing a model update. """ return None, None def migration_complete(self, context, share_ref, share_server, dest_driver_migration_info): """Is called to perform 2nd phase of driver migration of a given share. If driver is implementing 2-phase migration, this method should perform tasks related to the 2nd phase of migration, thus completing it. :param context: The 'context.RequestContext' object for the request. :param share_ref: Reference to the share being migrated. :param share_server: Share server model or None. :param dest_driver_migration_info: Migration information provided by destination host. :returns: Dictionary containing a model update. """ return None def migration_cancel(self, context, share_ref, share_server, dest_driver_migration_info): """Is called to cancel driver migration. If possible, driver can implement a way to cancel an in-progress migration. :param context: The 'context.RequestContext' object for the request. :param share_ref: Reference to the share being migrated. :param share_server: Share server model or None. :param dest_driver_migration_info: Migration information provided by destination host. """ raise NotImplementedError() def migration_get_progress(self, context, share_ref, share_server, dest_driver_migration_info): """Is called to get migration progress. If possible, driver can implement a way to return migration progress information. :param context: The 'context.RequestContext' object for the request. :param share_ref: Reference to the share being migrated. :param share_server: Share server model or None. :param dest_driver_migration_info: Migration information provided by destination host. :return: A dictionary with 'total_progress' field containing the percentage value. 
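EXAMPLE (illustrative value only -- the actual figure is driver-specific): a migration that is three quarters done might be reported as .. code:: {'total_progress': 75} 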
""" raise NotImplementedError() def migration_get_driver_info(self, context, share, share_server): """Is called to provide necessary driver migration logic. :param context: The 'context.RequestContext' object for the request. :param share: Reference to the share being migrated. :param share_server: Share server model or None. :return: A dictionary with migration information. """ return None def migration_get_info(self, context, share, share_server): """Is called to provide necessary generic migration logic. :param context: The 'context.RequestContext' object for the request. :param share: Reference to the share being migrated. :param share_server: Share server model or None. :return: A dictionary with migration information. """ mount_template = self._get_mount_command(context, share, share_server) unmount_template = self._get_unmount_command(context, share, share_server) return {'mount': mount_template, 'unmount': unmount_template} def _get_mount_command(self, context, share_instance, share_server): """Is called to delegate mounting share logic.""" mount_template = self.configuration.safe_get('share_mount_template') mount_export = self._get_mount_export(share_instance, share_server) format_template = {'proto': share_instance['share_proto'].lower(), 'export': mount_export, 'path': '%(path)s'} return mount_template % format_template def _get_mount_export(self, share_instance, share_server): # NOTE(ganso): If drivers want to override the export_location IP, # they can do so using this configuration. This method can also be # overridden if necessary. path = next((x['path'] for x in share_instance['export_locations'] if x['is_admin_only']), None) if not path: path = share_instance['export_locations'][0]['path'] return path def _get_unmount_command(self, context, share_instance, share_server): return self.configuration.safe_get('share_unmount_template') def create_share(self, context, share, share_server=None): """Is called to create share.""" raise NotImplementedError() def create_share_from_snapshot(self, context, share, snapshot, share_server=None): """Is called to create share from snapshot.""" raise NotImplementedError() def create_snapshot(self, context, snapshot, share_server=None): """Is called to create snapshot. :param context: Current context :param snapshot: Snapshot model. Share model could be retrieved through snapshot['share']. :param share_server: Share server model or None. """ raise NotImplementedError() def delete_share(self, context, share, share_server=None): """Is called to remove share.""" raise NotImplementedError() def delete_snapshot(self, context, snapshot, share_server=None): """Is called to remove snapshot. :param context: Current context :param snapshot: Snapshot model. Share model could be retrieved through snapshot['share']. :param share_server: Share server model or None. """ raise NotImplementedError() def get_pool(self, share): """Return pool name where the share resides on. :param share: The share hosted by the driver. """ def ensure_share(self, context, share, share_server=None): """Invoked to ensure that share is exported. Driver can use this method to update the list of export locations of the share if it changes. To do that, you should return list with export locations. 
:return None or list with export locations """ raise NotImplementedError() def allow_access(self, context, share, access, share_server=None): """Allow access to the share.""" raise NotImplementedError() def deny_access(self, context, share, access, share_server=None): """Deny access to the share.""" raise NotImplementedError() def update_access(self, context, share, access_rules, add_rules, delete_rules, share_server=None): """Update access rules for given share. Drivers should support 2 different cases in this method: 1. Recovery after error - 'access_rules' contains all access_rules, 'add_rules' and 'delete_rules' shall be empty. Driver should clear any existent access rules and apply all access rules for given share. This recovery is made at driver start up. 2. Adding/Deleting of several access rules - 'access_rules' contains all access_rules, 'add_rules' and 'delete_rules' contain rules which should be added/deleted. Driver can ignore rules in 'access_rules' and apply only rules from 'add_rules' and 'delete_rules'. Drivers must be mindful of this call for share replicas. When 'update_access' is called on one of the replicas, the call is likely propagated to all replicas belonging to the share, especially when individual rules are added or removed. If a particular access rule does not make sense to the driver in the context of a given replica, the driver should be careful to report a correct behavior, and take meaningful action. For example, if R/W access is requested on a replica that is part of a "readable" type replication; R/O access may be added by the driver instead of R/W. Note that raising an exception *will* result in the access_rules_status on the replica, and the share itself being "out_of_sync". Drivers can sync on the valid access rules that are provided on the create_replica and promote_replica calls. :param context: Current context :param share: Share model with share data. :param access_rules: All access rules for given share :param add_rules: Empty List or List of access rules which should be added. access_rules already contains these rules. :param delete_rules: Empty List or List of access rules which should be removed. access_rules doesn't contain these rules. :param share_server: None or Share server model """ raise NotImplementedError() def check_for_setup_error(self): """Check for setup error.""" max_ratio = self.configuration.safe_get('max_over_subscription_ratio') if not max_ratio or float(max_ratio) < 1.0: msg = (_("Invalid max_over_subscription_ratio '%s'. " "Valid value should be >= 1.0.") % max_ratio) raise exception.InvalidParameterValue(err=msg) def do_setup(self, context): """Any initialization the share driver does while starting.""" def get_share_stats(self, refresh=False): """Get share status. If 'refresh' is True, run update the stats first. """ if refresh: self._update_share_stats() return self._stats def get_network_allocations_number(self): """Returns number of network allocations for creating VIFs. Drivers that use Nova for share servers should return zero (0) here same as Generic driver does. Because Nova will handle network resources allocation. Drivers that handle networking itself should calculate it according to their own requirements. It can have 1+ network interfaces. 
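EXAMPLE (illustrative only): a hypothetical driver that plugs two network interfaces into every share server would simply .. code:: return 2 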
""" raise NotImplementedError() def get_admin_network_allocations_number(self): return 0 def allocate_network(self, context, share_server, share_network, count=None, **kwargs): """Allocate network resources using given network information.""" if count is None: count = self.get_network_allocations_number() if count: kwargs.update(count=count) self.network_api.allocate_network( context, share_server, share_network, **kwargs) def allocate_admin_network(self, context, share_server, count=None, **kwargs): """Allocate admin network resources using given network information.""" if count is None: count = self.get_admin_network_allocations_number() if count and not self.admin_network_api: msg = _("Admin network plugin is not set up.") raise exception.NetworkBadConfigurationException(reason=msg) elif count: kwargs.update(count=count) self.admin_network_api.allocate_network( context, share_server, **kwargs) def deallocate_network(self, context, share_server_id): """Deallocate network resources for the given share server.""" if self.get_network_allocations_number(): self.network_api.deallocate_network(context, share_server_id) def choose_share_server_compatible_with_share(self, context, share_servers, share, snapshot=None, consistency_group=None): """Method that allows driver to choose share server for provided share. If compatible share-server is not found, method should return None. :param context: Current context :param share_servers: list with share-server models :param share: share model :param snapshot: snapshot model :param consistency_group: ConsistencyGroup model with shares :returns: share-server or None """ # If creating in a consistency group, use its share server if consistency_group: for share_server in share_servers: if (consistency_group.get('share_server_id') == share_server['id']): return share_server return None return share_servers[0] if share_servers else None def choose_share_server_compatible_with_cg(self, context, share_servers, cg_ref, cgsnapshot=None): return share_servers[0] if share_servers else None def setup_server(self, *args, **kwargs): if self.driver_handles_share_servers: return self._setup_server(*args, **kwargs) else: LOG.debug( "Skipping step 'setup share server', because driver is " "enabled with mode when Manila does not handle share servers.") def _setup_server(self, network_info, metadata=None): """Sets up and configures share server with given network parameters. Redefine it within share driver when it is going to handle share servers. """ raise NotImplementedError() def manage_existing(self, share, driver_options): """Brings an existing share under Manila management. If provided share is not valid, then raise a ManageInvalidShare exception, specifying a reason for the failure. The share has a share_type, and the driver can inspect that and compare against the properties of the referenced backend share. If they are incompatible, raise a ManageExistingShareTypeMismatch, specifying a reason for the failure. :param share: Share model :param driver_options: Driver-specific options provided by admin. :return: share_update dictionary with required key 'size', which should contain size of the share. """ raise NotImplementedError() def unmanage(self, share): """Removes the specified share from Manila management. Does not delete the underlying backend share. For most drivers, this will not need to do anything. However, some drivers might use this call as an opportunity to clean up any Manila-specific configuration that they have associated with the backend share. 
If provided share cannot be unmanaged, then raise an UnmanageInvalidShare exception, specifying a reason for the failure. """ def manage_existing_snapshot(self, snapshot, driver_options): """Brings an existing snapshot under Manila management. If provided snapshot is not valid, then raise a ManageInvalidShareSnapshot exception, specifying a reason for the failure. :param snapshot: ShareSnapshotInstance model with ShareSnapshot data. Example: {'id': , 'snapshot_id': < snapshot id>, 'provider_location': , ......} :param driver_options: Optional driver-specific options provided by admin. Example: {'key': 'value', ......} :return: model_update dictionary with required key 'size', which should contain size of the share snapshot. """ raise NotImplementedError() def unmanage_snapshot(self, snapshot): """Removes the specified snapshot from Manila management. Does not delete the underlying backend share snapshot. For most drivers, this will not need to do anything. However, some drivers might use this call as an opportunity to clean up any Manila-specific configuration that they have associated with the backend share snapshot. If provided share snapshot cannot be unmanaged, then raise an UnmanageInvalidShareSnapshot exception, specifying a reason for the failure. """ def extend_share(self, share, new_size, share_server=None): """Extends size of existing share. :param share: Share model :param new_size: New size of share (new_size > share['size']) :param share_server: Optional -- Share server model """ raise NotImplementedError() def shrink_share(self, share, new_size, share_server=None): """Shrinks size of existing share. If consumed space on share larger than new_size driver should raise ShareShrinkingPossibleDataLoss exception: raise ShareShrinkingPossibleDataLoss(share_id=share['id']) :param share: Share model :param new_size: New size of share (new_size < share['size']) :param share_server: Optional -- Share server model :raises ShareShrinkingPossibleDataLoss, NotImplementedError """ raise NotImplementedError() def teardown_server(self, *args, **kwargs): if self.driver_handles_share_servers: return self._teardown_server(*args, **kwargs) else: LOG.debug( "Skipping step 'teardown share server', because driver is " "enabled with mode when Manila does not handle share servers.") def _teardown_server(self, server_details, security_services=None): """Tears down share server. Redefine it within share driver when it is going to handle share servers. """ raise NotImplementedError() def _has_redefined_driver_methods(self, methods): """Returns boolean as a result of methods presence and redefinition.""" if not isinstance(methods, (set, list, tuple)): methods = (methods, ) for method_name in methods: method = getattr(type(self), method_name, None) if (not method or method == getattr(ShareDriver, method_name)): return False return True @property def snapshots_are_supported(self): if not hasattr(self, '_snapshots_are_supported'): methods = ( "create_snapshot", "delete_snapshot", "create_share_from_snapshot") # NOTE(vponomaryov): calculate default value for # stat 'snapshot_support' based on implementation of # appropriate methods of this base driver class. self._snapshots_are_supported = self._has_redefined_driver_methods( methods) return self._snapshots_are_supported def _update_share_stats(self, data=None): """Retrieve stats info from share group. :param data: dict -- dict with key-value pairs to redefine common ones. 
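EXAMPLE (illustrative values; 'MyShareDriver' is hypothetical): a child driver usually overrides this method and passes its backend-specific values through 'data': .. code:: def _update_share_stats(self): data = dict( storage_protocol='NFS_CIFS', total_capacity_gb=500, free_capacity_gb=120) super(MyShareDriver, self)._update_share_stats(data) 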
""" LOG.debug("Updating share stats.") backend_name = (self.configuration.safe_get('share_backend_name') or CONF.share_backend_name) # Note(zhiteng): These information are driver/backend specific, # each driver may define these values in its own config options # or fetch from driver specific configuration file. common = dict( share_backend_name=backend_name or 'Generic_NFS', driver_handles_share_servers=self.driver_handles_share_servers, vendor_name='Open Source', driver_version='1.0', storage_protocol=None, total_capacity_gb='unknown', free_capacity_gb='unknown', reserved_percentage=0, qos=False, pools=self.pools or None, snapshot_support=self.snapshots_are_supported, replication_domain=self.replication_domain, ) if isinstance(data, dict): common.update(data) self._stats = common def get_share_server_pools(self, share_server): """Return list of pools related to a particular share server. :param share_server: ShareServer class instance. """ return [] def create_consistency_group(self, context, cg_dict, share_server=None): """Create a consistency group. :param context: :param cg_dict: The consistency group details EXAMPLE: { 'status': 'creating', 'project_id': '13c0be6290934bd98596cfa004650049', 'user_id': 'a0314a441ca842019b0952224aa39192', 'description': None, 'deleted': 'False', 'created_at': datetime.datetime(2015, 8, 10, 15, 14, 6), 'updated_at': None, 'source_cgsnapshot_id': 'f6aa3b59-57eb-421e-965c-4e182538e36a', 'host': 'openstack2@cmodeSSVMNFS', 'deleted_at': None, 'share_types': [], 'id': 'eda52174-0442-476d-9694-a58327466c14', 'name': None } :returns: (cg_model_update, share_update_list) cg_model_update - a dict containing any values to be updated for the CG in the database. This value may be None. """ raise NotImplementedError() def create_consistency_group_from_cgsnapshot(self, context, cg_dict, cgsnapshot_dict, share_server=None): """Create a consistency group from a cgsnapshot. :param context: :param cg_dict: The consistency group details EXAMPLE: .. code:: { 'status': 'creating', 'project_id': '13c0be6290934bd98596cfa004650049', 'user_id': 'a0314a441ca842019b0952224aa39192', 'description': None, 'deleted': 'False', 'created_at': datetime.datetime(2015, 8, 10, 15, 14, 6), 'updated_at': None, 'source_cgsnapshot_id': 'f6aa3b59-57eb-421e-965c-4e182538e36a', 'host': 'openstack2@cmodeSSVMNFS', 'deleted_at': None, 'shares': [], # The new shares being created 'share_types': [], 'id': 'eda52174-0442-476d-9694-a58327466c14', 'name': None } :param cgsnapshot_dict: The cgsnapshot details EXAMPLE: .. 
code:: { 'status': 'available', 'project_id': '13c0be6290934bd98596cfa004650049', 'user_id': 'a0314a441ca842019b0952224aa39192', 'description': None, 'deleted': '0', 'created_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'updated_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'consistency_group_id': '4b04fdc3-00b9-4909-ba1a-06e9b3f88b67', 'cgsnapshot_members': [ { 'status': 'available', 'share_type_id': '1a9ed31e-ee70-483d-93ba-89690e028d7f', 'user_id': 'a0314a441ca842019b0952224aa39192', 'deleted': 'False', 'created_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'share': , 'updated_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'share_proto': 'NFS', 'project_id': '13c0be6290934bd98596cfa004650049', 'cgsnapshot_id': 'f6aa3b59-57eb-421e-965c-4e182538e36a', 'deleted_at': None, 'id': '6813e06b-a8f5-4784-b17d-f3e91afa370e', 'size': 1 } ], 'deleted_at': None, 'id': 'f6aa3b59-57eb-421e-965c-4e182538e36a', 'name': None } :return: (cg_model_update, share_update_list) cg_model_update - a dict containing any values to be updated for the CG in the database. This value may be None. share_update_list - a list of dictionaries containing dicts for every share created in the CG. Any share dicts should at a minimum contain the 'id' key and 'export_locations'. Export locations should be in the same format as returned by a share_create. This list may be empty or None. EXAMPLE: .. code:: [{'id': 'uuid', 'export_locations': ['export_path']}] """ raise NotImplementedError() def delete_consistency_group(self, context, cg_dict, share_server=None): """Delete a consistency group :param context: The request context :param cg_dict: The consistency group details EXAMPLE: .. code:: { 'status': 'creating', 'project_id': '13c0be6290934bd98596cfa004650049', 'user_id': 'a0314a441ca842019b0952224aa39192', 'description': None, 'deleted': 'False', 'created_at': datetime.datetime(2015, 8, 10, 15, 14, 6), 'updated_at': None, 'source_cgsnapshot_id': 'f6aa3b59-57eb-421e-965c-4e182538e36a', 'host': 'openstack2@cmodeSSVMNFS', 'deleted_at': None, 'shares': [], # The new shares being created 'share_types': [], 'id': 'eda52174-0442-476d-9694-a58327466c14', 'name': None } :return: cg_model_update cg_model_update - a dict containing any values to be updated for the CG in the database. This value may be None. """ raise NotImplementedError() def create_cgsnapshot(self, context, snap_dict, share_server=None): """Create a consistency group snapshot. :param context: :param snap_dict: The cgsnapshot details EXAMPLE: .. 
code:: { 'status': 'available', 'project_id': '13c0be6290934bd98596cfa004650049', 'user_id': 'a0314a441ca842019b0952224aa39192', 'description': None, 'deleted': '0', 'created_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'updated_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'consistency_group_id': '4b04fdc3-00b9-4909-ba1a-06e9b3f88b67', 'cgsnapshot_members': [ { 'status': 'available', 'share_type_id': '1a9ed31e-ee70-483d-93ba-89690e028d7f', 'user_id': 'a0314a441ca842019b0952224aa39192', 'deleted': 'False', 'created_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'share': , 'updated_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'share_proto': 'NFS', 'project_id': '13c0be6290934bd98596cfa004650049', 'cgsnapshot_id': 'f6aa3b59-57eb-421e-965c-4e182538e36a', 'deleted_at': None, 'id': '6813e06b-a8f5-4784-b17d-f3e91afa370e', 'size': 1 } ], 'deleted_at': None, 'id': 'f6aa3b59-57eb-421e-965c-4e182538e36a', 'name': None } :return: (cgsnapshot_update, member_update_list) cgsnapshot_update - a dict containing any values to be updated for the CGSnapshot in the database. This value may be None. member_update_list - a list of dictionaries containing for every member of the cgsnapshot. Each dict should contains values to be updated for teh CGSnapshotMember in the database. This list may be empty or None. """ raise NotImplementedError() def delete_cgsnapshot(self, context, snap_dict, share_server=None): """Delete a consistency group snapshot :param context: :param snap_dict: The cgsnapshot details EXAMPLE: .. code:: { 'status': 'available', 'project_id': '13c0be6290934bd98596cfa004650049', 'user_id': 'a0314a441ca842019b0952224aa39192', 'description': None, 'deleted': '0', 'created_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'updated_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'consistency_group_id': '4b04fdc3-00b9-4909-ba1a-06e9b3f88b67', 'cgsnapshot_members': [ { 'status': 'available', 'share_type_id': '1a9ed31e-ee70-483d-93ba-89690e028d7f', 'share_id': 'e14b5174-e534-4f35-bc4f-fe81c1575d6f', 'user_id': 'a0314a441ca842019b0952224aa39192', 'deleted': 'False', 'created_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'share': , 'updated_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'share_proto': 'NFS', 'project_id': '13c0be6290934bd98596cfa004650049', 'cgsnapshot_id': 'f6aa3b59-57eb-421e-965c-4e182538e36a', 'deleted_at': None, 'id': '6813e06b-a8f5-4784-b17d-f3e91afa370e', 'size': 1 } ], 'deleted_at': None, 'id': 'f6aa3b59-57eb-421e-965c-4e182538e36a', 'name': None } :return: (cgsnapshot_update, member_update_list) cgsnapshot_update - a dict containing any values to be updated for the CGSnapshot in the database. This value may be None. """ raise NotImplementedError() def get_periodic_hook_data(self, context, share_instances): """Dedicated for update/extend of data for existing share instances. Redefine this method in share driver to be able to update/change/extend share instances data that will be used by periodic hook action. One of possible updates is add-on of "automount" CLI commands for each share instance for case of notification is enabled using 'hook' approach. :param context: Current context :param share_instances: share instances list provided by share manager :return: list of share instances. """ return share_instances def create_replica(self, context, replica_list, new_replica, access_rules, replica_snapshots, share_server=None): """Replicate the active replica to a new replica on this backend. NOTE: This call is made on the host that the new replica is being created upon. 
:param context: Current context :param replica_list: List of all replicas for a particular share. This list also contains the replica to be created. The 'active' replica will have its 'replica_state' attr set to 'active'. EXAMPLE: .. code:: [ { 'id': 'd487b88d-e428-4230-a465-a800c2cce5f8', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'in_sync', ... 'share_server_id': '4ce78e7b-0ef6-4730-ac2a-fd2defefbd05', 'share_server': or None, }, { 'id': '10e49c3e-aca9-483b-8c2d-1c337b38d6af', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'active', ... 'share_server_id': 'f63629b3-e126-4448-bec2-03f788f76094', 'share_server': or None, }, { 'id': 'e82ff8b6-65f0-11e5-9d70-feff819cdc9f', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'in_sync', ... 'share_server_id': '07574742-67ea-4dfd-9844-9fbd8ada3d87', 'share_server': or None, }, ... ] :param new_replica: The share replica dictionary. EXAMPLE: .. code:: { 'id': 'e82ff8b6-65f0-11e5-9d70-feff819cdc9f', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'deleted': False, 'host': 'openstack2@cmodeSSVMNFS2', 'status': 'creating', 'scheduled_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'launched_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'terminated_at': None, 'replica_state': 'out_of_sync', 'availability_zone_id': 'f6e146d0-65f0-11e5-9d70-feff819cdc9f', 'export_locations': [ models.ShareInstanceExportLocations, ], 'access_rules_status': 'out_of_sync', 'share_network_id': '4ccd5318-65f1-11e5-9d70-feff819cdc9f', 'share_server_id': 'e6155221-ea00-49ef-abf9-9f89b7dd900a', 'share_server': or None, } :param access_rules: A list of access rules that other instances of the share already obey. Drivers are expected to apply access rules to the new replica or disregard access rules that don't apply. EXAMPLE: .. code:: [ { 'id': 'f0875f6f-766b-4865-8b41-cccb4cdf1676', 'deleted' = False, 'share_id' = 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'access_type' = 'ip', 'access_to' = '172.16.20.1', 'access_level' = 'rw', }] :param replica_snapshots: List of dictionaries of snapshot instances for each snapshot of the share whose 'aggregate_status' property was reported to be 'available' when the share manager initiated this request. Each list member will have two sub dictionaries: 'active_replica_snapshot' and 'share_replica_snapshot'. The 'active' replica snapshot corresponds to the instance of the snapshot on any of the 'active' replicas of the share while share_replica_snapshot corresponds to the snapshot instance for the specific replica that will need to exist on the new share replica that is being created. The driver needs to ensure that this snapshot instance is truly available before transitioning the replica from 'out_of_sync' to 'in_sync'. Snapshots instances for snapshots that have an 'aggregate_status' of 'creating' or 'deleting' will be polled for in the update_replicated_snapshot method. EXAMPLE: .. code:: [ { 'active_replica_snapshot': { 'id': '8bda791c-7bb6-4e7b-9b64-fefff85ff13e', 'share_instance_id': '10e49c3e-aca9-483b-8c2d-1c337b38d6af', 'status': 'available', 'provider_location': '/newton/share-snapshot-10e49c3e-aca9', ... }, 'share_replica_snapshot': { 'id': '', 'share_instance_id': 'e82ff8b6-65f0-11e5-9d70-feff819cdc9f', 'status': 'available', 'provider_location': None, ... }, }] :param share_server: or None, Share server of the replica being created. :return: None or a dictionary containing export_locations, replica_state and access_rules_status. 
export_locations is a list of paths and replica_state is one of active, in_sync, out_of_sync or error. A backend supporting 'writable' type replication should return 'active' as the replica_state. Export locations should be in the same format as returned during the create_share call. EXAMPLE: .. code:: { 'export_locations': [ { 'path': '172.16.20.22/sample/export/path', 'is_admin_only': False, 'metadata': {'some_key': 'some_value'}, }, ], 'replica_state': 'in_sync', 'access_rules_status': 'in_sync', } """ raise NotImplementedError() def delete_replica(self, context, replica_list, replica_snapshots, replica, share_server=None): """Delete a replica. NOTE: This call is made on the host that hosts the replica being deleted. :param context: Current context :param replica_list: List of all replicas for a particular share. This list also contains the replica to be deleted. The 'active' replica will have its 'replica_state' attr set to 'active'. EXAMPLE: .. code:: [ { 'id': 'd487b88d-e428-4230-a465-a800c2cce5f8', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'in_sync', ... 'share_server_id': '4ce78e7b-0ef6-4730-ac2a-fd2defefbd05', 'share_server': or None, }, { 'id': '10e49c3e-aca9-483b-8c2d-1c337b38d6af', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'active', ... 'share_server_id': 'f63629b3-e126-4448-bec2-03f788f76094', 'share_server': or None, }, { 'id': 'e82ff8b6-65f0-11e5-9d70-feff819cdc9f', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'in_sync', ... 'share_server_id': '07574742-67ea-4dfd-9844-9fbd8ada3d87', 'share_server': or None, }, ... ] :param replica: Dictionary of the share replica being deleted. EXAMPLE: .. code:: { 'id': 'e82ff8b6-65f0-11e5-9d70-feff819cdc9f', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'deleted': False, 'host': 'openstack2@cmodeSSVMNFS2', 'status': 'available', 'scheduled_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'launched_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'terminated_at': None, 'replica_state': 'in_sync', 'availability_zone_id': 'f6e146d0-65f0-11e5-9d70-feff819cdc9f', 'export_locations': [ models.ShareInstanceExportLocations ], 'access_rules_status': 'out_of_sync', 'share_network_id': '4ccd5318-65f1-11e5-9d70-feff819cdc9f', 'share_server_id': '53099868-65f1-11e5-9d70-feff819cdc9f', 'share_server': or None, } :param replica_snapshots: A list of dictionaries containing snapshot instances that are associated with the share replica being deleted. No model updates are possible in this method. The driver should return when the cleanup is completed on the backend for both, the snapshots and the replica itself. Drivers must handle situations where the snapshot may not yet have finished 'creating' on this replica. EXAMPLE: .. code:: [ { 'id': '89dafd00-0999-4d23-8614-13eaa6b02a3b', 'snapshot_id': '3ce1caf7-0945-45fd-a320-714973e949d3', 'status: 'available', 'share_instance_id': 'e82ff8b6-65f0-11e5-9d70-feff819cdc9f' ... }, { 'id': '8bda791c-7bb6-4e7b-9b64-fefff85ff13e', 'snapshot_id': '13ee5cb5-fc53-4539-9431-d983b56c5c40', 'status: 'creating', 'share_instance_id': 'e82ff8b6-65f0-11e5-9d70-feff819cdc9f' ... }, ... ] :param share_server: or None, Share server of the replica to be deleted. :return: None. :raises Exception. Any exception raised will set the share replica's 'status' and 'replica_state' to 'error_deleting'. It will not affect snapshots belonging to this replica. 
""" raise NotImplementedError() def promote_replica(self, context, replica_list, replica, access_rules, share_server=None): """Promote a replica to 'active' replica state. NOTE: This call is made on the host that hosts the replica being promoted. :param context: Current context :param replica_list: List of all replicas for a particular share. This list also contains the replica to be promoted. The 'active' replica will have its 'replica_state' attr set to 'active'. EXAMPLE: .. code:: [ { 'id': 'd487b88d-e428-4230-a465-a800c2cce5f8', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'in_sync', ... 'share_server_id': '4ce78e7b-0ef6-4730-ac2a-fd2defefbd05', 'share_server': or None, }, { 'id': '10e49c3e-aca9-483b-8c2d-1c337b38d6af', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'active', ... 'share_server_id': 'f63629b3-e126-4448-bec2-03f788f76094', 'share_server': or None, }, { 'id': 'e82ff8b6-65f0-11e5-9d70-feff819cdc9f', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'in_sync', ... 'share_server_id': '07574742-67ea-4dfd-9844-9fbd8ada3d87', 'share_server': or None, }, ... ] :param replica: Dictionary of the replica to be promoted. EXAMPLE: .. code:: { 'id': 'e82ff8b6-65f0-11e5-9d70-feff819cdc9f', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'deleted': False, 'host': 'openstack2@cmodeSSVMNFS2', 'status': 'available', 'scheduled_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'launched_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'terminated_at': None, 'replica_state': 'in_sync', 'availability_zone_id': 'f6e146d0-65f0-11e5-9d70-feff819cdc9f', 'export_locations': [ models.ShareInstanceExportLocations ], 'access_rules_status': 'in_sync', 'share_network_id': '4ccd5318-65f1-11e5-9d70-feff819cdc9f', 'share_server_id': '07574742-67ea-4dfd-9844-9fbd8ada3d87', 'share_server': or None, } :param access_rules: A list of access rules that other instances of the share already obey. EXAMPLE: .. code:: [ { 'id': 'f0875f6f-766b-4865-8b41-cccb4cdf1676', 'deleted' = False, 'share_id' = 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'access_type' = 'ip', 'access_to' = '172.16.20.1', 'access_level' = 'rw', }] :param share_server: or None, Share server of the replica to be promoted. :return: updated_replica_list or None The driver can return the updated list as in the request parameter. Changes that will be updated to the Database are: 'export_locations', 'access_rules_status' and 'replica_state'. :raises Exception This can be any exception derived from BaseException. This is re-raised by the manager after some necessary cleanup. If the driver raises an exception during promotion, it is assumed that all of the replicas of the share are in an inconsistent state. Recovery is only possible through the periodic update call and/or administrator intervention to correct the 'status' of the affected replicas if they become healthy again. """ raise NotImplementedError() def update_replica_state(self, context, replica_list, replica, access_rules, replica_snapshots, share_server=None): """Update the replica_state of a replica. NOTE: This call is made on the host which hosts the replica being updated. Drivers should fix replication relationships that were broken if possible inside this method. This method is called periodically by the share manager; and whenever requested by the administrator through the 'resync' API. :param context: Current context :param replica_list: List of all replicas for a particular share. 
This list also contains the replica to be updated. The 'active' replica will have its 'replica_state' attr set to 'active'. EXAMPLE: .. code:: [ { 'id': 'd487b88d-e428-4230-a465-a800c2cce5f8', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'in_sync', ... 'share_server_id': '4ce78e7b-0ef6-4730-ac2a-fd2defefbd05', 'share_server': or None, }, { 'id': '10e49c3e-aca9-483b-8c2d-1c337b38d6af', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'active', ... 'share_server_id': 'f63629b3-e126-4448-bec2-03f788f76094', 'share_server': or None, }, { 'id': 'e82ff8b6-65f0-11e5-9d70-feff819cdc9f', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'in_sync', ... 'share_server_id': '07574742-67ea-4dfd-9844-9fbd8ada3d87', 'share_server': or None, }, ... ] :param replica: Dictionary of the replica being updated. Replica state will always be 'in_sync', 'out_of_sync', or 'error'. Replicas in 'active' state will not be passed via this parameter. EXAMPLE: .. code:: { 'id': 'd487b88d-e428-4230-a465-a800c2cce5f8', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'deleted': False, 'host': 'openstack2@cmodeSSVMNFS1', 'status': 'available', 'scheduled_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'launched_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'terminated_at': None, 'replica_state': 'in_sync', 'availability_zone_id': 'e2c2db5c-cb2f-4697-9966-c06fb200cb80', 'export_locations': [ models.ShareInstanceExportLocations, ], 'access_rules_status': 'in_sync', 'share_network_id': '4ccd5318-65f1-11e5-9d70-feff819cdc9f', 'share_server_id': '4ce78e7b-0ef6-4730-ac2a-fd2defefbd05', } :param access_rules: A list of access rules that other replicas of the share already obey. The driver could attempt to sync on any un-applied access_rules. EXAMPLE: .. code:: [ { 'id': 'f0875f6f-766b-4865-8b41-cccb4cdf1676', 'deleted' = False, 'share_id' = 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'access_type' = 'ip', 'access_to' = '172.16.20.1', 'access_level' = 'rw', }] :param replica_snapshots: List of dictionaries of snapshot instances for each snapshot of the share whose 'aggregate_status' property was reported to be 'available' when the share manager initiated this request. Each list member will have two sub dictionaries: 'active_replica_snapshot' and 'share_replica_snapshot'. The 'active' replica snapshot corresponds to the instance of the snapshot on any of the 'active' replicas of the share while share_replica_snapshot corresponds to the snapshot instance for the specific replica being updated. The driver needs to ensure that this snapshot instance is truly available before transitioning from 'out_of_sync' to 'in_sync'. Snapshots instances for snapshots that have an 'aggregate_status' of 'creating' or 'deleting' will be polled for in the update_replicated_snapshot method. EXAMPLE: .. code:: [ { 'active_replica_snapshot': { 'id': '8bda791c-7bb6-4e7b-9b64-fefff85ff13e', 'share_instance_id': '10e49c3e-aca9-483b-8c2d-1c337b38d6af', 'status': 'available', 'provider_location': '/newton/share-snapshot-10e49c3e-aca9', ... }, 'share_replica_snapshot': { 'id': , 'share_instance_id': 'd487b88d-e428-4230-a465-a800c2cce5f8', 'status': 'creating', 'provider_location': None, ... }, }] :param share_server: or None :return: replica_state replica_state - a str value denoting the replica_state that the replica can have. Valid values are 'in_sync' and 'out_of_sync' or None (to leave the current replica_state unchanged). 
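        An illustrative, non-normative sketch: a driver that can ask its
        backend whether the replication relationship has caught up might end
        this method with logic such as:

        .. code::

            # '_backend_in_sync' is a hypothetical, driver-specific helper.
            if self._backend_in_sync(replica, share_server=share_server):
                return 'in_sync'
            return 'out_of_sync'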
""" raise NotImplementedError() def create_replicated_snapshot(self, context, replica_list, replica_snapshots, share_server=None): """Create a snapshot on active instance and update across the replicas. NOTE: This call is made on the 'active' replica's host. Drivers are expected to transfer the snapshot created to the respective replicas. The driver is expected to return model updates to the share manager. If it was able to confirm the creation of any number of the snapshot instances passed in this interface, it can set their status to 'available' as a cue for the share manager to set the progress attr to '100%'. :param context: Current context :param replica_list: List of all replicas for a particular share. The 'active' replica will have its 'replica_state' attr set to 'active'. EXAMPLE: .. code:: [ { 'id': 'd487b88d-e428-4230-a465-a800c2cce5f8', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'in_sync', ... 'share_server_id': '4ce78e7b-0ef6-4730-ac2a-fd2defefbd05', 'share_server': or None, }, { 'id': '10e49c3e-aca9-483b-8c2d-1c337b38d6af', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'active', ... 'share_server_id': 'f63629b3-e126-4448-bec2-03f788f76094', 'share_server': or None, }, ... ] :param replica_snapshots: List of all snapshot instances that track the snapshot across the replicas. All the instances will have their status attribute set to 'creating'. EXAMPLE: .. code:: [ { 'id': 'd3931a93-3984-421e-a9e7-d9f71895450a', 'snapshot_id': '13ee5cb5-fc53-4539-9431-d983b56c5c40', 'status: 'creating', 'progress': '0%', ... }, { 'id': '8bda791c-7bb6-4e7b-9b64-fefff85ff13e', 'snapshot_id': '13ee5cb5-fc53-4539-9431-d983b56c5c40', 'status: 'creating', 'progress': '0%', ... }, ... ] :param share_server: or None :return: List of replica_snapshots, a list of dictionaries containing values that need to be updated on the database for the snapshot instances being created. :raises: Exception. Any exception in this method will set all instances to 'error'. """ raise NotImplementedError() def delete_replicated_snapshot(self, context, replica_list, replica_snapshots, share_server=None): """Delete a snapshot by deleting its instances across the replicas. NOTE: This call is made on the 'active' replica's host, since drivers may not be able to delete the snapshot from an individual replica. The driver is expected to return model updates to the share manager. If it was able to confirm the removal of any number of the snapshot instances passed in this interface, it can set their status to 'deleted' as a cue for the share manager to clean up that instance from the database. :param context: Current context :param replica_list: List of all replicas for a particular share. The 'active' replica will have its 'replica_state' attr set to 'active'. EXAMPLE: .. code:: [ { 'id': 'd487b88d-e428-4230-a465-a800c2cce5f8', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'in_sync', ... 'share_server_id': '4ce78e7b-0ef6-4730-ac2a-fd2defefbd05', 'share_server': or None, }, { 'id': '10e49c3e-aca9-483b-8c2d-1c337b38d6af', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'active', ... 'share_server_id': 'f63629b3-e126-4448-bec2-03f788f76094', 'share_server': or None, }, ... ] :param replica_snapshots: List of all snapshot instances that track the snapshot across the replicas. All the instances will have their status attribute set to 'deleting'. EXAMPLE: .. 
code:: [ { 'id': 'd3931a93-3984-421e-a9e7-d9f71895450a', 'snapshot_id': '13ee5cb5-fc53-4539-9431-d983b56c5c40', 'status': 'deleting', 'progress': '100%', ... }, { 'id': '8bda791c-7bb6-4e7b-9b64-fefff85ff13e', 'snapshot_id': '13ee5cb5-fc53-4539-9431-d983b56c5c40', 'status: 'deleting', 'progress': '100%', ... }, ... ] :param share_server: or None :return: List of replica_snapshots, a list of dictionaries containing values that need to be updated on the database for the snapshot instances being deleted. To confirm the deletion of the snapshot instance, set the 'status' attribute of the instance to 'deleted'(constants.STATUS_DELETED). :raises: Exception. Any exception in this method will set all instances to 'error_deleting'. """ raise NotImplementedError() def update_replicated_snapshot(self, context, replica_list, share_replica, replica_snapshots, replica_snapshot, share_server=None): """Update the status of a snapshot instance that lives on a replica. NOTE: For DR and Readable styles of replication, this call is made on the replica's host and not the 'active' replica's host. This method is called periodically by the share manager. It will query for snapshot instances that track the parent snapshot across non-'active' replicas. Drivers can expect the status of the instance to be 'creating' or 'deleting'. If the driver sees that a snapshot instance has been removed from the replica's backend and the instance status was set to 'deleting', it is expected to raise a SnapshotResourceNotFound exception. All other exceptions will set the snapshot instance status to 'error'. If the instance was not in 'deleting' state, raising a SnapshotResourceNotFound will set the instance status to 'error'. :param context: Current context :param replica_list: List of all replicas for a particular share. The 'active' replica will have its 'replica_state' attr set to 'active'. EXAMPLE: .. code:: [ { 'id': 'd487b88d-e428-4230-a465-a800c2cce5f8', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'in_sync', ... 'share_server_id': '4ce78e7b-0ef6-4730-ac2a-fd2defefbd05', 'share_server': or None, }, { 'id': '10e49c3e-aca9-483b-8c2d-1c337b38d6af', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'active', ... 'share_server_id': 'f63629b3-e126-4448-bec2-03f788f76094', 'share_server': or None, }, ... ] :param share_replica: Dictionary of the replica the snapshot instance is meant to be associated with. Replicas in 'active' replica_state will not be passed via this parameter. EXAMPLE: .. code:: { 'id': 'd487b88d-e428-4230-a465-a800c2cce5f8', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'deleted': False, 'host': 'openstack2@cmodeSSVMNFS1', 'status': 'available', 'scheduled_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'launched_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'terminated_at': None, 'replica_state': 'in_sync', 'availability_zone_id': 'e2c2db5c-cb2f-4697-9966-c06fb200cb80', 'export_locations': [ models.ShareInstanceExportLocations, ], 'access_rules_status': 'in_sync', 'share_network_id': '4ccd5318-65f1-11e5-9d70-feff819cdc9f', 'share_server_id': '4ce78e7b-0ef6-4730-ac2a-fd2defefbd05', } :param replica_snapshots: List of all snapshot instances that track the snapshot across the replicas. This will include the instance being updated as well. EXAMPLE: .. code:: [ { 'id': 'd3931a93-3984-421e-a9e7-d9f71895450a', 'snapshot_id': '13ee5cb5-fc53-4539-9431-d983b56c5c40', ... 
}, { 'id': '8bda791c-7bb6-4e7b-9b64-fefff85ff13e', 'snapshot_id': '13ee5cb5-fc53-4539-9431-d983b56c5c40', ... }, ... ] :param replica_snapshot: Dictionary of the snapshot instance to be updated. replica_snapshot will be in 'creating' or 'deleting' states when sent via this parameter. EXAMPLE: .. code:: { 'name': 'share-snapshot-18825630-574f-4912-93bb-af4611ef35a2', 'share_id': 'd487b88d-e428-4230-a465-a800c2cce5f8', 'share_name': 'share-d487b88d-e428-4230-a465-a800c2cce5f8', 'status': 'creating', 'id': '18825630-574f-4912-93bb-af4611ef35a2', 'deleted': False, 'created_at': datetime.datetime(2016, 8, 3, 0, 5, 58), 'share': , 'updated_at': datetime.datetime(2016, 8, 3, 0, 5, 58), 'share_instance_id': 'd487b88d-e428-4230-a465-a800c2cce5f8', 'snapshot_id': '13ee5cb5-fc53-4539-9431-d983b56c5c40', 'progress': '0%', 'deleted_at': None, 'provider_location': None, } :param share_server: or None :return: replica_snapshot_model_update, a dictionary containing values that need to be updated on the database for the snapshot instance that represents the snapshot on the replica. :raises: exception.SnapshotResourceNotFound for snapshots that are not found on the backend and their status was 'deleting'. """ raise NotImplementedError() manila-2.0.0/manila/share/migration.py0000664000567000056710000001724712701407107021033 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Hitachi Data Systems. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Helper class for Share Migration.""" import time from oslo_config import cfg from oslo_log import log from manila.common import constants from manila import exception from manila.i18n import _ from manila.i18n import _LW from manila.share import api as share_api import manila.utils as utils LOG = log.getLogger(__name__) migration_opts = [ cfg.IntOpt( 'migration_wait_access_rules_timeout', default=180, help="Time to wait for access rules to be allowed/denied on backends " "when migrating shares using generic approach (seconds)."), cfg.IntOpt( 'migration_create_delete_share_timeout', default=300, help='Timeout for creating and deleting share instances ' 'when performing share migration (seconds).'), ] CONF = cfg.CONF CONF.register_opts(migration_opts) class ShareMigrationHelper(object): def __init__(self, context, db, share): self.db = db self.share = share self.context = context self.api = share_api.API() self.migration_create_delete_share_timeout = ( CONF.migration_create_delete_share_timeout) self.migration_wait_access_rules_timeout = ( CONF.migration_wait_access_rules_timeout) def delete_instance_and_wait(self, share_instance): self.api.delete_instance(self.context, share_instance, True) # Wait for deletion. 
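        # Poll the DB until the instance is gone (NotFound), giving up once
        # 'migration_create_delete_share_timeout' seconds have elapsed. The
        # sleep between attempts grows quadratically (1s, 4s, 9s, ...).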
starttime = time.time() deadline = starttime + self.migration_create_delete_share_timeout tries = 0 instance = "Something not None" while instance is not None: try: instance = self.db.share_instance_get(self.context, share_instance['id']) tries += 1 now = time.time() if now > deadline: msg = _("Timeout trying to delete instance " "%s") % share_instance['id'] raise exception.ShareMigrationFailed(reason=msg) except exception.NotFound: instance = None else: time.sleep(tries ** 2) def create_instance_and_wait(self, share, share_instance, host): new_share_instance = self.api.create_instance( self.context, share, share_instance['share_network_id'], host['host']) # Wait for new_share_instance to become ready starttime = time.time() deadline = starttime + self.migration_create_delete_share_timeout new_share_instance = self.db.share_instance_get( self.context, new_share_instance['id'], with_share_data=True) tries = 0 while new_share_instance['status'] != constants.STATUS_AVAILABLE: tries += 1 now = time.time() if new_share_instance['status'] == constants.STATUS_ERROR: msg = _("Failed to create new share instance" " (from %(share_id)s) on " "destination host %(host_name)s") % { 'share_id': share['id'], 'host_name': host['host']} self.cleanup_new_instance(new_share_instance) raise exception.ShareMigrationFailed(reason=msg) elif now > deadline: msg = _("Timeout creating new share instance " "(from %(share_id)s) on " "destination host %(host_name)s") % { 'share_id': share['id'], 'host_name': host['host']} self.cleanup_new_instance(new_share_instance) raise exception.ShareMigrationFailed(reason=msg) else: time.sleep(tries ** 2) new_share_instance = self.db.share_instance_get( self.context, new_share_instance['id'], with_share_data=True) return new_share_instance # NOTE(ganso): Cleanup methods do not throw exceptions, since the # exceptions that should be thrown are the ones that call the cleanup def cleanup_new_instance(self, new_instance): try: self.delete_instance_and_wait(new_instance) except Exception: LOG.warning(_LW("Failed to cleanup new instance during generic" " migration for share %s."), self.share['id']) def cleanup_access_rules(self, share_instance, share_server, driver): try: self.revert_access_rules(share_instance, share_server, driver) except Exception: LOG.warning(_LW("Failed to cleanup access rules during generic" " migration for share %s."), self.share['id']) def change_to_read_only(self, share_instance, share_server, readonly_support, driver): # NOTE(ganso): If the share does not allow readonly mode we # should remove all access rules and prevent any access rules = self.db.share_access_get_all_for_instance( self.context, share_instance['id']) if len(rules) > 0: if readonly_support: LOG.debug("Changing all of share %s access rules " "to read-only.", self.share['id']) for rule in rules: rule['access_level'] = 'ro' driver.update_access(self.context, share_instance, rules, add_rules=[], delete_rules=[], share_server=share_server) else: LOG.debug("Removing all access rules for migration of " "share %s." 
% self.share['id']) driver.update_access(self.context, share_instance, [], add_rules=[], delete_rules=rules, share_server=share_server) def revert_access_rules(self, share_instance, share_server, driver): rules = self.db.share_access_get_all_for_instance( self.context, share_instance['id']) if len(rules) > 0: LOG.debug("Restoring all of share %s access rules according to " "DB.", self.share['id']) driver.update_access(self.context, share_instance, rules, add_rules=[], delete_rules=[], share_server=share_server) def apply_new_access_rules(self, new_share_instance): self.db.share_instance_access_copy(self.context, self.share['id'], new_share_instance['id']) rules = self.db.share_access_get_all_for_instance( self.context, new_share_instance['id']) if len(rules) > 0: LOG.debug("Restoring all of share %s access rules according to " "DB.", self.share['id']) self.api.allow_access_to_instance(self.context, new_share_instance, rules) utils.wait_for_access_update( self.context, self.db, new_share_instance, self.migration_wait_access_rules_timeout) manila-2.0.0/manila/share/hooks/0000775000567000056710000000000012701407265017605 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/hooks/__init__.py0000664000567000056710000000000012701407107021677 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/share/rpcapi.py0000664000567000056710000003433112701407112020305 0ustar jenkinsjenkins00000000000000# Copyright 2012, Intel, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Client side of the share RPC API. """ from oslo_config import cfg import oslo_messaging as messaging from oslo_serialization import jsonutils from manila import rpc from manila.share import utils CONF = cfg.CONF class ShareAPI(object): """Client side of the share rpc API. API version history: 1.0 - Initial version. 
1.1 - Add manage_share() and unmanage_share() methods 1.2 - Add extend_share() method 1.3 - Add shrink_share() method 1.4 - Introduce Share Instances: create_share() -> create_share_instance() delete_share() -> delete_share_instance() Add share_instance argument to allow_access() & deny_access() 1.5 - Add create_consistency_group, delete_consistency_group create_cgsnapshot, and delete_cgsnapshot methods 1.6 - Introduce Share migration: migrate_share() get_migration_info() get_driver_migration_info() 1.7 - Update target call API in allow/deny access methods 1.8 - Introduce Share Replication: create_share_replica() delete_share_replica() promote_share_replica() update_share_replica() 1.9 - Add manage_snapshot() and unmanage_snapshot() methods 1.10 - Add migration_complete(), migration_cancel() and migration_get_progress(), rename migrate_share() to migration_start(), rename get_migration_info() to migration_get_info(), rename get_driver_migration_info() to migration_get_driver_info() 1.11 - Add create_replicated_snapshot() and delete_replicated_snapshot() methods """ BASE_RPC_API_VERSION = '1.0' def __init__(self, topic=None): super(ShareAPI, self).__init__() target = messaging.Target(topic=CONF.share_topic, version=self.BASE_RPC_API_VERSION) self.client = rpc.get_client(target, version_cap='1.11') def create_share_instance(self, context, share_instance, host, request_spec, filter_properties, snapshot_id=None): new_host = utils.extract_host(host) call_context = self.client.prepare(server=new_host, version='1.4') request_spec_p = jsonutils.to_primitive(request_spec) call_context.cast(context, 'create_share_instance', share_instance_id=share_instance['id'], request_spec=request_spec_p, filter_properties=filter_properties, snapshot_id=snapshot_id) def manage_share(self, context, share, driver_options=None): host = utils.extract_host(share['instance']['host']) call_context = self.client.prepare(server=host, version='1.1') call_context.cast(context, 'manage_share', share_id=share['id'], driver_options=driver_options) def unmanage_share(self, context, share): host = utils.extract_host(share['instance']['host']) call_context = self.client.prepare(server=host, version='1.1') call_context.cast(context, 'unmanage_share', share_id=share['id']) def manage_snapshot(self, context, snapshot, host, driver_options=None): new_host = utils.extract_host(host) call_context = self.client.prepare(server=new_host, version='1.9') call_context.cast(context, 'manage_snapshot', snapshot_id=snapshot['id'], driver_options=driver_options) def unmanage_snapshot(self, context, snapshot, host): new_host = utils.extract_host(host) call_context = self.client.prepare(server=new_host, version='1.9') call_context.cast(context, 'unmanage_snapshot', snapshot_id=snapshot['id']) def delete_share_instance(self, context, share_instance, force=False): host = utils.extract_host(share_instance['host']) call_context = self.client.prepare(server=host, version='1.4') call_context.cast(context, 'delete_share_instance', share_instance_id=share_instance['id'], force=force) def migration_start(self, context, share, dest_host, force_host_copy, notify): new_host = utils.extract_host(share['instance']['host']) call_context = self.client.prepare(server=new_host, version='1.6') host_p = {'host': dest_host.host, 'capabilities': dest_host.capabilities} call_context.cast(context, 'migration_start', share_id=share['id'], host=host_p, force_host_copy=force_host_copy, notify=notify) def migration_get_info(self, context, share_instance): new_host = 
utils.extract_host(share_instance['host']) call_context = self.client.prepare(server=new_host, version='1.6') return call_context.call(context, 'migration_get_info', share_instance_id=share_instance['id']) def migration_get_driver_info(self, context, share_instance): new_host = utils.extract_host(share_instance['host']) call_context = self.client.prepare(server=new_host, version='1.6') return call_context.call(context, 'migration_get_driver_info', share_instance_id=share_instance['id']) def delete_share_server(self, context, share_server): host = utils.extract_host(share_server['host']) call_context = self.client.prepare(server=host, version='1.0') call_context.cast(context, 'delete_share_server', share_server=share_server) def create_snapshot(self, context, share, snapshot): host = utils.extract_host(share['instance']['host']) call_context = self.client.prepare(server=host) call_context.cast(context, 'create_snapshot', share_id=share['id'], snapshot_id=snapshot['id']) def delete_snapshot(self, context, snapshot, host): new_host = utils.extract_host(host) call_context = self.client.prepare(server=new_host) call_context.cast(context, 'delete_snapshot', snapshot_id=snapshot['id']) def create_replicated_snapshot(self, context, share, replicated_snapshot): host = utils.extract_host(share['instance']['host']) call_context = self.client.prepare(server=host, version='1.11') call_context.cast(context, 'create_replicated_snapshot', snapshot_id=replicated_snapshot['id'], share_id=share['id']) def delete_replicated_snapshot(self, context, replicated_snapshot, host, share_id=None, force=False): host = utils.extract_host(host) call_context = self.client.prepare(server=host, version='1.11') call_context.cast(context, 'delete_replicated_snapshot', snapshot_id=replicated_snapshot['id'], share_id=share_id, force=force) @staticmethod def _get_access_rules(access): if isinstance(access, list): return [rule['id'] for rule in access] else: return [access['id']] def allow_access(self, context, share_instance, access): host = utils.extract_host(share_instance['host']) call_context = self.client.prepare(server=host, version='1.7') call_context.cast(context, 'allow_access', share_instance_id=share_instance['id'], access_rules=self._get_access_rules(access)) def deny_access(self, context, share_instance, access): host = utils.extract_host(share_instance['host']) call_context = self.client.prepare(server=host, version='1.7') call_context.cast(context, 'deny_access', share_instance_id=share_instance['id'], access_rules=self._get_access_rules(access)) def publish_service_capabilities(self, context): call_context = self.client.prepare(fanout=True, version='1.0') call_context.cast(context, 'publish_service_capabilities') def extend_share(self, context, share, new_size, reservations): host = utils.extract_host(share['instance']['host']) call_context = self.client.prepare(server=host, version='1.2') call_context.cast(context, 'extend_share', share_id=share['id'], new_size=new_size, reservations=reservations) def shrink_share(self, context, share, new_size): host = utils.extract_host(share['instance']['host']) call_context = self.client.prepare(server=host, version='1.3') call_context.cast(context, 'shrink_share', share_id=share['id'], new_size=new_size) def create_consistency_group(self, context, cg, host): new_host = utils.extract_host(host) call_context = self.client.prepare(server=new_host, version='1.5') call_context.cast(context, 'create_consistency_group', cg_id=cg['id']) def delete_consistency_group(self, context, 
cg): new_host = utils.extract_host(cg['host']) call_context = self.client.prepare(server=new_host, version='1.5') call_context.cast(context, 'delete_consistency_group', cg_id=cg['id']) def create_cgsnapshot(self, context, cgsnapshot, host): new_host = utils.extract_host(host) call_context = self.client.prepare(server=new_host, version='1.5') call_context.cast(context, 'create_cgsnapshot', cgsnapshot_id=cgsnapshot['id']) def delete_cgsnapshot(self, context, cgsnapshot, host): new_host = utils.extract_host(host) call_context = self.client.prepare(server=new_host, version='1.5') call_context.cast(context, 'delete_cgsnapshot', cgsnapshot_id=cgsnapshot['id']) def create_share_replica(self, context, share_replica, host, request_spec, filter_properties): new_host = utils.extract_host(host) call_context = self.client.prepare(server=new_host, version='1.8') request_spec_p = jsonutils.to_primitive(request_spec) call_context.cast(context, 'create_share_replica', share_replica_id=share_replica['id'], request_spec=request_spec_p, filter_properties=filter_properties, share_id=share_replica['share_id']) def delete_share_replica(self, context, share_replica, force=False): host = utils.extract_host(share_replica['host']) call_context = self.client.prepare(server=host, version='1.8') call_context.cast(context, 'delete_share_replica', share_replica_id=share_replica['id'], share_id=share_replica['share_id'], force=force) def promote_share_replica(self, context, share_replica): host = utils.extract_host(share_replica['host']) call_context = self.client.prepare(server=host, version='1.8') call_context.cast(context, 'promote_share_replica', share_replica_id=share_replica['id'], share_id=share_replica['share_id']) def update_share_replica(self, context, share_replica): host = utils.extract_host(share_replica['host']) call_context = self.client.prepare(server=host, version='1.8') call_context.cast(context, 'update_share_replica', share_replica_id=share_replica['id'], share_id=share_replica['share_id']) def migration_complete(self, context, share, share_instance_id, new_share_instance_id): new_host = utils.extract_host(share['host']) call_context = self.client.prepare(server=new_host, version='1.10') call_context.cast(context, 'migration_complete', share_id=share['id'], share_instance_id=share_instance_id, new_share_instance_id=new_share_instance_id) def migration_cancel(self, context, share): new_host = utils.extract_host(share['host']) call_context = self.client.prepare(server=new_host, version='1.10') call_context.call(context, 'migration_cancel', share_id=share['id']) def migration_get_progress(self, context, share): new_host = utils.extract_host(share['host']) call_context = self.client.prepare(server=new_host, version='1.10') return call_context.call(context, 'migration_get_progress', share_id=share['id']) manila-2.0.0/manila/share/configuration.py0000664000567000056710000000475412701407107021710 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (c) 2012 Rackspace Hosting # Copyright (c) 2013 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Configuration support for all drivers. This module allows support for setting configurations either from default or from a particular CONF group, to be able to set multiple configurations for a given set of values. For instance, two generic configurations can be set by naming them in groups as [generic1] share_backend_name=generic-backend-1 ... [generic2] share_backend_name=generic-backend-2 ... And the configuration group name will be passed in so that all calls to configuration.volume_group within that instance will be mapped to the proper named group. This class also ensures the implementation's configuration is grafted into the option group. This is due to the way cfg works. All cfg options must be defined and registered in the group in which they are used. """ from oslo_config import cfg CONF = cfg.CONF class Configuration(object): def __init__(self, share_opts, config_group=None): """Graft config values into config group. This takes care of grafting the implementation's config values into the config group. """ self.config_group = config_group # set the local conf so that __call__'s know what to use if self.config_group: self._ensure_config_values(share_opts) self.local_conf = CONF._get(self.config_group) else: self.local_conf = CONF def _ensure_config_values(self, share_opts): CONF.register_opts(share_opts, group=self.config_group) def append_config_values(self, share_opts): self._ensure_config_values(share_opts) def safe_get(self, value): try: return self.__getattr__(value) except cfg.NoSuchOptError: return None def __getattr__(self, value): return getattr(self.local_conf, value) manila-2.0.0/manila/share/share_types.py0000664000567000056710000002602212701407107021357 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation. # Copyright (c) 2015 Tom Barron. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Built-in share type properties.""" import re from oslo_config import cfg from oslo_db import exception as db_exception from oslo_log import log from oslo_utils import strutils from oslo_utils import uuidutils import six from manila.common import constants from manila import context from manila import db from manila import exception from manila.i18n import _ from manila.i18n import _LE CONF = cfg.CONF LOG = log.getLogger(__name__) def create(context, name, extra_specs=None, is_public=True, projects=None): """Creates share types.""" extra_specs = extra_specs or {} projects = projects or [] if constants.ExtraSpecs.SNAPSHOT_SUPPORT not in list(extra_specs): extra_specs[constants.ExtraSpecs.SNAPSHOT_SUPPORT] = 'True' try: get_valid_required_extra_specs(extra_specs) except exception.InvalidExtraSpec as e: raise exception.InvalidShareType(reason=six.text_type(e)) try: type_ref = db.share_type_create(context, dict(name=name, extra_specs=extra_specs, is_public=is_public), projects=projects) except db_exception.DBError as e: LOG.exception(_LE('DB error: %s'), e) raise exception.ShareTypeCreateFailed(name=name, extra_specs=extra_specs) return type_ref def destroy(context, id): """Marks share types as deleted.""" if id is None: msg = _("id cannot be None") raise exception.InvalidShareType(reason=msg) else: db.share_type_destroy(context, id) def get_all_types(context, inactive=0, search_opts=None): """Get all non-deleted share_types. """ search_opts = search_opts or {} filters = {} if 'is_public' in search_opts: filters['is_public'] = search_opts.pop('is_public') share_types = db.share_type_get_all(context, inactive, filters=filters) for type_name, type_args in share_types.items(): required_extra_specs = {} try: required_extra_specs = get_valid_required_extra_specs( type_args['extra_specs']) except exception.InvalidExtraSpec as e: values = { 'share_type': type_name, 'error': six.text_type(e) } LOG.exception(_LE('Share type %(share_type)s has invalid required' ' extra specs: %(error)s'), values) type_args['required_extra_specs'] = required_extra_specs if search_opts: LOG.debug("Searching by: %s", search_opts) def _check_extra_specs_match(share_type, searchdict): for k, v in searchdict.items(): if (k not in share_type['extra_specs'].keys() or share_type['extra_specs'][k] != v): return False return True # search_option to filter_name mapping. 
filter_mapping = {'extra_specs': _check_extra_specs_match} result = {} for type_name, type_args in share_types.items(): # go over all filters in the list for opt, values in search_opts.items(): try: filter_func = filter_mapping[opt] except KeyError: # no such filter - ignore it, go to next filter continue else: if filter_func(type_args, values): result[type_name] = type_args break share_types = result return share_types def get_share_type(ctxt, id, expected_fields=None): """Retrieves single share type by id.""" if id is None: msg = _("id cannot be None") raise exception.InvalidShareType(reason=msg) if ctxt is None: ctxt = context.get_admin_context() return db.share_type_get(ctxt, id, expected_fields=expected_fields) def get_share_type_by_name(context, name): """Retrieves single share type by name.""" if name is None: msg = _("name cannot be None") raise exception.InvalidShareType(reason=msg) return db.share_type_get_by_name(context, name) def get_share_type_by_name_or_id(context, share_type=None): if not share_type: share_type_ref = get_default_share_type(context) if not share_type_ref: msg = _("Default share type not found") raise exception.ShareTypeNotFound(reason=msg) return share_type_ref if uuidutils.is_uuid_like(share_type): return get_share_type(context, share_type) else: return get_share_type_by_name(context, share_type) def get_default_share_type(ctxt=None): """Get the default share type.""" name = CONF.default_share_type if name is None: return {} if ctxt is None: ctxt = context.get_admin_context() try: return get_share_type_by_name(ctxt, name) except exception.ShareTypeNotFoundByName as e: # Couldn't find share type with the name in default_share_type # flag, record this issue and move on # TODO(zhiteng) consider add notification to warn admin LOG.exception(_LE('Default share type is not found, ' 'please check default_share_type config: %s'), e) def get_share_type_extra_specs(share_type_id, key=False): share_type = get_share_type(context.get_admin_context(), share_type_id) extra_specs = share_type['extra_specs'] if key: if extra_specs.get(key): return extra_specs.get(key) else: return False else: return extra_specs def get_required_extra_specs(): return constants.ExtraSpecs.REQUIRED def get_undeletable_extra_specs(): return constants.ExtraSpecs.UNDELETABLE def get_tenant_visible_extra_specs(): return constants.ExtraSpecs.TENANT_VISIBLE def get_boolean_extra_specs(): return constants.ExtraSpecs.BOOLEAN def is_valid_required_extra_spec(key, value): """Validates required extra_spec value. :param key: extra_spec name :param value: extra_spec value :return: None if provided extra_spec is not required True/False if extra_spec is required and valid or not. """ if key not in get_required_extra_specs(): return if key == constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: return strutils.bool_from_string(value, default=None) is not None return False def get_valid_required_extra_specs(extra_specs): """Returns required extra specs from dict. Returns None if extra specs are not valid, or if some required extras specs is missed. 
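    For example (illustrative), with 'driver_handles_share_servers' being
    the only required extra spec, an input such as
    {'driver_handles_share_servers': 'False', 'snapshot_support': 'True'}
    yields {'driver_handles_share_servers': 'False'}.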
""" extra_specs = extra_specs or {} missed_extra_specs = set(get_required_extra_specs()) - set(extra_specs) if missed_extra_specs: specs = ",".join(missed_extra_specs) msg = _("Required extra specs '%s' not specified.") % specs raise exception.InvalidExtraSpec(reason=msg) required_extra_specs = {} for k in get_required_extra_specs(): value = extra_specs.get(k, '') if not is_valid_required_extra_spec(k, value): msg = _("Value of required extra_spec %s is not valid") % k raise exception.InvalidExtraSpec(reason=msg) required_extra_specs[k] = value return required_extra_specs def add_share_type_access(context, share_type_id, project_id): """Add access to share type for project_id.""" if share_type_id is None: msg = _("share_type_id cannot be None") raise exception.InvalidShareType(reason=msg) return db.share_type_access_add(context, share_type_id, project_id) def remove_share_type_access(context, share_type_id, project_id): """Remove access to share type for project_id.""" if share_type_id is None: msg = _("share_type_id cannot be None") raise exception.InvalidShareType(reason=msg) return db.share_type_access_remove(context, share_type_id, project_id) def share_types_diff(context, share_type_id1, share_type_id2): """Returns a 'diff' of two share types and whether they are equal. Returns a tuple of (diff, equal), where 'equal' is a boolean indicating whether there is any difference, and 'diff' is a dictionary with the following format: {'extra_specs': { 'key1': (value_in_1st_share_type, value_in_2nd_share_type), 'key2': (value_in_1st_share_type, value_in_2nd_share_type), ...} """ def _dict_diff(dict1, dict2): res = {} equal = True if dict1 is None: dict1 = {} if dict2 is None: dict2 = {} for k, v in dict1.items(): res[k] = (v, dict2.get(k)) if k not in dict2 or res[k][0] != res[k][1]: equal = False for k, v in dict2.items(): res[k] = (dict1.get(k), v) if k not in dict1 or res[k][0] != res[k][1]: equal = False return (res, equal) all_equal = True diff = {} share_type1 = get_share_type(context, share_type_id1) share_type2 = get_share_type(context, share_type_id2) extra_specs1 = share_type1.get('extra_specs') extra_specs2 = share_type2.get('extra_specs') diff['extra_specs'], equal = _dict_diff(extra_specs1, extra_specs2) if not equal: all_equal = False return (diff, all_equal) def get_extra_specs_from_share(share): type_id = share.get('share_type_id', None) return get_share_type_extra_specs(type_id) def parse_boolean_extra_spec(extra_spec_key, extra_spec_value): """Parse extra spec values of the form ' True' or ' False' This method returns the boolean value of an extra spec value. If the value does not conform to the standard boolean pattern, it raises an InvalidExtraSpec exception. """ try: if not isinstance(extra_spec_value, six.string_types): raise ValueError match = re.match(r'^\s*(?PTrue|False)$', extra_spec_value.strip(), re.IGNORECASE) if not match: raise ValueError else: return strutils.bool_from_string(match.group('value'), strict=True) except ValueError: msg = (_('Invalid boolean extra spec %(key)s : %(value)s') % {'key': extra_spec_key, 'value': extra_spec_value}) raise exception.InvalidExtraSpec(reason=msg) manila-2.0.0/manila/share/api.py0000664000567000056710000017375412701407112017615 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2015 Mirantis Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all requests relating to shares. """ from oslo_config import cfg from oslo_log import log from oslo_utils import excutils from oslo_utils import strutils from oslo_utils import timeutils import six from manila.api import extensions from manila.common import constants from manila.data import rpcapi as data_rpcapi from manila.db import base from manila import exception from manila.i18n import _ from manila.i18n import _LE from manila.i18n import _LI from manila.i18n import _LW from manila import policy from manila import quota from manila.scheduler import rpcapi as scheduler_rpcapi from manila.share import rpcapi as share_rpcapi from manila.share import share_types from manila.share import utils as share_utils from manila import utils share_api_opts = [ cfg.BoolOpt('use_scheduler_creating_share_from_snapshot', default=False, help='If set to False, then share creation from snapshot will ' 'be performed on the same host. ' 'If set to True, then scheduling step will be used.') ] CONF = cfg.CONF CONF.register_opts(share_api_opts) LOG = log.getLogger(__name__) GB = 1048576 * 1024 QUOTAS = quota.QUOTAS class API(base.Base): """API for interacting with the share manager.""" def __init__(self, db_driver=None): self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() self.share_rpcapi = share_rpcapi.ShareAPI() super(API, self).__init__(db_driver) def create(self, context, share_proto, size, name, description, snapshot_id=None, availability_zone=None, metadata=None, share_network_id=None, share_type=None, is_public=False, consistency_group_id=None, cgsnapshot_member=None): """Create new share.""" policy.check_policy(context, 'share', 'create') self._check_metadata_properties(context, metadata) if snapshot_id is not None: snapshot = self.get_snapshot(context, snapshot_id) if snapshot['aggregate_status'] != constants.STATUS_AVAILABLE: msg = _("status must be '%s'") % constants.STATUS_AVAILABLE raise exception.InvalidShareSnapshot(reason=msg) if not size: size = snapshot['size'] else: snapshot = None def as_int(s): try: return int(s) except (ValueError, TypeError): return s # tolerate size as stringified int size = as_int(size) if not isinstance(size, int) or size <= 0: msg = (_("Share size '%s' must be an integer and greater than 0") % size) raise exception.InvalidInput(reason=msg) if snapshot and size < snapshot['size']: msg = (_("Share size '%s' must be equal or greater " "than snapshot size") % size) raise exception.InvalidInput(reason=msg) if snapshot is None: share_type_id = share_type['id'] if share_type else None else: source_share = self.db.share_get(context, snapshot['share_id']) availability_zone = source_share['instance']['availability_zone'] if share_type is None: # Grab the source share's share_type if no new share type # has been provided. 
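                # A share created from a snapshot must keep the source
                # share's type; an explicitly requested type that differs
                # is rejected further below.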
share_type_id = source_share['share_type_id'] share_type = share_types.get_share_type(context, share_type_id) else: share_type_id = share_type['id'] if share_type_id != source_share['share_type_id']: msg = _("Invalid share type specified: the requested " "share type must match the type of the source " "share. If a share type is not specified when " "requesting a new share from a snapshot, the " "share type of the source share will be applied " "to the new share.") raise exception.InvalidInput(reason=msg) supported_share_protocols = ( proto.upper() for proto in CONF.enabled_share_protocols) if not (share_proto and share_proto.upper() in supported_share_protocols): msg = (_("Invalid share protocol provided: %(provided)s. " "It is either disabled or unsupported. Available " "protocols: %(supported)s") % dict( provided=share_proto, supported=CONF.enabled_share_protocols)) raise exception.InvalidInput(reason=msg) try: reservations = QUOTAS.reserve(context, shares=1, gigabytes=size) except exception.OverQuota as e: overs = e.kwargs['overs'] usages = e.kwargs['usages'] quotas = e.kwargs['quotas'] def _consumed(name): return (usages[name]['reserved'] + usages[name]['in_use']) if 'gigabytes' in overs: LOG.warning(_LW("Quota exceeded for %(s_pid)s, " "tried to create " "%(s_size)sG share (%(d_consumed)dG of " "%(d_quota)dG already consumed)."), { 's_pid': context.project_id, 's_size': size, 'd_consumed': _consumed('gigabytes'), 'd_quota': quotas['gigabytes']}) raise exception.ShareSizeExceedsAvailableQuota() elif 'shares' in overs: LOG.warning(_LW("Quota exceeded for %(s_pid)s, " "tried to create " "share (%(d_consumed)d shares " "already consumed)."), { 's_pid': context.project_id, 'd_consumed': _consumed('shares')}) raise exception.ShareLimitExceeded(allowed=quotas['shares']) try: is_public = strutils.bool_from_string(is_public, strict=True) snapshot_support = strutils.bool_from_string( share_type.get('extra_specs', {}).get( 'snapshot_support', True) if share_type else True, strict=True) replication_type = share_type.get('extra_specs', {}).get( 'replication_type') if share_type else None except ValueError as e: raise exception.InvalidParameterValue(six.text_type(e)) consistency_group = None if consistency_group_id: try: consistency_group = self.db.consistency_group_get( context, consistency_group_id) except exception.NotFound as e: raise exception.InvalidParameterValue(six.text_type(e)) if (not cgsnapshot_member and not (consistency_group['status'] == constants.STATUS_AVAILABLE)): params = { 'avail': constants.STATUS_AVAILABLE, 'cg_status': consistency_group['status'], } msg = _("Consistency group status must be %(avail)s, got" "%(cg_status)s.") % params raise exception.InvalidConsistencyGroup(message=msg) if share_type_id: cg_st_ids = [st['share_type_id'] for st in consistency_group.get('share_types', [])] if share_type_id not in cg_st_ids: params = { 'type': share_type_id, 'cg': consistency_group_id } msg = _("The specified share type (%(type)s) is not " "supported by the specified consistency group " "(%(cg)s).") % params raise exception.InvalidParameterValue(msg) if (not consistency_group.get('share_network_id') == share_network_id): params = { 'net': share_network_id, 'cg': consistency_group_id } msg = _("The specified share network (%(net)s) is not " "supported by the specified consistency group " "(%(cg)s).") % params raise exception.InvalidParameterValue(msg) options = {'size': size, 'user_id': context.user_id, 'project_id': context.project_id, 'snapshot_id': snapshot_id, 
'snapshot_support': snapshot_support, 'replication_type': replication_type, 'metadata': metadata, 'display_name': name, 'display_description': description, 'share_proto': share_proto, 'share_type_id': share_type_id, 'is_public': is_public, 'consistency_group_id': consistency_group_id, } if cgsnapshot_member: options['source_cgsnapshot_member_id'] = cgsnapshot_member['id'] try: share = self.db.share_create(context, options, create_share_instance=False) QUOTAS.commit(context, reservations) except Exception: with excutils.save_and_reraise_exception(): try: self.db.share_delete(context, share['id']) finally: QUOTAS.rollback(context, reservations) host = None if snapshot and not CONF.use_scheduler_creating_share_from_snapshot: # Shares from snapshots with restriction - source host only. # It is common situation for different types of backends. host = snapshot['share']['instance']['host'] self.create_instance(context, share, share_network_id=share_network_id, host=host, availability_zone=availability_zone, consistency_group=consistency_group, cgsnapshot_member=cgsnapshot_member) # Retrieve the share with instance details share = self.db.share_get(context, share['id']) return share def create_instance(self, context, share, share_network_id=None, host=None, availability_zone=None, consistency_group=None, cgsnapshot_member=None): policy.check_policy(context, 'share', 'create') request_spec, share_instance = ( self._create_share_instance_and_get_request_spec( context, share, availability_zone=availability_zone, consistency_group=consistency_group, host=host, share_network_id=share_network_id)) if cgsnapshot_member: host = cgsnapshot_member['share']['host'] share = self.db.share_instance_update(context, share_instance['id'], {'host': host}) # NOTE(ameade): Do not cast to driver if creating from cgsnapshot return if host: self.share_rpcapi.create_share_instance( context, share_instance, host, request_spec=request_spec, filter_properties={}, snapshot_id=share['snapshot_id'], ) else: # Create share instance from scratch or from snapshot could happen # on hosts other than the source host. self.scheduler_rpcapi.create_share_instance( context, request_spec=request_spec, filter_properties={}) return share_instance def _create_share_instance_and_get_request_spec( self, context, share, availability_zone=None, consistency_group=None, host=None, share_network_id=None): availability_zone_id = None if availability_zone: availability_zone_id = self.db.availability_zone_get( context, availability_zone).id # TODO(u_glide): Add here validation that provided share network # doesn't conflict with provided availability_zone when Neutron # will have AZ support. 
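# The new share instance is recorded in STATUS_CREATING with an empty
# 'host' unless a host was pinned above (e.g. share-from-snapshot
# restricted to the source host); an empty host leaves placement to
# the scheduler, driven by the request_spec assembled below.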
share_instance = self.db.share_instance_create( context, share['id'], { 'share_network_id': share_network_id, 'status': constants.STATUS_CREATING, 'scheduled_at': timeutils.utcnow(), 'host': host if host else '', 'availability_zone_id': availability_zone_id, } ) share_properties = { 'id': share['id'], 'size': share['size'], 'user_id': share['user_id'], 'project_id': share['project_id'], 'metadata': self.db.share_metadata_get(context, share['id']), 'share_server_id': share_instance['share_server_id'], 'snapshot_support': share['snapshot_support'], 'share_proto': share['share_proto'], 'share_type_id': share['share_type_id'], 'is_public': share['is_public'], 'consistency_group_id': share['consistency_group_id'], 'source_cgsnapshot_member_id': share[ 'source_cgsnapshot_member_id'], 'snapshot_id': share['snapshot_id'], 'replication_type': share['replication_type'], } share_instance_properties = { 'id': share_instance['id'], 'availability_zone_id': share_instance['availability_zone_id'], 'share_network_id': share_instance['share_network_id'], 'share_server_id': share_instance['share_server_id'], 'share_id': share_instance['share_id'], 'host': share_instance['host'], 'status': share_instance['status'], 'replica_state': share_instance['replica_state'], } share_type = None if share['share_type_id']: share_type = self.db.share_type_get( context, share['share_type_id']) request_spec = { 'share_properties': share_properties, 'share_instance_properties': share_instance_properties, 'share_proto': share['share_proto'], 'share_id': share['id'], 'snapshot_id': share['snapshot_id'], 'share_type': share_type, 'consistency_group': consistency_group, 'availability_zone_id': availability_zone_id, } return request_spec, share_instance def create_share_replica(self, context, share, availability_zone=None, share_network_id=None): if not share.get('replication_type'): msg = _("Replication not supported for share %s.") raise exception.InvalidShare(message=msg % share['id']) self._check_is_share_busy(share) active_replica = self.db.share_replicas_get_available_active_replica( context, share['id']) if not active_replica: msg = _("Share %s does not have any active replica in available " "state.") raise exception.ReplicationException(reason=msg % share['id']) request_spec, share_replica = ( self._create_share_instance_and_get_request_spec( context, share, availability_zone=availability_zone, share_network_id=share_network_id)) all_replicas = self.db.share_replicas_get_all_by_share( context, share['id']) all_hosts = [r['host'] for r in all_replicas] request_spec['active_replica_host'] = active_replica['host'] request_spec['all_replica_hosts'] = ','.join(all_hosts) self.db.share_replica_update( context, share_replica['id'], {'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC}) existing_snapshots = ( self.db.share_snapshot_get_all_for_share( context, share_replica['share_id']) ) snapshot_instance = { 'status': constants.STATUS_CREATING, 'progress': '0%', 'share_instance_id': share_replica['id'], } for snapshot in existing_snapshots: self.db.share_snapshot_instance_create( context, snapshot['id'], snapshot_instance) self.scheduler_rpcapi.create_share_replica( context, request_spec=request_spec, filter_properties={}) return share_replica def delete_share_replica(self, context, share_replica, force=False): # Disallow deletion of ONLY active replica, *even* when this # operation is forced. 
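# For example: with replica states ['active', 'out_of_sync'], the
# 'out_of_sync' replica may be deleted, but deleting the sole 'active'
# replica raises ReplicationException; with two 'active' replicas,
# either one may be removed.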
replicas = self.db.share_replicas_get_all_by_share( context, share_replica['share_id']) active_replicas = list(filter( lambda x: x['replica_state'] == constants.REPLICA_STATE_ACTIVE, replicas)) if (share_replica.get('replica_state') == constants.REPLICA_STATE_ACTIVE and len(active_replicas) == 1): msg = _("Cannot delete last active replica.") raise exception.ReplicationException(reason=msg) LOG.info(_LI("Deleting replica %s."), id) self.db.share_replica_update( context, share_replica['id'], { 'status': constants.STATUS_DELETING, 'terminated_at': timeutils.utcnow(), } ) if not share_replica['host']: # Delete any snapshot instances created on the database replica_snapshots = ( self.db.share_snapshot_instance_get_all_with_filters( context, {'share_instance_ids': share_replica['id']}) ) for snapshot in replica_snapshots: self.db.share_snapshot_instance_delete(context, snapshot['id']) # Delete the replica from the database self.db.share_replica_delete(context, share_replica['id']) else: self.share_rpcapi.delete_share_replica(context, share_replica, force=force) def promote_share_replica(self, context, share_replica): if share_replica.get('status') != constants.STATUS_AVAILABLE: msg = _("Replica %(replica_id)s must be in %(status)s state to be " "promoted.") raise exception.ReplicationException( reason=msg % {'replica_id': share_replica['id'], 'status': constants.STATUS_AVAILABLE}) replica_state = share_replica['replica_state'] if (replica_state in (constants.REPLICA_STATE_OUT_OF_SYNC, constants.STATUS_ERROR) and not context.is_admin): msg = _("Promoting a replica with 'replica_state': %s requires " "administrator privileges.") raise exception.AdminRequired( message=msg % replica_state) self.db.share_replica_update( context, share_replica['id'], {'status': constants.STATUS_REPLICATION_CHANGE}) self.share_rpcapi.promote_share_replica(context, share_replica) return self.db.share_replica_get(context, share_replica['id']) def update_share_replica(self, context, share_replica): if not share_replica['host']: msg = _("Share replica does not have a valid host.") raise exception.InvalidHost(reason=msg) self.share_rpcapi.update_share_replica(context, share_replica) def manage(self, context, share_data, driver_options): policy.check_policy(context, 'share', 'manage') shares = self.get_all(context, { 'host': share_data['host'], 'export_location': share_data['export_location'], 'share_proto': share_data['share_proto'], 'share_type_id': share_data['share_type_id'] }) share_type = {} share_type_id = share_data['share_type_id'] if share_type_id: share_type = share_types.get_share_type(context, share_type_id) snapshot_support = strutils.bool_from_string( share_type.get('extra_specs', {}).get( 'snapshot_support', True) if share_type else True, strict=True) share_data.update({ 'user_id': context.user_id, 'project_id': context.project_id, 'status': constants.STATUS_MANAGING, 'scheduled_at': timeutils.utcnow(), 'snapshot_support': snapshot_support, }) LOG.debug("Manage: Found shares %s.", len(shares)) retry_states = (constants.STATUS_MANAGE_ERROR,) export_location = share_data.pop('export_location') if len(shares) == 0: share = self.db.share_create(context, share_data) # NOTE(u_glide): Case when administrator have fixed some problems and # tries to manage share again elif len(shares) == 1 and shares[0]['status'] in retry_states: share = self.db.share_update(context, shares[0]['id'], share_data) else: msg = _("Share already exists.") raise exception.ManilaException(msg) self.db.share_export_locations_update(context, 
share.instance['id'], export_location) request_spec = self._get_request_spec_dict(share, share_type, size=0) try: self.scheduler_rpcapi.manage_share(context, share['id'], driver_options, request_spec) except Exception: msg = _('Host %(host)s did not pass validation for managing of ' 'share %(share)s with type %(type)s.') % { 'host': share['host'], 'share': share['id'], 'type': share['share_type_id']} raise exception.InvalidHost(reason=msg) return self.db.share_get(context, share['id']) def _get_request_spec_dict(self, share, share_type, **kwargs): share_instance = share['instance'] share_properties = { 'size': kwargs.get('size', share['size']), 'user_id': kwargs.get('user_id', share['user_id']), 'project_id': kwargs.get('project_id', share['project_id']), 'snapshot_support': kwargs.get( 'snapshot_support', share_type['extra_specs']['snapshot_support']), 'share_proto': kwargs.get('share_proto', share['share_proto']), 'share_type_id': kwargs.get('share_type_id', share['share_type_id']), 'is_public': kwargs.get('is_public', share['is_public']), 'consistency_group_id': kwargs.get('consistency_group_id', share['consistency_group_id']), 'source_cgsnapshot_member_id': kwargs.get( 'source_cgsnapshot_member_id', share['source_cgsnapshot_member_id']), 'snapshot_id': kwargs.get('snapshot_id', share['snapshot_id']), } share_instance_properties = { 'availability_zone_id': kwargs.get( 'availability_zone_id', share_instance['availability_zone_id']), 'share_network_id': kwargs.get('share_network_id', share_instance['share_network_id']), 'share_server_id': kwargs.get('share_server_id', share_instance['share_server_id']), 'share_id': kwargs.get('share_id', share_instance['share_id']), 'host': kwargs.get('host', share_instance['host']), 'status': kwargs.get('status', share_instance['status']), } request_spec = { 'share_properties': share_properties, 'share_instance_properties': share_instance_properties, 'share_type': share_type, 'share_id': share['id'] } return request_spec def unmanage(self, context, share): policy.check_policy(context, 'share', 'unmanage') self._check_is_share_busy(share) update_data = {'status': constants.STATUS_UNMANAGING, 'terminated_at': timeutils.utcnow()} share_ref = self.db.share_update(context, share['id'], update_data) self.share_rpcapi.unmanage_share(context, share_ref) # NOTE(u_glide): We should update 'updated_at' timestamp of # share server here, when manage/unmanage operations will be supported # for driver_handles_share_servers=True mode def manage_snapshot(self, context, snapshot_data, driver_options): try: share = self.db.share_get(context, snapshot_data['share_id']) except exception.NotFound: raise exception.ShareNotFound(share_id=snapshot_data['share_id']) existing_snapshots = self.db.share_snapshot_get_all_for_share( context, snapshot_data['share_id']) for existing_snap in existing_snapshots: for inst in existing_snap.get('instances'): if (snapshot_data['provider_location'] == inst['provider_location']): msg = _("A share snapshot %(share_snapshot_id)s is " "already managed for provider location " "%(provider_location)s.") % { 'share_snapshot_id': existing_snap['id'], 'provider_location': snapshot_data['provider_location'], } raise exception.ManageInvalidShareSnapshot( reason=msg) snapshot_data.update({ 'user_id': context.user_id, 'project_id': context.project_id, 'status': constants.STATUS_MANAGING, 'share_size': share['size'], 'progress': '0%', 'share_proto': share['share_proto'] }) snapshot = self.db.share_snapshot_create(context, snapshot_data) 
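# The snapshot record was created above in STATUS_MANAGING; the RPC
# call below hands it to the share manager on the share's host, which
# completes (or fails) the manage operation.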
self.share_rpcapi.manage_snapshot(context, snapshot, share['host'], driver_options) return snapshot def unmanage_snapshot(self, context, snapshot, host): update_data = {'status': constants.STATUS_UNMANAGING, 'terminated_at': timeutils.utcnow()} snapshot_ref = self.db.share_snapshot_update(context, snapshot['id'], update_data) self.share_rpcapi.unmanage_snapshot(context, snapshot_ref, host) @policy.wrap_check_policy('share') def delete(self, context, share, force=False): """Delete share.""" share = self.db.share_get(context, share['id']) if context.is_admin and context.project_id != share['project_id']: project_id = share['project_id'] else: project_id = context.project_id share_id = share['id'] statuses = (constants.STATUS_AVAILABLE, constants.STATUS_ERROR, constants.STATUS_INACTIVE) if not (force or share['status'] in statuses): msg = _("Share status must be one of %(statuses)s") % { "statuses": statuses} raise exception.InvalidShare(reason=msg) # NOTE(gouthamr): If the share has more than one replica, # it can't be deleted until the additional replicas are removed. if share.has_replicas: msg = _("Share %s has replicas. Remove the replicas before " "deleting the share.") % share_id raise exception.Conflict(err=msg) snapshots = self.db.share_snapshot_get_all_for_share(context, share_id) if len(snapshots): msg = _("Share still has %d dependent snapshots") % len(snapshots) raise exception.InvalidShare(reason=msg) cgsnapshot_members_count = self.db.count_cgsnapshot_members_in_share( context, share_id) if cgsnapshot_members_count: msg = (_("Share still has %d dependent cgsnapshot members") % cgsnapshot_members_count) raise exception.InvalidShare(reason=msg) self._check_is_share_busy(share) try: # we give the user_id of the share, to update the quota usage # for the user, who created the share reservations = QUOTAS.reserve(context, project_id=project_id, shares=-1, gigabytes=-share['size'], user_id=share['user_id']) except Exception as e: reservations = None LOG.exception( _LE("Failed to update quota for deleting share: %s"), six.text_type(e) ) for share_instance in share.instances: if share_instance['host']: self.delete_instance(context, share_instance, force=force) else: self.db.share_instance_delete(context, share_instance['id']) if reservations: # we give the user_id of the share, to update the quota usage # for the user, who created the share QUOTAS.commit(context, reservations, project_id=project_id, user_id=share['user_id']) def delete_instance(self, context, share_instance, force=False): policy.check_policy(context, 'share', 'delete') statuses = (constants.STATUS_AVAILABLE, constants.STATUS_ERROR, constants.STATUS_INACTIVE) if not (force or share_instance['status'] in statuses): msg = _("Share instance status must be one of %(statuses)s") % { "statuses": statuses} raise exception.InvalidShareInstance(reason=msg) share_instance = self.db.share_instance_update( context, share_instance['id'], {'status': constants.STATUS_DELETING, 'terminated_at': timeutils.utcnow()} ) self.share_rpcapi.delete_share_instance(context, share_instance, force=force) # NOTE(u_glide): 'updated_at' timestamp is used to track last usage of # share server. This is required for automatic share servers cleanup # because we should track somehow period of time when share server # doesn't have shares (unused). 
We do this update only on share # deletion because share server with shares cannot be deleted, so no # need to do this update on share creation or any other share operation if share_instance['share_server_id']: self.db.share_server_update( context, share_instance['share_server_id'], {'updated_at': timeutils.utcnow()}) def delete_share_server(self, context, server): """Delete share server.""" policy.check_policy(context, 'share_server', 'delete', server) shares = self.db.share_instances_get_all_by_share_server(context, server['id']) if shares: raise exception.ShareServerInUse(share_server_id=server['id']) cgs = self.db.consistency_group_get_all_by_share_server(context, server['id']) if cgs: LOG.error(_LE("share server '%(ssid)s' in use by CGs"), {'ssid': server['id']}) raise exception.ShareServerInUse(share_server_id=server['id']) # NOTE(vponomaryov): There is no share_server status update here, # it is intentional. # Status will be changed in manila.share.manager after verification # for race condition between share creation on server # and server deletion. self.share_rpcapi.delete_share_server(context, server) def create_snapshot(self, context, share, name, description, force=False): policy.check_policy(context, 'share', 'create_snapshot', share) if ((not force) and (share['status'] != constants.STATUS_AVAILABLE)): msg = _("Source share status must be " "%s") % constants.STATUS_AVAILABLE raise exception.InvalidShare(reason=msg) size = share['size'] self._check_is_share_busy(share) try: reservations = QUOTAS.reserve( context, snapshots=1, snapshot_gigabytes=size) except exception.OverQuota as e: overs = e.kwargs['overs'] usages = e.kwargs['usages'] quotas = e.kwargs['quotas'] def _consumed(name): return (usages[name]['reserved'] + usages[name]['in_use']) if 'snapshot_gigabytes' in overs: msg = _LW("Quota exceeded for %(s_pid)s, tried to create " "%(s_size)sG snapshot (%(d_consumed)dG of " "%(d_quota)dG already consumed).") LOG.warning(msg, {'s_pid': context.project_id, 's_size': size, 'd_consumed': _consumed('gigabytes'), 'd_quota': quotas['snapshot_gigabytes']}) raise exception.SnapshotSizeExceedsAvailableQuota() elif 'snapshots' in overs: msg = _LW("Quota exceeded for %(s_pid)s, tried to create " "snapshot (%(d_consumed)d snapshots " "already consumed).") LOG.warning(msg, {'s_pid': context.project_id, 'd_consumed': _consumed('snapshots')}) raise exception.SnapshotLimitExceeded( allowed=quotas['snapshots']) options = {'share_id': share['id'], 'size': share['size'], 'user_id': context.user_id, 'project_id': context.project_id, 'status': constants.STATUS_CREATING, 'progress': '0%', 'share_size': share['size'], 'display_name': name, 'display_description': description, 'share_proto': share['share_proto']} try: snapshot = self.db.share_snapshot_create(context, options) QUOTAS.commit(context, reservations) except Exception: with excutils.save_and_reraise_exception(): try: self.db.snapshot_delete(context, share['id']) finally: QUOTAS.rollback(context, reservations) # If replicated share, create snapshot instances for each replica if share.get('has_replicas'): snapshot = self.db.share_snapshot_get(context, snapshot['id']) share_instance_id = snapshot['instance']['share_instance_id'] replicas = self.db.share_replicas_get_all_by_share( context, share['id']) replicas = [r for r in replicas if r['id'] != share_instance_id] snapshot_instance = { 'status': constants.STATUS_CREATING, 'progress': '0%', } for replica in replicas: snapshot_instance.update({'share_instance_id': replica['id']}) 
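# A snapshot instance is recorded for every remaining replica (the
# replica backing snapshot['instance'] already has one), so each
# backend that holds a replica tracks its own copy of the snapshot.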
self.db.share_snapshot_instance_create( context, snapshot['id'], snapshot_instance) self.share_rpcapi.create_replicated_snapshot( context, share, snapshot) else: self.share_rpcapi.create_snapshot(context, share, snapshot) return snapshot def migration_start(self, context, share, host, force_host_copy, notify=True): """Migrates share to a new host.""" share_instance = share.instance # NOTE(gouthamr): Ensure share does not have replicas. # Currently share migrations are disallowed for replicated shares. if share.has_replicas: msg = _('Share %s has replicas. Remove the replicas before ' 'attempting to migrate the share.') % share['id'] LOG.error(msg) raise exception.Conflict(err=msg) # We only handle "available" share for now if share_instance['status'] != constants.STATUS_AVAILABLE: msg = _('Share instance %(instance_id)s status must be available, ' 'but current status is: %(instance_status)s.') % { 'instance_id': share_instance['id'], 'instance_status': share_instance['status']} raise exception.InvalidShare(reason=msg) self._check_is_share_busy(share) # Make sure the destination host is different than the current one if host == share_instance['host']: msg = _('Destination host %(dest_host)s must be different ' 'than the current host %(src_host)s.') % { 'dest_host': host, 'src_host': share_instance['host']} raise exception.InvalidHost(reason=msg) # We only handle shares without snapshots for now snaps = self.db.share_snapshot_get_all_for_share(context, share['id']) if snaps: msg = _("Share %s must not have snapshots.") % share['id'] raise exception.InvalidShare(reason=msg) # Make sure the host is in the list of available hosts utils.validate_service_host(context, share_utils.extract_host(host)) # NOTE(ganso): there is the possibility of an error between here and # manager code, which will cause the share to be stuck in # MIGRATION_STARTING status. 
According to Liberty Midcycle discussion, # this kind of scenario should not be cleaned up, the administrator # should be issued to clear this status before a new migration request # is made self.update( context, share, {'task_state': constants.TASK_STATE_MIGRATION_STARTING}) share_type = {} share_type_id = share['share_type_id'] if share_type_id: share_type = share_types.get_share_type(context, share_type_id) request_spec = self._get_request_spec_dict(share, share_type) try: self.scheduler_rpcapi.migrate_share_to_host(context, share['id'], host, force_host_copy, notify, request_spec) except Exception: msg = _('Destination host %(dest_host)s did not pass validation ' 'for migration of share %(share)s.') % { 'dest_host': host, 'share': share['id']} raise exception.InvalidHost(reason=msg) def migration_complete(self, context, share): if share['task_state'] not in ( constants.TASK_STATE_DATA_COPYING_COMPLETED, constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE): msg = _("First migration phase of share %s not completed" " yet.") % share['id'] LOG.error(msg) raise exception.InvalidShare(reason=msg) share_instance_id = None new_share_instance_id = None if share['task_state'] == ( constants.TASK_STATE_DATA_COPYING_COMPLETED): for instance in share.instances: if instance['status'] == constants.STATUS_MIGRATING: share_instance_id = instance['id'] if instance['status'] == constants.STATUS_MIGRATING_TO: new_share_instance_id = instance['id'] if None in (share_instance_id, new_share_instance_id): msg = _("Share instances %(instance_id)s and " "%(new_instance_id)s in inconsistent states, cannot" " continue share migration for share %(share_id)s" ".") % {'instance_id': share_instance_id, 'new_instance_id': new_share_instance_id, 'share_id': share['id']} raise exception.ShareMigrationFailed(reason=msg) share_rpc = share_rpcapi.ShareAPI() share_rpc.migration_complete(context, share, share_instance_id, new_share_instance_id) def migration_get_progress(self, context, share): if share['task_state'] == ( constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS): share_rpc = share_rpcapi.ShareAPI() return share_rpc.migration_get_progress(context, share) elif share['task_state'] == ( constants.TASK_STATE_DATA_COPYING_IN_PROGRESS): data_rpc = data_rpcapi.DataAPI() LOG.info(_LI("Sending request to get share migration information" " of share %s.") % share['id']) return data_rpc.data_copy_get_progress(context, share['id']) else: msg = _("Migration of share %s data copy progress cannot be " "obtained at this moment.") % share['id'] LOG.error(msg) raise exception.InvalidShare(reason=msg) def migration_cancel(self, context, share): if share['task_state'] == ( constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS): share_rpc = share_rpcapi.ShareAPI() share_rpc.migration_cancel(context, share) elif share['task_state'] == ( constants.TASK_STATE_DATA_COPYING_IN_PROGRESS): data_rpc = data_rpcapi.DataAPI() LOG.info(_LI("Sending request to cancel migration of " "share %s.") % share['id']) data_rpc.data_copy_cancel(context, share['id']) else: msg = _("Data copy for migration of share %s cannot be cancelled" " at this moment.") % share['id'] LOG.error(msg) raise exception.InvalidShare(reason=msg) @policy.wrap_check_policy('share') def delete_snapshot(self, context, snapshot, force=False): statuses = (constants.STATUS_AVAILABLE, constants.STATUS_ERROR) if not (force or snapshot['aggregate_status'] in statuses): msg = _("Share Snapshot status must be one of %(statuses)s.") % { "statuses": statuses} raise 
exception.InvalidShareSnapshot(reason=msg) share = self.db.share_get(context, snapshot['share_id']) snapshot_instances = ( self.db.share_snapshot_instance_get_all_with_filters( context, {'snapshot_ids': snapshot['id']}) ) for snapshot_instance in snapshot_instances: self.db.share_snapshot_instance_update( context, snapshot_instance['id'], {'status': constants.STATUS_DELETING}) if share['has_replicas']: self.share_rpcapi.delete_replicated_snapshot( context, snapshot, share['instance']['host'], share_id=share['id'], force=force) else: self.share_rpcapi.delete_snapshot(context, snapshot, share['instance']['host']) @policy.wrap_check_policy('share') def update(self, context, share, fields): if 'is_public' in fields: try: fields['is_public'] = strutils.bool_from_string( fields['is_public'], strict=True) except ValueError as e: raise exception.InvalidParameterValue(six.text_type(e)) return self.db.share_update(context, share['id'], fields) @policy.wrap_check_policy('share') def snapshot_update(self, context, snapshot, fields): return self.db.share_snapshot_update(context, snapshot['id'], fields) def get(self, context, share_id): rv = self.db.share_get(context, share_id) if not rv['is_public']: policy.check_policy(context, 'share', 'get', rv) return rv def get_all(self, context, search_opts=None, sort_key='created_at', sort_dir='desc'): policy.check_policy(context, 'share', 'get_all') if search_opts is None: search_opts = {} LOG.debug("Searching for shares by: %s", six.text_type(search_opts)) # Prepare filters filters = {} if 'metadata' in search_opts: filters['metadata'] = search_opts.pop('metadata') if not isinstance(filters['metadata'], dict): msg = _("Wrong metadata filter provided: " "%s.") % six.text_type(filters['metadata']) raise exception.InvalidInput(reason=msg) if 'extra_specs' in search_opts: # Verify policy for extra-specs access extensions.extension_authorizer( 'share', 'types_extra_specs')(context) filters['extra_specs'] = search_opts.pop('extra_specs') if not isinstance(filters['extra_specs'], dict): msg = _("Wrong extra specs filter provided: " "%s.") % six.text_type(filters['extra_specs']) raise exception.InvalidInput(reason=msg) if not (isinstance(sort_key, six.string_types) and sort_key): msg = _("Wrong sort_key filter provided: " "'%s'.") % six.text_type(sort_key) raise exception.InvalidInput(reason=msg) if not (isinstance(sort_dir, six.string_types) and sort_dir): msg = _("Wrong sort_dir filter provided: " "'%s'.") % six.text_type(sort_dir) raise exception.InvalidInput(reason=msg) is_public = search_opts.pop('is_public', False) is_public = strutils.bool_from_string(is_public, strict=True) # Get filtered list of shares if 'share_server_id' in search_opts: # NOTE(vponomaryov): this is project_id independent policy.check_policy(context, 'share', 'list_by_share_server_id') shares = self.db.share_get_all_by_share_server( context, search_opts.pop('share_server_id'), filters=filters, sort_key=sort_key, sort_dir=sort_dir) elif (context.is_admin and 'all_tenants' in search_opts): shares = self.db.share_get_all( context, filters=filters, sort_key=sort_key, sort_dir=sort_dir) else: shares = self.db.share_get_all_by_project( context, project_id=context.project_id, filters=filters, is_public=is_public, sort_key=sort_key, sort_dir=sort_dir) # NOTE(vponomaryov): we do not need 'all_tenants' opt anymore search_opts.pop('all_tenants', None) if search_opts: results = [] for s in shares: # values in search_opts can be only strings if all(s.get(k, None) == v for k, v in search_opts.items()): 
results.append(s) shares = results return shares def get_snapshot(self, context, snapshot_id): policy.check_policy(context, 'share_snapshot', 'get_snapshot') return self.db.share_snapshot_get(context, snapshot_id) def get_all_snapshots(self, context, search_opts=None, sort_key='share_id', sort_dir='desc'): policy.check_policy(context, 'share_snapshot', 'get_all_snapshots') search_opts = search_opts or {} LOG.debug("Searching for snapshots by: %s", six.text_type(search_opts)) # Read and remove key 'all_tenants' if was provided all_tenants = search_opts.pop('all_tenants', None) string_args = {'sort_key': sort_key, 'sort_dir': sort_dir} string_args.update(search_opts) for k, v in string_args.items(): if not (isinstance(v, six.string_types) and v): msg = _("Wrong '%(k)s' filter provided: " "'%(v)s'.") % {'k': k, 'v': string_args[k]} raise exception.InvalidInput(reason=msg) if (context.is_admin and all_tenants): snapshots = self.db.share_snapshot_get_all( context, filters=search_opts, sort_key=sort_key, sort_dir=sort_dir) else: snapshots = self.db.share_snapshot_get_all_by_project( context, context.project_id, filters=search_opts, sort_key=sort_key, sort_dir=sort_dir) # Remove key 'usage' if provided search_opts.pop('usage', None) if search_opts: results = [] not_found = object() for snapshot in snapshots: for opt, value in search_opts.items(): if snapshot.get(opt, not_found) != value: break else: results.append(snapshot) snapshots = results return snapshots def allow_access(self, ctx, share, access_type, access_to, access_level=None): """Allow access to share.""" policy.check_policy(ctx, 'share', 'allow_access') share = self.db.share_get(ctx, share['id']) if share['status'] != constants.STATUS_AVAILABLE: msg = _("Share status must be %s") % constants.STATUS_AVAILABLE raise exception.InvalidShare(reason=msg) values = { 'share_id': share['id'], 'access_type': access_type, 'access_to': access_to, 'access_level': access_level, } share_access_list = self.db.share_access_get_all_by_type_and_access( ctx, share['id'], access_type, access_to) if len(share_access_list) > 0: raise exception.ShareAccessExists(access_type=access_type, access=access_to) if access_level not in constants.ACCESS_LEVELS + (None, ): msg = _("Invalid share access level: %s.") % access_level raise exception.InvalidShareAccess(reason=msg) access = self.db.share_access_create(ctx, values) for share_instance in share.instances: self.allow_access_to_instance(ctx, share_instance, access) # NOTE(tpsilva): refreshing share_access model access = self.db.share_access_get(ctx, access['id']) return { 'id': access['id'], 'share_id': access['share_id'], 'access_type': access['access_type'], 'access_to': access['access_to'], 'access_level': access['access_level'], 'state': access['state'], } def allow_access_to_instance(self, context, share_instance, access): policy.check_policy(context, 'share', 'allow_access') if not share_instance['host']: msg = _("Invalid share instance host: %s") % share_instance['host'] raise exception.InvalidShareInstance(reason=msg) status = share_instance['access_rules_status'] if status == constants.STATUS_ERROR: values = { 'instance_id': share_instance['id'], 'status': status, 'valid_status': constants.STATUS_ACTIVE } msg = _("Share instance %(instance_id)s access rules status is: " "%(status)s. 
Please remove any incorrect rules to get it " "back to %(valid_status)s.") % values raise exception.InvalidShareInstance(reason=msg) else: if status == constants.STATUS_ACTIVE: self.db.share_instance_update_access_status( context, share_instance['id'], constants.STATUS_OUT_OF_SYNC ) elif status == constants.STATUS_UPDATING: self.db.share_instance_update_access_status( context, share_instance['id'], constants.STATUS_UPDATING_MULTIPLE ) self.share_rpcapi.allow_access(context, share_instance, access) def deny_access(self, ctx, share, access): """Deny access to share.""" policy.check_policy(ctx, 'share', 'deny_access') # First check state of the target share share = self.db.share_get(ctx, share['id']) if not (share.instances and share.instance['host']): msg = _("Share doesn't have any instances") raise exception.InvalidShare(reason=msg) if share['status'] != constants.STATUS_AVAILABLE: msg = _("Share status must be %s") % constants.STATUS_AVAILABLE raise exception.InvalidShare(reason=msg) for share_instance in share.instances: try: self.deny_access_to_instance(ctx, share_instance, access) except exception.NotFound: LOG.warning(_LW("Access rule %(access_id)s not found " "for instance %(instance_id)s.") % { 'access_id': access['id'], 'instance_id': share_instance['id']}) def deny_access_to_instance(self, context, share_instance, access): policy.check_policy(context, 'share', 'deny_access') if not share_instance['host']: msg = _("Invalid share instance host: %s") % share_instance['host'] raise exception.InvalidShareInstance(reason=msg) status = share_instance['access_rules_status'] if status != constants.STATUS_ERROR: new_status = constants.STATUS_OUT_OF_SYNC if status in constants.UPDATING_RULES_STATUSES: new_status = constants.STATUS_UPDATING_MULTIPLE self.db.share_instance_update_access_status( context, share_instance['id'], new_status) self.share_rpcapi.deny_access(context, share_instance, access) def access_get_all(self, context, share): """Returns all access rules for share.""" policy.check_policy(context, 'share', 'access_get_all') rules = self.db.share_access_get_all_for_share(context, share['id']) return [{'id': rule.id, 'access_type': rule.access_type, 'access_to': rule.access_to, 'access_level': rule.access_level, 'state': rule.state, } for rule in rules] def access_get(self, context, access_id): """Returns access rule with the id.""" policy.check_policy(context, 'share', 'access_get') rule = self.db.share_access_get(context, access_id) return rule @policy.wrap_check_policy('share') def get_share_metadata(self, context, share): """Get all metadata associated with a share.""" rv = self.db.share_metadata_get(context, share['id']) return dict(rv.items()) @policy.wrap_check_policy('share') def delete_share_metadata(self, context, share, key): """Delete the given metadata item from a share.""" self.db.share_metadata_delete(context, share['id'], key) def _check_is_share_busy(self, share): """Raises an exception if share is busy with an active task.""" if share.is_busy: msg = _("Share %(share_id)s is busy as part of an active " "task: %(task)s.") % { 'share_id': share['id'], 'task': share['task_state'] } raise exception.ShareBusyException(reason=msg) def _check_metadata_properties(self, context, metadata=None): if not metadata: metadata = {} for k, v in metadata.items(): if not k: msg = _("Metadata property key is blank.") LOG.warning(msg) raise exception.InvalidShareMetadata(message=msg) if len(k) > 255: msg = _("Metadata property key is " "greater than 255 characters.") LOG.warning(msg) 
raise exception.InvalidShareMetadataSize(message=msg) if not v: msg = _("Metadata property value is blank.") LOG.warning(msg) raise exception.InvalidShareMetadata(message=msg) if len(v) > 1023: msg = _("Metadata property value is " "greater than 1023 characters.") LOG.warning(msg) raise exception.InvalidShareMetadataSize(message=msg) @policy.wrap_check_policy('share') def update_share_metadata(self, context, share, metadata, delete=False): """Updates or creates share metadata. If delete is True, metadata items that are not specified in the `metadata` argument will be deleted. """ orig_meta = self.get_share_metadata(context, share) if delete: _metadata = metadata else: _metadata = orig_meta.copy() _metadata.update(metadata) self._check_metadata_properties(context, _metadata) self.db.share_metadata_update(context, share['id'], _metadata, delete) return _metadata def get_share_network(self, context, share_net_id): return self.db.share_network_get(context, share_net_id) def extend(self, context, share, new_size): policy.check_policy(context, 'share', 'extend') if share['status'] != constants.STATUS_AVAILABLE: msg_params = { 'valid_status': constants.STATUS_AVAILABLE, 'share_id': share['id'], 'status': share['status'], } msg = _("Share %(share_id)s status must be '%(valid_status)s' " "to extend, but current status is: " "%(status)s.") % msg_params raise exception.InvalidShare(reason=msg) self._check_is_share_busy(share) size_increase = int(new_size) - share['size'] if size_increase <= 0: msg = (_("New size for extend must be greater " "than current size. (current: %(size)s, " "extended: %(new_size)s).") % {'new_size': new_size, 'size': share['size']}) raise exception.InvalidInput(reason=msg) try: # we give the user_id of the share, to update the quota usage # for the user, who created the share, because on share delete # only this quota will be decreased reservations = QUOTAS.reserve(context, project_id=share['project_id'], gigabytes=size_increase, user_id=share['user_id']) except exception.OverQuota as exc: usages = exc.kwargs['usages'] quotas = exc.kwargs['quotas'] def _consumed(name): return usages[name]['reserved'] + usages[name]['in_use'] msg = _LE("Quota exceeded for %(s_pid)s, tried to extend share " "by %(s_size)sG, (%(d_consumed)dG of %(d_quota)dG " "already consumed).") LOG.error(msg, {'s_pid': context.project_id, 's_size': size_increase, 'd_consumed': _consumed('gigabytes'), 'd_quota': quotas['gigabytes']}) raise exception.ShareSizeExceedsAvailableQuota( requested=size_increase, consumed=_consumed('gigabytes'), quota=quotas['gigabytes']) self.update(context, share, {'status': constants.STATUS_EXTENDING}) self.share_rpcapi.extend_share(context, share, new_size, reservations) LOG.info(_LI("Extend share request issued successfully."), resource=share) def shrink(self, context, share, new_size): policy.check_policy(context, 'share', 'shrink') status = six.text_type(share['status']).lower() valid_statuses = (constants.STATUS_AVAILABLE, constants.STATUS_SHRINKING_POSSIBLE_DATA_LOSS_ERROR) if status not in valid_statuses: msg_params = { 'valid_status': ", ".join(valid_statuses), 'share_id': share['id'], 'status': status, } msg = _("Share %(share_id)s status must in (%(valid_status)s) " "to shrink, but current status is: " "%(status)s.") % msg_params raise exception.InvalidShare(reason=msg) self._check_is_share_busy(share) size_decrease = int(share['size']) - int(new_size) if size_decrease <= 0 or new_size <= 0: msg = (_("New size for shrink must be less " "than current size and greater than 
0 (current: %(size)s," " new: %(new_size)s)") % {'new_size': new_size, 'size': share['size']}) raise exception.InvalidInput(reason=msg) self.update(context, share, {'status': constants.STATUS_SHRINKING}) self.share_rpcapi.shrink_share(context, share, new_size) LOG.info(_LI("Shrink share (id=%(id)s) request issued successfully." " New size: %(size)s") % {'id': share['id'], 'size': new_size}) manila-2.0.0/manila/opts.py0000664000567000056710000001533312701407107016717 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 SUSE Linux Products GmbH. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = [ 'list_opts' ] import copy import itertools import oslo_concurrency.opts import oslo_log._options import oslo_middleware.opts import oslo_policy.opts import manila.api.common import manila.api.middleware.auth import manila.common.config import manila.compute import manila.compute.nova import manila.db.api import manila.db.base import manila.exception import manila.network import manila.network.linux.interface import manila.network.neutron.api import manila.network.neutron.neutron_network_plugin import manila.network.nova_network_plugin import manila.network.standalone_network_plugin import manila.quota import manila.scheduler.drivers.base import manila.scheduler.drivers.simple import manila.scheduler.host_manager import manila.scheduler.manager import manila.scheduler.scheduler_options import manila.scheduler.weighers import manila.scheduler.weighers.capacity import manila.scheduler.weighers.pool import manila.service import manila.share.api import manila.share.driver import manila.share.drivers.cephfs.cephfs_native import manila.share.drivers.emc.driver import manila.share.drivers.emc.plugins.isilon.isilon import manila.share.drivers.generic import manila.share.drivers.glusterfs import manila.share.drivers.glusterfs.common import manila.share.drivers.glusterfs.layout import manila.share.drivers.glusterfs.layout_directory import manila.share.drivers.glusterfs.layout_volume import manila.share.drivers.hdfs.hdfs_native import manila.share.drivers.hitachi.hds_hnas import manila.share.drivers.hpe.hpe_3par_driver import manila.share.drivers.huawei.huawei_nas import manila.share.drivers.ibm.gpfs import manila.share.drivers.netapp.options import manila.share.drivers.quobyte.quobyte import manila.share.drivers.service_instance import manila.share.drivers.tegile.tegile import manila.share.drivers.windows.service_instance import manila.share.drivers.windows.winrm_helper import manila.share.drivers.zfsonlinux.driver import manila.share.drivers.zfssa.zfssashare import manila.share.drivers_private_data import manila.share.hook import manila.share.manager import manila.volume import manila.volume.cinder import manila.wsgi # List of *all* options in [DEFAULT] namespace of manila. # Any new option list or option needs to be registered here. 
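# For example (hypothetical module): a list of options named 'foo_opts'
# defined in manila.share.drivers.foo.foo_driver would be exposed to
# the config generator by importing that module above and adding
# manila.share.drivers.foo.foo_driver.foo_opts to the list below.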
_global_opt_lists = [ # Keep list alphabetically sorted manila.api.common.api_common_opts, [manila.api.middleware.auth.use_forwarded_for_opt], manila.common.config.core_opts, manila.common.config.debug_opts, manila.common.config.global_opts, manila.compute._compute_opts, manila.compute.nova.nova_opts, manila.db.api.db_opts, [manila.db.base.db_driver_opt], manila.exception.exc_log_opts, manila.network.linux.interface.OPTS, manila.network.network_opts, manila.network.neutron.api.neutron_opts, manila.network.neutron.neutron_network_plugin. neutron_single_network_plugin_opts, manila.network.nova_network_plugin.nova_single_network_plugin_opts, manila.network.standalone_network_plugin.standalone_network_plugin_opts, manila.quota.quota_opts, manila.scheduler.drivers.base.scheduler_driver_opts, manila.scheduler.host_manager.host_manager_opts, [manila.scheduler.manager.scheduler_driver_opt], [manila.scheduler.scheduler_options.scheduler_json_config_location_opt], manila.scheduler.drivers.simple.simple_scheduler_opts, manila.scheduler.weighers.capacity.capacity_weight_opts, manila.scheduler.weighers.pool.pool_weight_opts, manila.service.service_opts, manila.share.api.share_api_opts, manila.share.driver.ganesha_opts, manila.share.driver.share_opts, manila.share.driver.ssh_opts, manila.share.drivers_private_data.private_data_opts, manila.share.drivers.cephfs.cephfs_native.cephfs_native_opts, manila.share.drivers.emc.driver.EMC_NAS_OPTS, manila.share.drivers.generic.share_opts, manila.share.drivers.glusterfs.common.glusterfs_common_opts, manila.share.drivers.glusterfs.GlusterfsManilaShare_opts, manila.share.drivers.glusterfs.layout.glusterfs_share_layout_opts, manila.share.drivers.glusterfs.layout_directory. glusterfs_directory_mapped_opts, manila.share.drivers.glusterfs.layout_volume.glusterfs_volume_mapped_opts, manila.share.drivers.hdfs.hdfs_native.hdfs_native_share_opts, manila.share.drivers.hitachi.hds_hnas.hds_hnas_opts, manila.share.drivers.hpe.hpe_3par_driver.HPE3PAR_OPTS, manila.share.drivers.huawei.huawei_nas.huawei_opts, manila.share.drivers.ibm.gpfs.gpfs_share_opts, manila.share.drivers.netapp.options.netapp_proxy_opts, manila.share.drivers.netapp.options.netapp_connection_opts, manila.share.drivers.netapp.options.netapp_transport_opts, manila.share.drivers.netapp.options.netapp_basicauth_opts, manila.share.drivers.netapp.options.netapp_provisioning_opts, manila.share.drivers.netapp.options.netapp_replication_opts, manila.share.drivers.quobyte.quobyte.quobyte_manila_share_opts, manila.share.drivers.service_instance.common_opts, manila.share.drivers.service_instance.no_share_servers_handling_mode_opts, manila.share.drivers.service_instance.share_servers_handling_mode_opts, manila.share.drivers.tegile.tegile.tegile_opts, manila.share.drivers.windows.service_instance.windows_share_server_opts, manila.share.drivers.windows.winrm_helper.winrm_opts, manila.share.drivers.zfsonlinux.driver.zfsonlinux_opts, manila.share.drivers.zfssa.zfssashare.ZFSSA_OPTS, manila.share.hook.hook_options, manila.share.manager.share_manager_opts, manila.volume._volume_opts, manila.volume.cinder.cinder_opts, manila.wsgi.eventlet_opts, manila.wsgi.socket_opts, ] _opts = [ (None, list(itertools.chain(*_global_opt_lists))), ] _opts.extend(oslo_concurrency.opts.list_opts()) _opts.extend(oslo_log._options.list_opts()) _opts.extend(oslo_middleware.opts.list_opts()) _opts.extend(oslo_policy.opts.list_opts()) _opts.extend(manila.network.neutron.api.list_opts()) _opts.extend(manila.compute.nova.list_opts()) 
_opts.extend(manila.volume.cinder.list_opts()) def list_opts(): """Return a list of oslo.config options available in Manila.""" return [(m, copy.deepcopy(o)) for m, o in _opts] manila-2.0.0/manila/volume/0000775000567000056710000000000012701407265016667 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/volume/cinder.py0000664000567000056710000003460512701407112020504 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all requests relating to volumes + cinder. """ import copy from cinderclient import exceptions as cinder_exception from cinderclient.v2 import client as cinder_client from keystoneauth1 import loading as ks_loading from oslo_config import cfg from oslo_log import log import six from manila.common import client_auth from manila.common.config import core_opts import manila.context as ctxt from manila.db import base from manila import exception from manila.i18n import _ CINDER_GROUP = 'cinder' cinder_deprecated_opts = [ cfg.StrOpt('cinder_catalog_info', default='volume:cinder:publicURL', help='Info to match when looking for cinder in the service ' 'catalog. Format is separated values of the form: ' '::', deprecated_group='DEFAULT', deprecated_for_removal=True, deprecated_reason="This option isn't used any longer."), cfg.StrOpt('cinder_admin_username', default='cinder', help='Cinder admin username.', deprecated_group='DEFAULT', deprecated_for_removal=True, deprecated_reason="This option isn't used any longer. Please " "use [cinder] username instead."), cfg.StrOpt('cinder_admin_password', help='Cinder admin password.', deprecated_group='DEFAULT', deprecated_for_removal=True, deprecated_reason="This option isn't used any longer. Please " "use [cinder] password instead."), cfg.StrOpt('cinder_admin_tenant_name', default='service', help='Cinder admin tenant name.', deprecated_group='DEFAULT', deprecated_for_removal=True, deprecated_reason="This option isn't used any longer. Please " "use [cinder] tenant_name instead."), cfg.StrOpt('cinder_admin_auth_url', default='http://localhost:5000/v2.0', help='Identity service URL.', deprecated_group='DEFAULT', deprecated_for_removal=True, deprecated_reason="This option isn't used any longer. 
Please " "use [cinder] auth_url instead.") ] cinder_opts = [ cfg.BoolOpt('cross_az_attach', default=True, deprecated_group="DEFAULT", deprecated_name="cinder_cross_az_attach", help='Allow attaching between instances and volumes in ' 'different availability zones.'), cfg.StrOpt('ca_certificates_file', help='Location of CA certificates file to use for cinder ' 'client requests.', deprecated_group='DEFAULT', deprecated_name="cinder_ca_certificates_file"), cfg.IntOpt('http_retries', default=3, help='Number of cinderclient retries on failed HTTP calls.', deprecated_group='DEFAULT', deprecated_name="cinder_http_retries"), cfg.BoolOpt('api_insecure', default=False, help='Allow to perform insecure SSL requests to cinder.', deprecated_group='DEFAULT', deprecated_name="cinder_api_insecure"), ] CONF = cfg.CONF CONF.register_opts(cinder_deprecated_opts) CONF.register_opts(core_opts) CONF.register_opts(cinder_opts, CINDER_GROUP) ks_loading.register_session_conf_options(CONF, CINDER_GROUP) ks_loading.register_auth_conf_options(CONF, CINDER_GROUP) LOG = log.getLogger(__name__) def list_opts(): return client_auth.AuthClientLoader.list_opts(CINDER_GROUP) auth_obj = None def cinderclient(context): global auth_obj if not auth_obj: deprecated_opts_for_v2 = { 'username': CONF.nova_admin_username, 'password': CONF.nova_admin_password, 'tenant_name': CONF.nova_admin_tenant_name, 'auth_url': CONF.nova_admin_auth_url, } auth_obj = client_auth.AuthClientLoader( client_class=cinder_client.Client, exception_module=cinder_exception, cfg_group=CINDER_GROUP, deprecated_opts_for_v2=deprecated_opts_for_v2) return auth_obj.get_client(context, insecure=CONF[CINDER_GROUP].api_insecure, cacert=CONF[CINDER_GROUP].ca_certificates_file, retries=CONF[CINDER_GROUP].http_retries) def _untranslate_volume_summary_view(context, vol): """Maps keys for volumes summary view.""" d = {} d['id'] = vol.id d['status'] = vol.status d['size'] = vol.size d['availability_zone'] = vol.availability_zone d['created_at'] = vol.created_at d['attach_time'] = "" d['mountpoint'] = "" if vol.attachments: att = vol.attachments[0] d['attach_status'] = 'attached' d['instance_uuid'] = att['server_id'] d['mountpoint'] = att['device'] else: d['attach_status'] = 'detached' d['name'] = vol.name d['description'] = vol.description d['volume_type_id'] = vol.volume_type d['snapshot_id'] = vol.snapshot_id d['volume_metadata'] = {} for key, value in vol.metadata.items(): d['volume_metadata'][key] = value if hasattr(vol, 'volume_image_metadata'): d['volume_image_metadata'] = copy.deepcopy(vol.volume_image_metadata) return d def _untranslate_snapshot_summary_view(context, snapshot): """Maps keys for snapshots summary view.""" d = {} d['id'] = snapshot.id d['status'] = snapshot.status d['progress'] = snapshot.progress d['size'] = snapshot.size d['created_at'] = snapshot.created_at d['name'] = snapshot.name d['description'] = snapshot.description d['volume_id'] = snapshot.volume_id d['project_id'] = snapshot.project_id d['volume_size'] = snapshot.size return d def translate_volume_exception(method): """Transforms the exception for the volume, keeps its traceback intact.""" def wrapper(self, ctx, volume_id, *args, **kwargs): try: res = method(self, ctx, volume_id, *args, **kwargs) except cinder_exception.ClientException as e: if isinstance(e, cinder_exception.NotFound): raise exception.VolumeNotFound(volume_id=volume_id) elif isinstance(e, cinder_exception.BadRequest): raise exception.InvalidInput(reason=six.text_type(e)) return res return wrapper def 
translate_snapshot_exception(method): """Transforms the exception for the snapshot. Note: Keeps its traceback intact. """ def wrapper(self, ctx, snapshot_id, *args, **kwargs): try: res = method(self, ctx, snapshot_id, *args, **kwargs) except cinder_exception.ClientException as e: if isinstance(e, cinder_exception.NotFound): raise exception.VolumeSnapshotNotFound(snapshot_id=snapshot_id) return res return wrapper class API(base.Base): """API for interacting with the volume manager.""" @translate_volume_exception def get(self, context, volume_id): item = cinderclient(context).volumes.get(volume_id) return _untranslate_volume_summary_view(context, item) def get_all(self, context, search_opts={}): items = cinderclient(context).volumes.list(detailed=True, search_opts=search_opts) rval = [] for item in items: rval.append(_untranslate_volume_summary_view(context, item)) return rval def check_attached(self, context, volume): """Raise exception if volume in use.""" if volume['status'] != "in-use": msg = _("status must be 'in-use'") raise exception.InvalidVolume(reason=msg) def check_attach(self, context, volume, instance=None): if volume['status'] != "available": msg = _("status must be 'available'") raise exception.InvalidVolume(reason=msg) if volume['attach_status'] == "attached": msg = _("already attached") raise exception.InvalidVolume(reason=msg) if instance and not CONF[CINDER_GROUP].cross_az_attach: if instance['availability_zone'] != volume['availability_zone']: msg = _("Instance and volume not in same availability_zone") raise exception.InvalidVolume(reason=msg) def check_detach(self, context, volume): if volume['status'] == "available": msg = _("already detached") raise exception.InvalidVolume(reason=msg) @translate_volume_exception def reserve_volume(self, context, volume_id): cinderclient(context).volumes.reserve(volume_id) @translate_volume_exception def unreserve_volume(self, context, volume_id): cinderclient(context).volumes.unreserve(volume_id) @translate_volume_exception def begin_detaching(self, context, volume_id): cinderclient(context).volumes.begin_detaching(volume_id) @translate_volume_exception def roll_detaching(self, context, volume_id): cinderclient(context).volumes.roll_detaching(volume_id) @translate_volume_exception def attach(self, context, volume_id, instance_uuid, mountpoint): cinderclient(context).volumes.attach(volume_id, instance_uuid, mountpoint) @translate_volume_exception def detach(self, context, volume_id): cinderclient(context).volumes.detach(volume_id) @translate_volume_exception def initialize_connection(self, context, volume_id, connector): return cinderclient(context).volumes.initialize_connection(volume_id, connector) @translate_volume_exception def terminate_connection(self, context, volume_id, connector): return cinderclient(context).volumes.terminate_connection(volume_id, connector) def create(self, context, size, name, description, snapshot=None, image_id=None, volume_type=None, metadata=None, availability_zone=None): if snapshot is not None: snapshot_id = snapshot['id'] else: snapshot_id = None kwargs = dict(snapshot_id=snapshot_id, name=name, description=description, volume_type=volume_type, user_id=context.user_id, project_id=context.project_id, availability_zone=availability_zone, metadata=metadata, imageRef=image_id) try: item = cinderclient(context).volumes.create(size, **kwargs) return _untranslate_volume_summary_view(context, item) except cinder_exception.BadRequest as e: raise exception.InvalidInput(reason=six.text_type(e)) except 
cinder_exception.NotFound:
            raise exception.NotFound(
                _("Error creating cinder volume: Cinder volume type %s "
                  "does not exist. Check the cinder_volume_type parameter "
                  "in the configuration file.") % volume_type)
        except Exception as e:
            raise exception.ManilaException(e)

    @translate_volume_exception
    def extend(self, context, volume_id, new_size):
        cinderclient(context).volumes.extend(volume_id, new_size)

    @translate_volume_exception
    def delete(self, context, volume_id):
        cinderclient(context).volumes.delete(volume_id)

    @translate_volume_exception
    def update(self, context, volume_id, fields):
        # Use Manila's admin context here, because the user's Cinder context
        # is restricted from updating volumes.
        manila_admin_context = ctxt.get_admin_context()
        client = cinderclient(manila_admin_context)
        item = client.volumes.get(volume_id)
        client.volumes.update(item, **fields)

    def get_volume_encryption_metadata(self, context, volume_id):
        return cinderclient(context).volumes.get_encryption_metadata(
            volume_id)

    @translate_snapshot_exception
    def get_snapshot(self, context, snapshot_id):
        item = cinderclient(context).volume_snapshots.get(snapshot_id)
        return _untranslate_snapshot_summary_view(context, item)

    def get_all_snapshots(self, context, search_opts=None):
        items = cinderclient(context).volume_snapshots.list(
            detailed=True, search_opts=search_opts)
        rvals = []
        for item in items:
            rvals.append(_untranslate_snapshot_summary_view(context, item))
        return rvals

    @translate_volume_exception
    def create_snapshot(self, context, volume_id, name, description):
        item = cinderclient(context).volume_snapshots.create(volume_id,
                                                             False,
                                                             name,
                                                             description)
        return _untranslate_snapshot_summary_view(context, item)

    @translate_volume_exception
    def create_snapshot_force(self, context, volume_id, name, description):
        item = cinderclient(context).volume_snapshots.create(volume_id,
                                                             True,
                                                             name,
                                                             description)
        return _untranslate_snapshot_summary_view(context, item)

    @translate_snapshot_exception
    def delete_snapshot(self, context, snapshot_id):
        cinderclient(context).volume_snapshots.delete(snapshot_id)
manila-2.0.0/manila/volume/__init__.py0000664000567000056710000000221512701407107020773 0ustar jenkinsjenkins00000000000000
# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import oslo_config.cfg
import oslo_utils.importutils

_volume_opts = [
    oslo_config.cfg.StrOpt('volume_api_class',
                           default='manila.volume.cinder.API',
                           help='The full class name of the '
                                'Volume API class to use.'),
]

oslo_config.cfg.CONF.register_opts(_volume_opts)


def API():
    importutils = oslo_utils.importutils
    volume_api_class = oslo_config.cfg.CONF.volume_api_class
    cls = importutils.import_class(volume_api_class)
    return cls()
manila-2.0.0/manila/utils.py0000664000567000056710000005456112701407107017100 0ustar jenkinsjenkins00000000000000
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
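# NOTE: The execute() and trycmd() helpers defined below wrap
# oslo.concurrency's processutils calls and inject the manila-rootwrap
# helper whenever run_as_root is requested. A small illustrative sketch
# (the command shown is only an example, not taken from this module):
#
#     from manila import utils
#
#     # Because run_as_root=True and no root_helper was passed explicitly,
#     # this runs the command through
#     # "sudo manila-rootwrap <CONF.rootwrap_config> ..." via
#     # processutils.execute().
#     out, err = utils.execute('mount', '/dev/sdb', '/mnt/share',
#                              run_as_root=True)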
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utilities and helper functions.""" import contextlib import errno import functools import inspect import os import pyclbr import random import re import shutil import socket import sys import tempfile import time from eventlet import pools import netaddr from oslo_concurrency import lockutils from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log from oslo_utils import importutils from oslo_utils import timeutils import paramiko import retrying import six from manila.common import constants from manila.db import api as db_api from manila import exception from manila.i18n import _ CONF = cfg.CONF LOG = log.getLogger(__name__) synchronized = lockutils.synchronized_with_prefix('manila-') def _get_root_helper(): return 'sudo manila-rootwrap %s' % CONF.rootwrap_config def execute(*cmd, **kwargs): """Convenience wrapper around oslo's execute() function.""" if 'run_as_root' in kwargs and 'root_helper' not in kwargs: kwargs['root_helper'] = _get_root_helper() return processutils.execute(*cmd, **kwargs) def trycmd(*args, **kwargs): """Convenience wrapper around oslo's trycmd() function.""" if 'run_as_root' in kwargs and 'root_helper' not in kwargs: kwargs['root_helper'] = _get_root_helper() return processutils.trycmd(*args, **kwargs) class SSHPool(pools.Pool): """A simple eventlet pool to hold ssh connections.""" def __init__(self, ip, port, conn_timeout, login, password=None, privatekey=None, *args, **kwargs): self.ip = ip self.port = port self.login = login self.password = password self.conn_timeout = conn_timeout if conn_timeout else None self.path_to_private_key = privatekey super(SSHPool, self).__init__(*args, **kwargs) def create(self): ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) look_for_keys = True if self.path_to_private_key: self.path_to_private_key = os.path.expanduser( self.path_to_private_key) look_for_keys = False elif self.password: look_for_keys = False try: ssh.connect(self.ip, port=self.port, username=self.login, password=self.password, key_filename=self.path_to_private_key, look_for_keys=look_for_keys, timeout=self.conn_timeout) # Paramiko by default sets the socket timeout to 0.1 seconds, # ignoring what we set through the sshclient. This doesn't help for # keeping long lived connections. Hence we have to bypass it, by # overriding it after the transport is initialized. We are setting # the sockettimeout to None and setting a keepalive packet so that, # the server will keep the connection open. All that does is send # a keepalive packet every ssh_conn_timeout seconds. if self.conn_timeout: transport = ssh.get_transport() transport.sock.settimeout(None) transport.set_keepalive(self.conn_timeout) return ssh except Exception as e: msg = _("Check whether private key or password are correctly " "set. Error connecting via ssh: %s") % e LOG.error(msg) raise exception.SSHException(msg) def get(self): """Return an item from the pool, when one is available. 
This may cause the calling greenthread to block. Check if a
        connection is active before returning it. For dead connections
        create and return a new connection.
        """
        if self.free_items:
            conn = self.free_items.popleft()
            if conn:
                if conn.get_transport().is_active():
                    return conn
                else:
                    conn.close()
            return self.create()
        if self.current_size < self.max_size:
            created = self.create()
            self.current_size += 1
            return created
        return self.channel.get()

    def remove(self, ssh):
        """Close an ssh client and remove it from free_items."""
        ssh.close()
        if ssh in self.free_items:
            self.free_items.remove(ssh)
        if self.current_size > 0:
            self.current_size -= 1


def check_ssh_injection(cmd_list):
    ssh_injection_pattern = ['`', '$', '|', '||', ';', '&', '&&', '>', '>>',
                             '<']

    # Check whether injection attacks exist
    for arg in cmd_list:
        arg = arg.strip()

        # Check for matching quotes on the ends
        is_quoted = re.match('^(?P<quote>[\'"])(?P<quoted>.*)(?P=quote)$', arg)
        if is_quoted:
            # Check for unescaped quotes within the quoted argument
            quoted = is_quoted.group('quoted')
            if quoted:
                if (re.match('[\'"]', quoted) or
                        re.search('[^\\\\][\'"]', quoted)):
                    raise exception.SSHInjectionThreat(command=cmd_list)
        else:
            # We only allow spaces within quoted arguments, and that
            # is the only special character allowed within quotes
            if len(arg.split()) > 1:
                raise exception.SSHInjectionThreat(command=cmd_list)

        # Second, check whether a dangerous shell special character appears
        # in the argument; raise unless it is escaped with a backslash.
        for c in ssh_injection_pattern:
            if c not in arg:
                continue
            result = arg.find(c)
            if not result == -1:
                if result == 0 or not arg[result - 1] == '\\':
                    raise exception.SSHInjectionThreat(command=cmd_list)


class LazyPluggable(object):
    """A pluggable backend loaded lazily based on some value."""

    def __init__(self, pivot, **backends):
        self.__backends = backends
        self.__pivot = pivot
        self.__backend = None

    def __get_backend(self):
        if not self.__backend:
            backend_name = CONF[self.__pivot]
            if backend_name not in self.__backends:
                raise exception.Error(_('Invalid backend: %s') % backend_name)

            backend = self.__backends[backend_name]
            if isinstance(backend, tuple):
                name = backend[0]
                fromlist = backend[1]
            else:
                name = backend
                fromlist = backend

            self.__backend = __import__(name, None, None, fromlist)
            LOG.debug('backend %s', self.__backend)
        return self.__backend

    def __getattr__(self, key):
        backend = self.__get_backend()
        return getattr(backend, key)


def delete_if_exists(pathname):
    """Delete a file, but ignore file not found error."""
    try:
        os.unlink(pathname)
    except OSError as e:
        if e.errno == errno.ENOENT:
            return
        else:
            raise


def get_from_path(items, path):
    """Returns a list of items matching the specified path.

    Takes an XPath-like expression e.g. prop1/prop2/prop3, and for
    each item in items, looks up items[prop1][prop2][prop3]. Like XPath,
    if any of the intermediate results are lists it will treat each list
    item individually. A 'None' in items or any child expressions will be
    ignored; this function will not throw because of None (anywhere) in
    items. The returned list will contain no None values.
""" if path is None: raise exception.Error('Invalid mini_xpath') (first_token, sep, remainder) = path.partition('/') if first_token == '': raise exception.Error('Invalid mini_xpath') results = [] if items is None: return results if not isinstance(items, list): # Wrap single objects in a list items = [items] for item in items: if item is None: continue get_method = getattr(item, 'get', None) if get_method is None: continue child = get_method(first_token) if child is None: continue if isinstance(child, list): # Flatten intermediate lists for x in child: results.append(x) else: results.append(child) if not sep: # No more tokens return results else: return get_from_path(results, remainder) def is_ipv6_configured(): """Check if system contain IPv6 capable network interface. :rtype: bool :raises: IOError """ try: fd = open('/proc/net/if_inet6') except IOError as e: if e.errno != errno.ENOENT: raise result = False else: result = bool(fd.read(32)) fd.close() return result def is_eventlet_bug105(): """Check if eventlet support IPv6 addresses. See https://bitbucket.org/eventlet/eventlet/issue/105 :rtype: bool """ try: mod = sys.modules['eventlet.support.greendns'] except KeyError: return False try: connect_data = mod.getaddrinfo('::1', 80) except socket.gaierror: return True fail = [x for x in connect_data if x[0] != socket.AF_INET6] return bool(fail) def monkey_patch(): """Patch decorator. If the Flags.monkey_patch set as True, this function patches a decorator for all functions in specified modules. You can set decorators for each modules using CONF.monkey_patch_modules. The format is "Module path:Decorator function". Example: 'manila.api.ec2.cloud:' \ manila.openstack.common.notifier.api.notify_decorator' Parameters of the decorator is as follows. (See manila.openstack.common.notifier.api.notify_decorator) name - name of the function function - object of the function """ # If CONF.monkey_patch is not True, this function do nothing. if not CONF.monkey_patch: return # Get list of modules and decorators for module_and_decorator in CONF.monkey_patch_modules: module, decorator_name = module_and_decorator.split(':') # import decorator function decorator = importutils.import_class(decorator_name) __import__(module) # Retrieve module information using pyclbr module_data = pyclbr.readmodule_ex(module) for key in module_data.keys(): # set the decorator for the class methods if isinstance(module_data[key], pyclbr.Class): clz = importutils.import_class("%s.%s" % (module, key)) # NOTE(vponomaryov): we need to distinguish class methods types # for py2 and py3, because the concept of 'unbound methods' has # been removed from the python3.x if six.PY3: member_type = inspect.isfunction else: member_type = inspect.ismethod for method, func in inspect.getmembers(clz, member_type): setattr( clz, method, decorator("%s.%s.%s" % (module, key, method), func)) # set the decorator for the function if isinstance(module_data[key], pyclbr.Function): func = importutils.import_class("%s.%s" % (module, key)) setattr(sys.modules[module], key, decorator("%s.%s" % (module, key), func)) def read_cached_file(filename, cache_info, reload_func=None): """Read from a file if it has been modified. :param cache_info: dictionary to hold opaque cache. :param reload_func: optional function to be called with data when file is reloaded due to a modification. 
:returns: data from file """ mtime = os.path.getmtime(filename) if not cache_info or mtime != cache_info.get('mtime'): with open(filename) as fap: cache_info['data'] = fap.read() cache_info['mtime'] = mtime if reload_func: reload_func(cache_info['data']) return cache_info['data'] def file_open(*args, **kwargs): """Open file see built-in file() documentation for more details Note: The reason this is kept in a separate module is to easily be able to provide a stub module that doesn't alter system state at all (for unit tests) """ return file(*args, **kwargs) def service_is_up(service): """Check whether a service is up based on last heartbeat.""" last_heartbeat = service['updated_at'] or service['created_at'] # Timestamps in DB are UTC. tdelta = timeutils.utcnow() - last_heartbeat elapsed = tdelta.total_seconds() return abs(elapsed) <= CONF.service_down_time def validate_service_host(context, host): service = db_api.service_get_by_host_and_topic(context, host, 'manila-share') if not service_is_up(service): raise exception.ServiceIsDown(service=service['host']) return service def read_file_as_root(file_path): """Secure helper to read file as root.""" try: out, _err = execute('cat', file_path, run_as_root=True) return out except exception.ProcessExecutionError: raise exception.FileNotFound(file_path=file_path) @contextlib.contextmanager def temporary_chown(path, owner_uid=None): """Temporarily chown a path. :params owner_uid: UID of temporary owner (defaults to current user) """ if owner_uid is None: owner_uid = os.getuid() orig_uid = os.stat(path).st_uid if orig_uid != owner_uid: execute('chown', owner_uid, path, run_as_root=True) try: yield finally: if orig_uid != owner_uid: execute('chown', orig_uid, path, run_as_root=True) @contextlib.contextmanager def tempdir(**kwargs): tmpdir = tempfile.mkdtemp(**kwargs) try: yield tmpdir finally: try: shutil.rmtree(tmpdir) except OSError as e: LOG.debug('Could not remove tmpdir: %s', six.text_type(e)) def walk_class_hierarchy(clazz, encountered=None): """Walk class hierarchy, yielding most derived classes first.""" if not encountered: encountered = [] for subclass in clazz.__subclasses__(): if subclass not in encountered: encountered.append(subclass) # drill down to leaves first for subsubclass in walk_class_hierarchy(subclass, encountered): yield subsubclass yield subclass def ensure_tree(path): """Create a directory (and any ancestor directories required) :param path: Directory to create """ try: os.makedirs(path) except OSError as exc: if exc.errno == errno.EEXIST: if not os.path.isdir(path): raise else: raise def cidr_to_netmask(cidr): """Convert cidr to netmask.""" try: network = netaddr.IPNetwork(cidr) return str(network.netmask) except netaddr.AddrFormatError: raise exception.InvalidInput(_("Invalid cidr supplied %s") % cidr) def is_valid_ip_address(ip_address, ip_version): if int(ip_version) == 4: return netaddr.valid_ipv4(ip_address) elif int(ip_version) == 6: return netaddr.valid_ipv6(ip_address) else: raise exception.ManilaException( _("Provided improper IP version '%s'.") % ip_version) class IsAMatcher(object): def __init__(self, expected_value=None): self.expected_value = expected_value def __eq__(self, actual_value): return isinstance(actual_value, self.expected_value) class ComparableMixin(object): def _compare(self, other, method): try: return method(self._cmpkey(), other._cmpkey()) except (AttributeError, TypeError): # _cmpkey not implemented, or return different type, # so I can't compare with "other". 
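            # The mixin only requires subclasses to implement _cmpkey();
            # a minimal usage sketch (the Version class below is
            # illustrative, not part of this module):
            #
            #     class Version(ComparableMixin):
            #         def __init__(self, major, minor):
            #             self.major, self.minor = major, minor
            #
            #         def _cmpkey(self):
            #             return (self.major, self.minor)
            #
            #     Version(1, 2) < Version(1, 10)   # True, compares the tuples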
return NotImplemented def __lt__(self, other): return self._compare(other, lambda s, o: s < o) def __le__(self, other): return self._compare(other, lambda s, o: s <= o) def __eq__(self, other): return self._compare(other, lambda s, o: s == o) def __ge__(self, other): return self._compare(other, lambda s, o: s >= o) def __gt__(self, other): return self._compare(other, lambda s, o: s > o) def __ne__(self, other): return self._compare(other, lambda s, o: s != o) def retry(exception, interval=1, retries=10, backoff_rate=2, wait_random=False): """A wrapper around retrying library. This decorator allows to log and to check 'retries' input param. Time interval between retries is calculated in the following way: interval * backoff_rate ^ previous_attempt_number :param exception: expected exception type. When wrapped function raises an exception of this type, the function execution is retried. :param interval: param 'interval' is used to calculate time interval between retries: interval * backoff_rate ^ previous_attempt_number :param retries: number of retries. :param backoff_rate: param 'backoff_rate' is used to calculate time interval between retries: interval * backoff_rate ^ previous_attempt_number :param wait_random: boolean value to enable retry with random wait timer. """ def _retry_on_exception(e): return isinstance(e, exception) def _backoff_sleep(previous_attempt_number, delay_since_first_attempt_ms): exp = backoff_rate ** previous_attempt_number wait_for = max(0, interval * exp) if wait_random: wait_val = random.randrange(interval * 1000.0, wait_for * 1000.0) else: wait_val = wait_for * 1000.0 LOG.debug("Sleeping for %s seconds.", (wait_val / 1000.0)) return wait_val def _print_stop(previous_attempt_number, delay_since_first_attempt_ms): delay_since_first_attempt = delay_since_first_attempt_ms / 1000.0 LOG.debug("Failed attempt %s", previous_attempt_number) LOG.debug("Have been at this for %s seconds", delay_since_first_attempt) return previous_attempt_number == retries if retries < 1: raise ValueError(_('Retries must be greater than or ' 'equal to 1 (received: %s).') % retries) def _decorator(f): @six.wraps(f) def _wrapper(*args, **kwargs): r = retrying.Retrying(retry_on_exception=_retry_on_exception, wait_func=_backoff_sleep, stop_func=_print_stop) return r.call(f, *args, **kwargs) return _wrapper return _decorator def require_driver_initialized(func): @functools.wraps(func) def wrapper(self, *args, **kwargs): # we can't do anything if the driver didn't init if not self.driver.initialized: driver_name = self.driver.__class__.__name__ raise exception.DriverNotInitialized(driver=driver_name) return func(self, *args, **kwargs) return wrapper def translate_string_size_to_float(string, multiplier='G'): """Translates human-readable storage size to float value. Supported values for 'multiplier' are following: K - kilo | 1 M - mega | 1024 G - giga | 1024 * 1024 T - tera | 1024 * 1024 * 1024 P = peta | 1024 * 1024 * 1024 * 1024 returns: - float if correct input data provided - None if incorrect """ if not isinstance(string, six.string_types): return None multipliers = ('K', 'M', 'G', 'T', 'P') mapping = { k: 1024.0 ** v for k, v in zip(multipliers, range(len(multipliers))) } if multiplier not in multipliers: raise exception.ManilaException( "'multiplier' arg should be one of following: " "'%(multipliers)s'. But it is '%(multiplier)s'." 
% { 'multiplier': multiplier, 'multipliers': "', '".join(multipliers), } ) try: value = float(string) / 1024.0 value = value / mapping[multiplier] return value except (ValueError, TypeError): matched = re.match( r"^(\d+\.*\d*)([%s])$" % ','.join(multipliers), string) if matched: value = float(matched.groups()[0]) multiplier = mapping[matched.groups()[1]] / mapping[multiplier] return value * multiplier def wait_for_access_update(context, db, share_instance, migration_wait_access_rules_timeout): starttime = time.time() deadline = starttime + migration_wait_access_rules_timeout tries = 0 while True: instance = db.share_instance_get(context, share_instance['id']) if instance['access_rules_status'] == constants.STATUS_ACTIVE: break tries += 1 now = time.time() if instance['access_rules_status'] == constants.STATUS_ERROR: msg = _("Failed to update access rules" " on share instance %s") % share_instance['id'] raise exception.ShareMigrationFailed(reason=msg) elif now > deadline: msg = _("Timeout trying to update access rules" " on share instance %(share_id)s. Timeout " "was %(timeout)s seconds.") % { 'share_id': share_instance['id'], 'timeout': migration_wait_access_rules_timeout} raise exception.ShareMigrationFailed(reason=msg) else: time.sleep(tries ** 2) manila-2.0.0/manila/__init__.py0000664000567000056710000000000012701407107017452 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/testing/0000775000567000056710000000000012701407265017035 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/testing/README.rst0000664000567000056710000000406012701407107020517 0ustar jenkinsjenkins00000000000000======================================= OpenStack Manila Testing Infrastructure ======================================= A note of clarification is in order, to help those who are new to testing in OpenStack Manila: - actual unit tests are created in the "tests" directory; - the "testing" directory is used to house the infrastructure needed to support testing in OpenStack Manila. This README file attempts to provide current and prospective contributors with everything they need to know in order to start creating unit tests and utilizing the convenience code provided in manila.testing. Writing Unit Tests ------------------ - All new unit tests are to be written in python-mock. - Old tests that are still written in mox should be updated to use python-mock. Usage of mox has been deprecated for writing Manila unit tests. - use addCleanup in favor of tearDown test.TestCase ------------- The TestCase class from manila.test (generally imported as test) will automatically manage self.stubs using the stubout module. They will automatically verify and clean up during the tearDown step. If using test.TestCase, calling the super class setUp is required and calling the super class tearDown is required to be last if tearDown is overridden. Running Tests ------------- In the root of the Manila source code run the run_tests.sh script. This will offer to create a virtual environment and populate it with dependencies. If you don't have dependencies installed that are needed for compiling Manila's direct dependencies, you'll have to use your operating system's method of installing extra dependencies. To get help using this script execute it with the -h parameter to get options `./run_tests.sh -h` Tests and assertRaises ---------------------- When asserting that a test should raise an exception, test against the most specific exception possible. 
An overly broad exception type (like Exception) can mask errors in the unit test itself. Example:: self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid, elevated, instance_uuid) manila-2.0.0/manila/wsgi.py0000664000567000056710000004475512701407107016715 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utility methods for working with WSGI servers.""" from __future__ import print_function import errno import os import socket import ssl import sys import time import eventlet import eventlet.wsgi import greenlet from oslo_config import cfg from oslo_log import log from oslo_service import service from oslo_utils import excutils from oslo_utils import netutils from paste import deploy import routes.middleware import webob.dec import webob.exc from manila.common import config from manila import exception from manila.i18n import _ from manila.i18n import _LE from manila.i18n import _LI socket_opts = [ cfg.IntOpt('backlog', default=4096, help="Number of backlog requests to configure the socket " "with."), cfg.BoolOpt('tcp_keepalive', default=True, help="Sets the value of TCP_KEEPALIVE (True/False) for each " "server socket."), cfg.IntOpt('tcp_keepidle', default=600, help="Sets the value of TCP_KEEPIDLE in seconds for each " "server socket. Not supported on OS X."), cfg.IntOpt('tcp_keepalive_interval', help="Sets the value of TCP_KEEPINTVL in seconds for each " "server socket. Not supported on OS X."), cfg.IntOpt('tcp_keepalive_count', help="Sets the value of TCP_KEEPCNT for each " "server socket. Not supported on OS X."), cfg.StrOpt('ssl_ca_file', help="CA certificate file to use to verify " "connecting clients."), cfg.StrOpt('ssl_cert_file', help="Certificate file to use when starting " "the server securely."), cfg.StrOpt('ssl_key_file', help="Private key file to use when starting " "the server securely."), ] eventlet_opts = [ cfg.IntOpt('max_header_line', default=16384, help="Maximum line size of message headers to be accepted. " "Option max_header_line may need to be increased when " "using large tokens (typically those generated by the " "Keystone v3 API with big service catalogs)."), cfg.IntOpt('client_socket_timeout', default=900, help="Timeout for client connections socket operations. " "If an incoming connection is idle for this number of " "seconds it will be closed. A value of '0' means " "wait forever."), cfg.BoolOpt('wsgi_keep_alive', default=True, help='If False, closes the client socket connection ' 'explicitly. Setting it to True to maintain backward ' 'compatibility. 
Recommended setting is set it to False.'), ] CONF = cfg.CONF CONF.register_opts(socket_opts) CONF.register_opts(eventlet_opts) LOG = log.getLogger(__name__) class Server(service.ServiceBase): """Server class to manage a WSGI server, serving a WSGI application.""" default_pool_size = 1000 def __init__(self, name, app, host=None, port=None, pool_size=None, protocol=eventlet.wsgi.HttpProtocol, backlog=128): """Initialize, but do not start, a WSGI server. :param name: Pretty name for logging. :param app: The WSGI application to serve. :param host: IP address to serve the application. :param port: Port number to server the application. :param pool_size: Maximum number of eventlets to spawn concurrently. :returns: None """ eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line self.client_socket_timeout = CONF.client_socket_timeout self.name = name self.app = app self._host = host or "0.0.0.0" self._port = port or 0 self._server = None self._socket = None self._protocol = protocol self.pool_size = pool_size or self.default_pool_size self._pool = eventlet.GreenPool(self.pool_size) self._logger = log.getLogger("eventlet.wsgi.server") if backlog < 1: raise exception.InvalidInput( reason='The backlog must be more than 1') bind_addr = (host, port) # TODO(dims): eventlet's green dns/socket module does not actually # support IPv6 in getaddrinfo(). We need to get around this in the # future or monitor upstream for a fix try: info = socket.getaddrinfo(bind_addr[0], bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)[0] family = info[0] bind_addr = info[-1] except Exception: family = socket.AF_INET cert_file = CONF.ssl_cert_file key_file = CONF.ssl_key_file ca_file = CONF.ssl_ca_file self._use_ssl = cert_file or key_file if cert_file and not os.path.exists(cert_file): raise RuntimeError(_("Unable to find cert_file : %s") % cert_file) if ca_file and not os.path.exists(ca_file): raise RuntimeError(_("Unable to find ca_file : %s") % ca_file) if key_file and not os.path.exists(key_file): raise RuntimeError(_("Unable to find key_file : %s") % key_file) if self._use_ssl and (not cert_file or not key_file): raise RuntimeError(_("When running server in SSL mode, you must " "specify both a cert_file and key_file " "option value in your configuration file")) retry_until = time.time() + 30 while not self._socket and time.time() < retry_until: try: self._socket = eventlet.listen( bind_addr, backlog=backlog, family=family) except socket.error as err: if err.args[0] != errno.EADDRINUSE: raise eventlet.sleep(0.1) if not self._socket: raise RuntimeError(_("Could not bind to %(host)s:%(port)s " "after trying for 30 seconds") % {'host': host, 'port': port}) (self._host, self._port) = self._socket.getsockname()[0:2] LOG.info(_LI("%(name)s listening on %(_host)s:%(_port)s"), {'name': self.name, '_host': self._host, '_port': self._port}) def start(self): """Start serving a WSGI application. :returns: None :raises: manila.exception.InvalidInput """ # The server socket object will be closed after server exits, # but the underlying file descriptor will remain open, and will # give bad file descriptor error. So duplicating the socket object, # to keep file descriptor usable. 
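        # The duplicated socket below is handed to
        # netutils.set_tcp_keepalive() together with the tcp_keepalive*
        # options registered above; with the defaults this enables
        # SO_KEEPALIVE and starts probing idle client connections after
        # tcp_keepidle (600) seconds.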
config.set_middleware_defaults() dup_socket = self._socket.dup() netutils.set_tcp_keepalive( dup_socket, tcp_keepalive=CONF.tcp_keepalive, tcp_keepidle=CONF.tcp_keepidle, tcp_keepalive_interval=CONF.tcp_keepalive_interval, tcp_keepalive_count=CONF.tcp_keepalive_count ) if self._use_ssl: try: ssl_kwargs = { 'server_side': True, 'certfile': CONF.ssl_cert_file, 'keyfile': CONF.ssl_key_file, 'cert_reqs': ssl.CERT_NONE, } if CONF.ssl_ca_file: ssl_kwargs['ca_certs'] = CONF.ssl_ca_file ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED dup_socket = ssl.wrap_socket(dup_socket, **ssl_kwargs) dup_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) except Exception: with excutils.save_and_reraise_exception(): LOG.error( _LE("Failed to start %(name)s on %(_host)s:%(_port)s " "with SSL support."), {"name": self.name, "_host": self._host, "_port": self._port} ) wsgi_kwargs = { 'func': eventlet.wsgi.server, 'sock': dup_socket, 'site': self.app, 'protocol': self._protocol, 'custom_pool': self._pool, 'log': self._logger, 'socket_timeout': self.client_socket_timeout, 'keepalive': CONF.wsgi_keep_alive, } self._server = eventlet.spawn(**wsgi_kwargs) @property def host(self): return self._host @property def port(self): return self._port def stop(self): """Stop this server. This is not a very nice action, as currently the method by which a server is stopped is by killing its eventlet. :returns: None """ LOG.info(_LI("Stopping WSGI server.")) if self._server is not None: # Resize pool to stop new requests from being processed self._pool.resize(0) self._server.kill() def wait(self): """Block, until the server has stopped. Waits on the server's eventlet to finish, then returns. :returns: None """ try: if self._server is not None: self._pool.waitall() self._server.wait() except greenlet.GreenletExit: LOG.info(_LI("WSGI server has stopped.")) def reset(self): """Reset server greenpool size to default. :returns: None """ self._pool.resize(self.pool_size) class Request(webob.Request): pass class Application(object): """Base WSGI application wrapper. Subclasses need to implement __call__.""" @classmethod def factory(cls, global_config, **local_config): """Used for paste app factories in paste.deploy config files. Any local configuration (that is, values under the [app:APPNAME] section of the paste config) will be passed into the `__init__` method as kwargs. A hypothetical configuration would look like: [app:wadl] latest_version = 1.3 paste.app_factory = manila.api.fancy_api:Wadl.factory which would result in a call to the `Wadl` class as import manila.api.fancy_api fancy_api.Wadl(latest_version='1.3') You could of course re-implement the `factory` method in subclasses, but using the kwarg passing it shouldn't be necessary. 
""" return cls(**local_config) def __call__(self, environ, start_response): r"""Subclasses will probably want to implement __call__ like this: @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): # Any of the following objects work as responses: # Option 1: simple string res = 'message\n' # Option 2: a nicely formatted HTTP exception page res = exc.HTTPForbidden(detail='Nice try') # Option 3: a webob Response object (in case you need to play with # headers, or you want to be treated like an iterable, or or or) res = Response(); res.app_iter = open('somefile') # Option 4: any wsgi app to be run next res = self.application # Option 5: you can get a Response object for a wsgi app, too, to # play with headers etc res = req.get_response(self.application) # You can then just return your response... return res # ... or set req.response and return None. req.response = res See the end of http://pythonpaste.org/webob/modules/dec.html for more info. """ raise NotImplementedError(_('You must implement __call__')) class Middleware(Application): """Base WSGI middleware. These classes require an application to be initialized that will be called next. By default the middleware will simply call its wrapped app, or you can override __call__ to customize its behavior. """ @classmethod def factory(cls, global_config, **local_config): """Used for paste app factories in paste.deploy config files. Any local configuration (that is, values under the [filter:APPNAME] section of the paste config) will be passed into the `__init__` method as kwargs. A hypothetical configuration would look like: [filter:analytics] redis_host = 127.0.0.1 paste.filter_factory = manila.api.analytics:Analytics.factory which would result in a call to the `Analytics` class as import manila.api.analytics analytics.Analytics(app_from_paste, redis_host='127.0.0.1') You could of course re-implement the `factory` method in subclasses, but using the kwarg passing it shouldn't be necessary. """ def _factory(app): return cls(app, **local_config) return _factory def __init__(self, application): self.application = application def process_request(self, req): """Called on each request. If this returns None, the next application down the stack will be executed. If it returns a response then that response will be returned and execution will stop here. """ return None def process_response(self, response): """Do whatever you'd like to the response.""" return response @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): response = self.process_request(req) if response: return response response = req.get_response(self.application) return self.process_response(response) class Debug(Middleware): """Helper class for debugging a WSGI application. Can be inserted into any WSGI application chain to get information about the request and response. 
""" @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): print(('*' * 40) + ' REQUEST ENVIRON') for key, value in req.environ.items(): print(key, '=', value) print() resp = req.get_response(self.application) print(('*' * 40) + ' RESPONSE HEADERS') for (key, value) in resp.headers.items(): print(key, '=', value) print() resp.app_iter = self.print_generator(resp.app_iter) return resp @staticmethod def print_generator(app_iter): """Iterator that prints the contents of a wrapper string.""" print(('*' * 40) + ' BODY') for part in app_iter: sys.stdout.write(part.decode()) sys.stdout.flush() yield part print() class Router(object): """WSGI middleware that maps incoming requests to WSGI apps.""" def __init__(self, mapper): """Create a router for the given routes.Mapper. Each route in `mapper` must specify a 'controller', which is a WSGI app to call. You'll probably want to specify an 'action' as well and have your controller be an object that can route the request to the action-specific method. Examples: mapper = routes.Mapper() sc = ServerController() # Explicit mapping of one route to a controller+action mapper.connect(None, '/svrlist', controller=sc, action='list') # Actions are all implicitly defined mapper.resource('server', 'servers', controller=sc) # Pointing to an arbitrary WSGI app. You can specify the # {path_info:.*} parameter so the target app can be handed just that # section of the URL. mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp()) """ self.map = mapper self._router = routes.middleware.RoutesMiddleware(self._dispatch, self.map) @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): """Route the incoming request to a controller based on self.map. If no match, return a 404. """ return self._router @staticmethod @webob.dec.wsgify(RequestClass=Request) def _dispatch(req): """Dispatch the request to the appropriate controller. Called by self._router after matching the incoming request to a route and putting the information into req.environ. Either returns 404 or the routed WSGI app's response. """ match = req.environ['wsgiorg.routing_args'][1] if not match: return webob.exc.HTTPNotFound() app = match['controller'] return app class Loader(object): """Used to load WSGI applications from paste configurations.""" def __init__(self, config_path=None): """Initialize the loader, and attempt to find the config. :param config_path: Full or relative path to the paste config. :returns: None """ config_path = config_path or CONF.api_paste_config self.config_path = CONF.find_file(config_path) if not self.config_path: raise exception.ConfigNotFound(path=config_path) def load_app(self, name): """Return the paste URLMap wrapped WSGI application. :param name: Name of the application to load. :returns: Paste URLMap object wrapping the requested application. :raises: `manila.exception.PasteAppNotFound` """ try: return deploy.loadapp("config:%s" % self.config_path, name=name) except LookupError as err: LOG.error(err) raise exception.PasteAppNotFound(name=name, path=self.config_path) manila-2.0.0/manila/cmd/0000775000567000056710000000000012701407265016123 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/cmd/manage.py0000775000567000056710000003475212701407112017732 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Interactive shell based on Django: # # Copyright (c) 2005, the Lawrence Journal-World # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of Django nor the names of its contributors may be # used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ CLI interface for manila management. """ from __future__ import print_function import os import sys from manila import i18n i18n.enable_lazy() from oslo_config import cfg from oslo_log import log from oslo_utils import uuidutils from manila.common import config # Need to register global_opts # noqa from manila import context from manila import db from manila.db import migration from manila.i18n import _ from manila import utils from manila import version CONF = cfg.CONF # Decorators for actions def args(*args, **kwargs): def _decorator(func): func.__dict__.setdefault('args', []).insert(0, (args, kwargs)) return func return _decorator def param2id(object_id): """Helper function to convert various id types to internal id. args: [object_id], e.g. 'vol-0000000a' or 'volume-0000000a' or '10' """ if uuidutils.is_uuid_like(object_id): return object_id elif '-' in object_id: # FIXME(ja): mapping occurs in nova? pass else: return int(object_id) class ShellCommands(object): def bpython(self): """Runs a bpython shell. Falls back to Ipython/python shell if unavailable """ self.run('bpython') def ipython(self): """Runs an Ipython shell. Falls back to Python shell if unavailable """ self.run('ipython') def python(self): """Runs a python shell. 
Falls back to Python shell if unavailable """ self.run('python') @args('--shell', dest="shell", metavar='', help='Python shell') def run(self, shell=None): """Runs a Python interactive interpreter.""" if not shell: shell = 'bpython' if shell == 'bpython': try: import bpython bpython.embed() except ImportError: shell = 'ipython' if shell == 'ipython': try: from IPython import embed embed() except ImportError: # Ipython < 0.11 try: import IPython # Explicitly pass an empty list as arguments, because # otherwise IPython would use sys.argv from this script. shell = IPython.Shell.IPShell(argv=[]) shell.mainloop() except ImportError: # no IPython module shell = 'python' if shell == 'python': import code try: # Try activating rlcompleter, because it's handy. import readline except ImportError: pass else: # We don't have to wrap the following import in a 'try', # because we already know 'readline' was imported successfully. import rlcompleter # noqa readline.parse_and_bind("tab:complete") code.interact() @args('--path', required=True, help='Script path') def script(self, path): """Runs the script from the specifed path with flags set properly. arguments: path """ exec(compile(open(path).read(), path, 'exec'), locals(), globals()) class HostCommands(object): """List hosts.""" @args('zone', nargs='?', default=None, help='Availability Zone (default: %(default)s)') def list(self, zone=None): """Show a list of all physical hosts. Filter by zone. args: [zone] """ print("%-25s\t%-15s" % (_('host'), _('zone'))) ctxt = context.get_admin_context() services = db.service_get_all(ctxt) if zone: services = [ s for s in services if s['availability_zone']['name'] == zone] hosts = [] for srv in services: if not [h for h in hosts if h['host'] == srv['host']]: hosts.append(srv) for h in hosts: print("%-25s\t%-15s" % (h['host'], h['availability_zone']['name'])) class DbCommands(object): """Class for managing the database.""" def __init__(self): pass @args('version', nargs='?', default=None, help='Database version') def sync(self, version=None): """Sync the database up to the most recent version.""" return migration.upgrade(version) def version(self): """Print the current database version.""" print(migration.version()) # NOTE(imalinovskiy): # Manila init migration hardcoded here, # because alembic has strange behaviour: # downgrade base = downgrade from head(162a3e673105) -> base(162a3e673105) # = downgrade from 162a3e673105 -> (empty) [ERROR] # downgrade 162a3e673105 = downgrade from head(162a3e673105)->162a3e673105 # = do nothing [OK] @args('version', nargs='?', default='162a3e673105', help='Version to downgrade') def downgrade(self, version=None): """Downgrade database to the given version.""" return migration.downgrade(version) @args('--message', help='Revision message') @args('--autogenerate', help='Autogenerate migration from schema') def revision(self, message, autogenerate): """Generate new migration.""" return migration.revision(message, autogenerate) @args('version', nargs='?', default=None, help='Version to stamp version table with') def stamp(self, version=None): """Stamp the version table with the given version.""" return migration.stamp(version) class VersionCommands(object): """Class for exposing the codebase version.""" def list(self): print(version.version_string()) def __call__(self): self.list() class ConfigCommands(object): """Class for exposing the flags defined by flag_file(s).""" def list(self): for key, value in CONF.items(): if value is not None: print('%s = %s' % (key, value)) class 
GetLogCommands(object): """Get logging information.""" def errors(self): """Get all of the errors from the log files.""" error_found = 0 if CONF.log_dir: logs = [x for x in os.listdir(CONF.log_dir) if x.endswith('.log')] for file in logs: log_file = os.path.join(CONF.log_dir, file) lines = [line.strip() for line in open(log_file, "r")] lines.reverse() print_name = 0 for index, line in enumerate(lines): if line.find(" ERROR ") > 0: error_found += 1 if print_name == 0: print(log_file + ":-") print_name = 1 print("Line %d : %s" % (len(lines) - index, line)) if error_found == 0: print("No errors in logfiles!") @args('num_entries', nargs='?', type=int, default=10, help='Number of entries to list (default: %(default)d)') def syslog(self, num_entries=10): """Get of the manila syslog events.""" entries = int(num_entries) count = 0 log_file = '' if os.path.exists('/var/log/syslog'): log_file = '/var/log/syslog' elif os.path.exists('/var/log/messages'): log_file = '/var/log/messages' else: print("Unable to find system log file!") sys.exit(1) lines = [line.strip() for line in open(log_file, "r")] lines.reverse() print("Last %s manila syslog entries:-" % (entries)) for line in lines: if line.find("manila") > 0: count += 1 print("%s" % (line)) if count == entries: break if count == 0: print("No manila entries in syslog!") class ServiceCommands(object): """Methods for managing services.""" def list(self): """Show a list of all manila services.""" ctxt = context.get_admin_context() services = db.service_get_all(ctxt) print_format = "%-16s %-36s %-16s %-10s %-5s %-10s" print(print_format % ( _('Binary'), _('Host'), _('Zone'), _('Status'), _('State'), _('Updated At')) ) for svc in services: alive = utils.service_is_up(svc) art = ":-)" if alive else "XXX" status = 'enabled' if svc['disabled']: status = 'disabled' print(print_format % ( svc['binary'], svc['host'].partition('.')[0], svc['availability_zone']['name'], status, art, svc['updated_at'], )) CATEGORIES = { 'config': ConfigCommands, 'db': DbCommands, 'host': HostCommands, 'logs': GetLogCommands, 'service': ServiceCommands, 'shell': ShellCommands, 'version': VersionCommands } def methods_of(obj): """Get all callable methods of an object that don't start with underscore. Returns a list of tuples of the form (method_name, method). """ result = [] for i in dir(obj): if callable(getattr(obj, i)) and not i.startswith('_'): result.append((i, getattr(obj, i))) return result def add_command_parsers(subparsers): for category in CATEGORIES: command_object = CATEGORIES[category]() parser = subparsers.add_parser(category) parser.set_defaults(command_object=command_object) category_subparsers = parser.add_subparsers(dest='action') for (action, action_fn) in methods_of(command_object): parser = category_subparsers.add_parser(action) action_kwargs = [] for args, kwargs in getattr(action_fn, 'args', []): parser.add_argument(*args, **kwargs) parser.set_defaults(action_fn=action_fn) parser.set_defaults(action_kwargs=action_kwargs) category_opt = cfg.SubCommandOpt('category', title='Command categories', handler=add_command_parsers) def get_arg_string(args): arg = None if args[0] == '-': # (Note)zhiteng: args starts with CONF.oparser.prefix_chars # is optional args. Notice that cfg module takes care of # actual ArgParser so prefix_chars is always '-'. 
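    # For example, '--message' becomes 'message' and '-v' becomes 'v', so
    # the value can later be looked up as an attribute of CONF.category in
    # fetch_func_args() below.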
if args[1] == '-': # This is long optional arg arg = args[2:] else: arg = args[1:] else: arg = args return arg def fetch_func_args(func): fn_args = [] for args, kwargs in getattr(func, 'args', []): arg = get_arg_string(args[0]) fn_args.append(getattr(CONF.category, arg)) return fn_args def main(): """Parse options and call the appropriate class/method.""" CONF.register_cli_opt(category_opt) script_name = sys.argv[0] if len(sys.argv) < 2: print(_("\nOpenStack manila version: %(version)s\n") % {'version': version.version_string()}) print(script_name + " category action []") print(_("Available categories:")) for category in CATEGORIES: print("\t%s" % category) sys.exit(2) try: log.register_options(CONF) CONF(sys.argv[1:], project='manila', version=version.version_string()) log.setup(CONF, "manila") except cfg.ConfigFilesNotFoundError: cfgfile = CONF.config_file[-1] if CONF.config_file else None if cfgfile and not os.access(cfgfile, os.R_OK): st = os.stat(cfgfile) print(_("Could not read %s. Re-running with sudo") % cfgfile) try: os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv) except Exception: print(_('sudo failed, continuing as if nothing happened')) print(_('Please re-run manila-manage as root.')) sys.exit(2) fn = CONF.category.action_fn fn_args = fetch_func_args(fn) fn(*fn_args) if __name__ == '__main__': main() manila-2.0.0/manila/cmd/all.py0000775000567000056710000000445612701407107017254 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2011 OpenStack, LLC # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Starter script for All manila services. This script attempts to start all the manila services in one process. Each service is started in its own greenthread. Please note that exceptions and sys.exit() on the starting of a service are logged and the script will continue attempting to launch the rest of the services. 
""" import eventlet eventlet.monkey_patch() import sys from oslo_config import cfg from oslo_log import log from manila import i18n i18n.enable_lazy() from manila.common import config # Need to register global_opts from manila.i18n import _LE from manila import service from manila import utils from manila import version CONF = cfg.CONF def main(): log.register_options(CONF) config.set_middleware_defaults() CONF(sys.argv[1:], project='manila', version=version.version_string()) log.setup(CONF, "manila") LOG = log.getLogger('manila.all') utils.monkey_patch() launcher = service.process_launcher() # manila-api try: server = service.WSGIService('osapi_share') launcher.launch_service(server, workers=server.workers or 1) except (Exception, SystemExit): LOG.exception(_LE('Failed to load osapi_share')) for binary in ['manila-share', 'manila-scheduler', 'manila-api', 'manila-data']: try: launcher.launch_service(service.Service.create(binary=binary)) except (Exception, SystemExit): LOG.exception(_LE('Failed to load %s'), binary) launcher.wait() if __name__ == '__main__': main() manila-2.0.0/manila/cmd/data.py0000775000567000056710000000247012701407107017407 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2015, Hitachi Data Systems. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Starter script for manila data copy service.""" import eventlet eventlet.monkey_patch() import sys from oslo_config import cfg from oslo_log import log from manila import i18n i18n.enable_lazy() from manila.common import config # Need to register global_opts # noqa from manila import service from manila import utils from manila import version CONF = cfg.CONF def main(): log.register_options(CONF) CONF(sys.argv[1:], project='manila', version=version.version_string()) log.setup(CONF, "manila") utils.monkey_patch() server = service.Service.create(binary='manila-data') service.serve(server) service.wait() if __name__ == '__main__': main() manila-2.0.0/manila/cmd/__init__.py0000664000567000056710000000000012701407107020215 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/cmd/scheduler.py0000775000567000056710000000265212701407107020456 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Starter script for manila Scheduler.""" import eventlet eventlet.monkey_patch() import sys from oslo_config import cfg from oslo_log import log from manila import i18n i18n.enable_lazy() from manila.common import config # Need to register global_opts # noqa from manila import service from manila import utils from manila import version CONF = cfg.CONF def main(): log.register_options(CONF) CONF(sys.argv[1:], project='manila', version=version.version_string()) log.setup(CONF, "manila") utils.monkey_patch() server = service.Service.create(binary='manila-scheduler') service.serve(server) service.wait() if __name__ == '__main__': main() manila-2.0.0/manila/cmd/share.py0000775000567000056710000000335612701407107017604 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2013 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Starter script for manila Share.""" import eventlet eventlet.monkey_patch() import sys from oslo_config import cfg from oslo_log import log from manila import i18n i18n.enable_lazy() from manila.common import config # Need to register global_opts # noqa from manila import service from manila import utils from manila import version CONF = cfg.CONF def main(): log.register_options(CONF) CONF(sys.argv[1:], project='manila', version=version.version_string()) log.setup(CONF, "manila") utils.monkey_patch() launcher = service.process_launcher() if CONF.enabled_share_backends: for backend in CONF.enabled_share_backends: host = "%s@%s" % (CONF.host, backend) server = service.Service.create(host=host, service_name=backend, binary='manila-share') launcher.launch_service(server) else: server = service.Service.create(binary='manila-share') launcher.launch_service(server) launcher.wait() if __name__ == '__main__': main() manila-2.0.0/manila/cmd/api.py0000775000567000056710000000301712701407107017245 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Starter script for manila OS API.""" import eventlet eventlet.monkey_patch() import sys from oslo_config import cfg from oslo_log import log from manila import i18n i18n.enable_lazy() from manila.common import config # Need to register global_opts # noqa from manila import service from manila import utils from manila import version CONF = cfg.CONF def main(): log.register_options(CONF) CONF(sys.argv[1:], project='manila', version=version.version_string()) config.verify_share_protocols() log.setup(CONF, "manila") utils.monkey_patch() launcher = service.process_launcher() server = service.WSGIService('osapi_share') launcher.launch_service(server, workers=server.workers or 1) launcher.wait() if __name__ == '__main__': main() manila-2.0.0/manila/manager.py0000664000567000056710000001242612701407107017344 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base Manager class. Managers are responsible for a certain aspect of the system. It is a logical grouping of code relating to a portion of the system. In general other components should be using the manager to make changes to the components that it is responsible for. For example, other components that need to deal with volumes in some way, should do so by calling methods on the VolumeManager instead of directly changing fields in the database. This allows us to keep all of the code relating to volumes in the same place. We have adopted a basic strategy of Smart managers and dumb data, which means rather than attaching methods to data objects, components should call manager methods that act on the data. Methods on managers that can be executed locally should be called directly. If a particular method must execute on a remote host, this should be done via rpc to the service that wraps the manager Managers should be responsible for most of the db access, and non-implementation specific data. Anything implementation specific that can't be generalized should be done by the Driver. In general, we prefer to have one manager with multiple drivers for different implementations, but sometimes it makes sense to have multiple managers. You can think of it this way: Abstract different overall strategies at the manager level(FlatNetwork vs VlanNetwork), and different implementations at the driver level(LinuxNetDriver vs CiscoNetDriver). Managers will often provide methods for initial setup of a host or periodic tasks to a wrapping service. This module provides Manager, a base class for managers. 
""" from oslo_config import cfg from oslo_log import log from oslo_service import periodic_task from manila.db import base from manila.scheduler import rpcapi as scheduler_rpcapi from manila import version CONF = cfg.CONF LOG = log.getLogger(__name__) class PeriodicTasks(periodic_task.PeriodicTasks): def __init__(self): super(PeriodicTasks, self).__init__(CONF) class Manager(base.Base, PeriodicTasks): @property def RPC_API_VERSION(self): """Redefine this in child classes.""" raise NotImplementedError @property def target(self): """This property is used by oslo_messaging. https://wiki.openstack.org/wiki/Oslo/Messaging#API_Version_Negotiation """ if not hasattr(self, '_target'): import oslo_messaging as messaging self._target = messaging.Target(version=self.RPC_API_VERSION) return self._target def __init__(self, host=None, db_driver=None): if not host: host = CONF.host self.host = host self.additional_endpoints = [] super(Manager, self).__init__(db_driver) def periodic_tasks(self, context, raise_on_error=False): """Tasks to be run at a periodic interval.""" return self.run_periodic_tasks(context, raise_on_error=raise_on_error) def init_host(self): """Handle initialization if this is a standalone service. Child classes should override this method. """ pass def service_version(self, context): return version.version_string() def service_config(self, context): config = {} for key in CONF: config[key] = CONF.get(key, None) return config class SchedulerDependentManager(Manager): """Periodically send capability updates to the Scheduler services. Services that need to update the Scheduler of their capabilities should derive from this class. Otherwise they can derive from manager.Manager directly. Updates are only sent after update_service_capabilities is called with non-None values. """ def __init__(self, host=None, db_driver=None, service_name='undefined'): self.last_capabilities = None self.service_name = service_name self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() super(SchedulerDependentManager, self).__init__(host, db_driver) def update_service_capabilities(self, capabilities): """Remember these capabilities to send on next periodic update.""" self.last_capabilities = capabilities @periodic_task.periodic_task def _publish_service_capabilities(self, context): """Pass data back to the scheduler at a periodic interval.""" if self.last_capabilities: LOG.debug('Notifying Schedulers of capabilities ...') self.scheduler_rpcapi.update_service_capabilities( context, self.service_name, self.host, self.last_capabilities) manila-2.0.0/manila/network/0000775000567000056710000000000012701407265017051 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/network/linux/0000775000567000056710000000000012701407265020210 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/network/linux/__init__.py0000664000567000056710000000000012701407107022302 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/network/linux/interface.py0000664000567000056710000001610612701407107022521 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import abc import netaddr from oslo_config import cfg from oslo_log import log import six from manila import exception from manila.i18n import _ from manila.i18n import _LE from manila.i18n import _LW from manila.network.linux import ip_lib from manila.network.linux import ovs_lib from manila import utils LOG = log.getLogger(__name__) OPTS = [ cfg.StrOpt('ovs_integration_bridge', default='br-int', help=_('Name of Open vSwitch bridge to use.')), ] CONF = cfg.CONF CONF.register_opts(OPTS) def device_name_synchronized(f): """Wraps methods with interprocess locks by device names.""" def wrapped_func(self, *args, **kwargs): device_name = "device_name_%s" % args[0] @utils.synchronized("linux_interface_%s" % device_name, external=True) def source_func(self, *args, **kwargs): return f(self, *args, **kwargs) return source_func(self, *args, **kwargs) return wrapped_func @six.add_metaclass(abc.ABCMeta) class LinuxInterfaceDriver(object): # from linux IF_NAMESIZE DEV_NAME_LEN = 14 DEV_NAME_PREFIX = 'tap' def __init__(self): self.conf = CONF @device_name_synchronized def init_l3(self, device_name, ip_cidrs, namespace=None): """Set the L3 settings for the interface using data from the port. ip_cidrs: list of 'X.X.X.X/YY' strings """ device = ip_lib.IPDevice(device_name, namespace=namespace) previous = {} for address in device.addr.list(scope='global', filters=['permanent']): previous[address['cidr']] = address['ip_version'] # add new addresses for ip_cidr in ip_cidrs: net = netaddr.IPNetwork(ip_cidr) if ip_cidr in previous: del previous[ip_cidr] continue device.addr.add(net.version, ip_cidr, str(net.broadcast)) # clean up any old addresses for ip_cidr, ip_version in previous.items(): device.addr.delete(ip_version, ip_cidr) def check_bridge_exists(self, bridge): if not ip_lib.device_exists(bridge): raise exception.BridgeDoesNotExist(bridge=bridge) def get_device_name(self, port): return (self.DEV_NAME_PREFIX + port['id'])[:self.DEV_NAME_LEN] @abc.abstractmethod def plug(self, device_name, port_id, mac_address, bridge=None, namespace=None, prefix=None): """Plug in the interface.""" @abc.abstractmethod def unplug(self, device_name, bridge=None, namespace=None, prefix=None): """Unplug the interface.""" class OVSInterfaceDriver(LinuxInterfaceDriver): """Driver for creating an internal interface on an OVS bridge.""" DEV_NAME_PREFIX = 'tap' def _get_tap_name(self, dev_name): return dev_name def _ovs_add_port(self, bridge, device_name, port_id, mac_address, internal=True): cmd = ['ovs-vsctl', '--', '--may-exist', 'add-port', bridge, device_name] if internal: cmd += ['--', 'set', 'Interface', device_name, 'type=internal'] cmd += ['--', 'set', 'Interface', device_name, 'external-ids:iface-id=%s' % port_id, '--', 'set', 'Interface', device_name, 'external-ids:iface-status=active', '--', 'set', 'Interface', device_name, 'external-ids:attached-mac=%s' % mac_address] utils.execute(*cmd, run_as_root=True) @device_name_synchronized def plug(self, device_name, port_id, mac_address, bridge=None, namespace=None, prefix=None): """Plug in the interface.""" if not bridge: bridge = self.conf.ovs_integration_bridge self.check_bridge_exists(bridge) ip = ip_lib.IPWrapper() ns_dev = ip.device(device_name) if not ip_lib.device_exists(device_name, namespace=namespace): tap_name = self._get_tap_name(device_name) self._ovs_add_port(bridge, tap_name, port_id, mac_address) ns_dev.link.set_address(mac_address) # Add an interface 
created by ovs to the namespace. if namespace: namespace_obj = ip.ensure_namespace(namespace) namespace_obj.add_device_to_namespace(ns_dev) else: LOG.warning(_LW("Device %s already exists."), device_name) ns_dev.link.set_up() @device_name_synchronized def unplug(self, device_name, bridge=None, namespace=None, prefix=None): """Unplug the interface.""" if not bridge: bridge = self.conf.ovs_integration_bridge tap_name = self._get_tap_name(device_name) self.check_bridge_exists(bridge) ovs = ovs_lib.OVSBridge(bridge) try: ovs.delete_port(tap_name) except RuntimeError: LOG.error(_LE("Failed unplugging interface '%s'"), device_name) class BridgeInterfaceDriver(LinuxInterfaceDriver): """Driver for creating bridge interfaces.""" DEV_NAME_PREFIX = 'ns-' @device_name_synchronized def plug(self, device_name, port_id, mac_address, bridge=None, namespace=None, prefix=None): """Plugin the interface.""" ip = ip_lib.IPWrapper() if prefix: tap_name = device_name.replace(prefix, 'tap') else: tap_name = device_name.replace(self.DEV_NAME_PREFIX, 'tap') if not ip_lib.device_exists(device_name, namespace=namespace): # Create ns_veth in a namespace if one is configured. root_veth, ns_veth = ip.add_veth(tap_name, device_name, namespace2=namespace) ns_veth.link.set_address(mac_address) else: ns_veth = ip.device(device_name) root_veth = ip.device(tap_name) LOG.warning(_LW("Device %s already exists."), device_name) root_veth.link.set_up() ns_veth.link.set_up() @device_name_synchronized def unplug(self, device_name, bridge=None, namespace=None, prefix=None): """Unplug the interface.""" device = ip_lib.IPDevice(device_name, namespace) try: device.link.delete() LOG.debug("Unplugged interface '%s'", device_name) except RuntimeError: LOG.error(_LE("Failed unplugging interface '%s'"), device_name) manila-2.0.0/manila/network/linux/ovs_lib.py0000664000567000056710000000424312701407107022215 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from oslo_log import log from manila.i18n import _LE from manila import utils LOG = log.getLogger(__name__) class OVSBridge(object): def __init__(self, br_name): self.br_name = br_name self.re_id = self.re_compile_id() def re_compile_id(self): external = 'external_ids\s*' mac = 'attached-mac="(?P([a-fA-F\d]{2}:){5}([a-fA-F\d]{2}))"' iface = 'iface-id="(?P[^"]+)"' name = 'name\s*:\s"(?P[^"]*)"' port = 'ofport\s*:\s(?P-?\d+)' _re = ('%(external)s:\s{ ( %(mac)s,? | %(iface)s,? | . )* }' ' \s+ %(name)s \s+ %(port)s' % {'external': external, 'mac': mac, 'iface': iface, 'name': name, 'port': port}) return re.compile(_re, re.M | re.X) def run_vsctl(self, args): full_args = ["ovs-vsctl", "--timeout=2"] + args try: return utils.execute(*full_args, run_as_root=True) except Exception as e: LOG.error(_LE("Unable to execute %(cmd)s. 
Exception: " "%(exception)s"), {'cmd': full_args, 'exception': e}) def reset_bridge(self): self.run_vsctl(["--", "--if-exists", "del-br", self.br_name]) self.run_vsctl(["add-br", self.br_name]) def delete_port(self, port_name): self.run_vsctl(["--", "--if-exists", "del-port", self.br_name, port_name]) manila-2.0.0/manila/network/linux/ip_lib.py0000664000567000056710000003577512701407112022030 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr import six from manila.i18n import _ from manila import utils LOOPBACK_DEVNAME = 'lo' class SubProcessBase(object): def __init__(self, namespace=None): self.namespace = namespace def _run(self, options, command, args): if self.namespace: return self._as_root(options, command, args) else: return self._execute(options, command, args) def _as_root(self, options, command, args, use_root_namespace=False): namespace = self.namespace if not use_root_namespace else None return self._execute(options, command, args, namespace, as_root=True) @classmethod def _execute(cls, options, command, args, namespace=None, as_root=False): opt_list = ['-%s' % o for o in options] if namespace: ip_cmd = ['ip', 'netns', 'exec', namespace, 'ip'] else: ip_cmd = ['ip'] total_cmd = ip_cmd + opt_list + [command] + list(args) return utils.execute(*total_cmd, run_as_root=as_root)[0] class IPWrapper(SubProcessBase): def __init__(self, namespace=None): super(IPWrapper, self).__init__(namespace=namespace) self.netns = IpNetnsCommand(self) def device(self, name): return IPDevice(name, self.namespace) def get_devices(self, exclude_loopback=False): retval = [] output = self._execute('o', 'link', ('list',), self.namespace) for line in output.split('\n'): if '<' not in line: continue tokens = line.split(':', 2) if len(tokens) >= 3: name = tokens[1].split('@', 1)[0].strip() if exclude_loopback and name == LOOPBACK_DEVNAME: continue retval.append(IPDevice(name, self.namespace)) return retval def add_tuntap(self, name, mode='tap'): self._as_root('', 'tuntap', ('add', name, 'mode', mode)) return IPDevice(name, self.namespace) def add_veth(self, name1, name2, namespace2=None): args = ['add', name1, 'type', 'veth', 'peer', 'name', name2] if namespace2 is None: namespace2 = self.namespace else: self.ensure_namespace(namespace2) args += ['netns', namespace2] self._as_root('', 'link', tuple(args)) return (IPDevice(name1, self.namespace), IPDevice(name2, namespace2)) def ensure_namespace(self, name): if not self.netns.exists(name): ip = self.netns.add(name) lo = ip.device(LOOPBACK_DEVNAME) lo.link.set_up() else: ip = IPWrapper(name) return ip def namespace_is_empty(self): return not self.get_devices(exclude_loopback=True) def garbage_collect_namespace(self): """Conditionally destroy the namespace if it is empty.""" if self.namespace and self.netns.exists(self.namespace): if self.namespace_is_empty(): self.netns.delete(self.namespace) return True return False def add_device_to_namespace(self, device): if self.namespace: 
device.link.set_netns(self.namespace) @classmethod def get_namespaces(cls): output = cls._execute('', 'netns', ('list',)) return [l.strip() for l in output.split('\n')] class IPDevice(SubProcessBase): def __init__(self, name, namespace=None): super(IPDevice, self).__init__(namespace=namespace) self.name = name self.link = IpLinkCommand(self) self.addr = IpAddrCommand(self) self.route = IpRouteCommand(self) def __eq__(self, other): return (other is not None and self.name == other.name and self.namespace == other.namespace) def __str__(self): return self.name class IpCommandBase(object): COMMAND = '' def __init__(self, parent): self._parent = parent def _run(self, *args, **kwargs): return self._parent._run(kwargs.get('options', []), self.COMMAND, args) def _as_root(self, *args, **kwargs): return self._parent._as_root(kwargs.get('options', []), self.COMMAND, args, kwargs.get('use_root_namespace', False)) class IpDeviceCommandBase(IpCommandBase): @property def name(self): return self._parent.name class IpLinkCommand(IpDeviceCommandBase): COMMAND = 'link' def set_address(self, mac_address): self._as_root('set', self.name, 'address', mac_address) def set_mtu(self, mtu_size): self._as_root('set', self.name, 'mtu', mtu_size) def set_up(self): self._as_root('set', self.name, 'up') def set_down(self): self._as_root('set', self.name, 'down') def set_netns(self, namespace): self._as_root('set', self.name, 'netns', namespace) self._parent.namespace = namespace def set_name(self, name): self._as_root('set', self.name, 'name', name) self._parent.name = name def set_alias(self, alias_name): self._as_root('set', self.name, 'alias', alias_name) def delete(self): self._as_root('delete', self.name) @property def address(self): return self.attributes.get('link/ether') @property def state(self): return self.attributes.get('state') @property def mtu(self): return self.attributes.get('mtu') @property def qdisc(self): return self.attributes.get('qdisc') @property def qlen(self): return self.attributes.get('qlen') @property def alias(self): return self.attributes.get('alias') @property def attributes(self): return self._parse_line(self._run('show', self.name, options='o')) def _parse_line(self, value): if not value: return {} device_name, settings = value.replace("\\", '').split('>', 1) tokens = settings.split() keys = tokens[::2] values = [int(v) if v.isdigit() else v for v in tokens[1::2]] retval = dict(zip(keys, values)) return retval class IpAddrCommand(IpDeviceCommandBase): COMMAND = 'addr' def add(self, ip_version, cidr, broadcast, scope='global'): self._as_root('add', cidr, 'brd', broadcast, 'scope', scope, 'dev', self.name, options=[ip_version]) def delete(self, ip_version, cidr): self._as_root('del', cidr, 'dev', self.name, options=[ip_version]) def flush(self): self._as_root('flush', self.name) def list(self, scope=None, to=None, filters=None): if filters is None: filters = [] retval = [] if scope: filters += ['scope', scope] if to: filters += ['to', to] for line in self._run('show', self.name, *filters).split('\n'): line = line.strip() if not line.startswith('inet'): continue parts = line.split() if parts[0] == 'inet6': version = 6 scope = parts[3] broadcast = '::' else: version = 4 if parts[2] == 'brd': broadcast = parts[3] scope = parts[5] else: # sometimes output of 'ip a' might look like: # inet 192.168.100.100/24 scope global eth0 # and broadcast needs to be calculated from CIDR broadcast = str(netaddr.IPNetwork(parts[1]).broadcast) scope = parts[3] retval.append(dict(cidr=parts[1], 
broadcast=broadcast, scope=scope, ip_version=version, dynamic=('dynamic' == parts[-1]))) return retval class IpRouteCommand(IpDeviceCommandBase): COMMAND = 'route' def add_gateway(self, gateway, metric=None): args = ['replace', 'default', 'via', gateway] if metric: args += ['metric', metric] args += ['dev', self.name] self._as_root(*args) def delete_gateway(self, gateway): self._as_root('del', 'default', 'via', gateway, 'dev', self.name) def get_gateway(self, scope=None, filters=None): if filters is None: filters = [] retval = None if scope: filters += ['scope', scope] route_list_lines = self._run('list', 'dev', self.name, *filters).split('\n') default_route_line = next((x.strip() for x in route_list_lines if x.strip().startswith('default')), None) if default_route_line: gateway_index = 2 parts = default_route_line.split() retval = dict(gateway=parts[gateway_index]) metric_index = 4 parts_has_metric = (len(parts) > metric_index) if parts_has_metric: retval.update(metric=int(parts[metric_index])) return retval def pullup_route(self, interface_name): """Pullup route entry. Ensures that the route entry for the interface is before all others on the same subnet. """ device_list = [] device_route_list_lines = self._run('list', 'proto', 'kernel', 'dev', interface_name).split('\n') for device_route_line in device_route_list_lines: try: subnet = device_route_line.split()[0] except Exception: continue subnet_route_list_lines = self._run('list', 'proto', 'kernel', 'match', subnet).split('\n') for subnet_route_line in subnet_route_list_lines: i = iter(subnet_route_line.split()) while(next(i) != 'dev'): pass device = next(i) try: while(next(i) != 'src'): pass src = next(i) except Exception: src = '' if device != interface_name: device_list.append((device, src)) else: break for (device, src) in device_list: self._as_root('del', subnet, 'dev', device) if (src != ''): self._as_root('append', subnet, 'proto', 'kernel', 'src', src, 'dev', device) else: self._as_root('append', subnet, 'proto', 'kernel', 'dev', device) def clear_outdated_routes(self, cidr): """Removes duplicated routes for a certain network CIDR. Removes all routes related to supplied CIDR except for the one related to this interface device. :param cidr: The network CIDR to be cleared. """ routes = self.list() items = [x for x in routes if x['Destination'] == cidr and x.get('Device') and x['Device'] != self.name] for item in items: self.delete_net_route(item['Destination'], item['Device']) def list(self): """List all routes :return: A dictionary with field 'Destination' and 'Device' for each route entry. 'Gateway' field is included if route has a gateway. """ routes = [] output = self._as_root('list') lines = output.split('\n') for line in lines: items = line.split() if len(items) > 0: item = {'Destination': items[0]} if len(items) > 1: if items[1] == 'via': item['Gateway'] = items[2] if len(items) > 3 and items[3] == 'dev': item['Device'] = items[4] if items[1] == 'dev': item['Device'] = items[2] routes.append(item) return routes def delete_net_route(self, cidr, device): """Deletes a route according to suplied CIDR and interface device. :param cidr: The network CIDR to be removed. :param device: The network interface device to be removed. 
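        As an illustration (the device name is hypothetical), calling
        delete_net_route('10.0.0.0/24', 'eth1') simply runs
        'ip route delete 10.0.0.0/24 dev eth1' as root, prefixed with
        'ip netns exec <namespace>' when the wrapping IPDevice has a
        namespace set.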
""" self._as_root('delete', cidr, 'dev', device) class IpNetnsCommand(IpCommandBase): COMMAND = 'netns' def add(self, name): self._as_root('add', name, use_root_namespace=True) return IPWrapper(name) def delete(self, name): self._as_root('delete', name, use_root_namespace=True) def execute(self, cmds, addl_env=None, check_exit_code=True): if addl_env is None: addl_env = dict() if not self._parent.namespace: raise Exception(_('No namespace defined for parent')) else: env_params = [] if addl_env: env_params = (['env'] + ['%s=%s' % pair for pair in sorted(addl_env.items())]) total_cmd = (['ip', 'netns', 'exec', self._parent.namespace] + env_params + list(cmds)) return utils.execute(*total_cmd, run_as_root=True, check_exit_code=check_exit_code) def exists(self, name): output = self._as_root('list', options='o', use_root_namespace=True) for line in output.split('\n'): if name == line.strip(): return True return False def device_exists(device_name, namespace=None): try: address = IPDevice(device_name, namespace).link.address except Exception as e: if 'does not exist' in six.text_type(e): return False raise return bool(address) def iproute_arg_supported(command, arg): command += ['help'] stdout, stderr = utils.execute(command, check_exit_code=False, return_stderr=True) return any(arg in line for line in stderr.split('\n')) manila-2.0.0/manila/network/neutron/0000775000567000056710000000000012701407265020543 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/network/neutron/constants.py0000664000567000056710000000130112701407107023117 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. PROVIDER_NW_EXT = 'Provider Network' PORTBINDING_EXT = 'Port Binding' manila-2.0.0/manila/network/neutron/__init__.py0000664000567000056710000000000012701407107022635 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/network/neutron/neutron_network_plugin.py0000664000567000056710000002377212701407107025744 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from manila.common import constants from manila import exception from manila import network from manila.network.neutron import api as neutron_api from manila.network.neutron import constants as neutron_constants from manila import utils neutron_single_network_plugin_opts = [ cfg.StrOpt( 'neutron_net_id', help="Default Neutron network that will be used for share server " "creation. This opt is used only with " "class 'NeutronSingleNetworkPlugin'.", deprecated_group='DEFAULT'), cfg.StrOpt( 'neutron_subnet_id', help="Default Neutron subnet that will be used for share server " "creation. Should be assigned to network defined in opt " "'neutron_net_id'. This opt is used only with " "class 'NeutronSingleNetworkPlugin'.", deprecated_group='DEFAULT'), ] CONF = cfg.CONF class NeutronNetworkPlugin(network.NetworkBaseAPI): def __init__(self, *args, **kwargs): db_driver = kwargs.pop('db_driver', None) super(NeutronNetworkPlugin, self).__init__(db_driver=db_driver) self._neutron_api = None self._neutron_api_args = args self._neutron_api_kwargs = kwargs self._label = kwargs.pop('label', 'user') @property def label(self): return self._label @property @utils.synchronized("instantiate_neutron_api") def neutron_api(self): if not self._neutron_api: self._neutron_api = neutron_api.API(*self._neutron_api_args, **self._neutron_api_kwargs) return self._neutron_api def allocate_network(self, context, share_server, share_network=None, **kwargs): """Allocate network resources using given network information. Create neutron ports for a given neutron network and subnet, create manila db records for allocated neutron ports. :param context: RequestContext object :param share_network: share network data :param kwargs: allocations parameters given by the back-end driver. Supported params: 'count' - how many allocations should be created 'device_owner' - set owner for network allocations :rtype: list of :class: 'dict' """ if not self._has_provider_network_extension(): msg = "%s extension required" % neutron_constants.PROVIDER_NW_EXT raise exception.NetworkBadConfigurationException(reason=msg) self._verify_share_network(share_server['id'], share_network) self._save_neutron_network_data(context, share_network) self._save_neutron_subnet_data(context, share_network) allocation_count = kwargs.get('count', 1) device_owner = kwargs.get('device_owner', 'share') ports = [] for __ in range(0, allocation_count): ports.append(self._create_port(context, share_server, share_network, device_owner)) return ports def deallocate_network(self, context, share_server_id): """Deallocate neutron network resources for the given share server. Delete previously allocated neutron ports, delete manila db records for deleted ports. 
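        As a hedged usage sketch (variable names are hypothetical), the two
        methods are normally paired around the share server lifecycle:

            allocations = plugin.allocate_network(
                ctxt, share_server, share_network,
                count=2, device_owner='share')
            # ... share server is created and used here ...
            plugin.deallocate_network(ctxt, share_server['id'])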
:param context: RequestContext object :param share_server_id: id of share server :rtype: None """ ports = self.db.network_allocations_get_for_share_server( context, share_server_id) for port in ports: self._delete_port(context, port) def _create_port(self, context, share_server, share_network, device_owner): port = self.neutron_api.create_port( share_network['project_id'], network_id=share_network['neutron_net_id'], subnet_id=share_network['neutron_subnet_id'], device_owner='manila:' + device_owner) port_dict = { 'id': port['id'], 'share_server_id': share_server['id'], 'ip_address': port['fixed_ips'][0]['ip_address'], 'mac_address': port['mac_address'], 'status': constants.STATUS_ACTIVE, 'label': self.label, 'network_type': share_network['network_type'], 'segmentation_id': share_network['segmentation_id'], 'ip_version': share_network['ip_version'], 'cidr': share_network['cidr'], } return self.db.network_allocation_create(context, port_dict) def _delete_port(self, context, port): try: self.neutron_api.delete_port(port['id']) except exception.NetworkException: self.db.network_allocation_update( context, port['id'], {'status': constants.STATUS_ERROR}) raise else: self.db.network_allocation_delete(context, port['id']) def _has_provider_network_extension(self): extensions = self.neutron_api.list_extensions() return neutron_constants.PROVIDER_NW_EXT in extensions def _save_neutron_network_data(self, context, share_network): net_info = self.neutron_api.get_network( share_network['neutron_net_id']) provider_nw_dict = { 'network_type': net_info['provider:network_type'], 'segmentation_id': net_info['provider:segmentation_id'] } share_network.update(provider_nw_dict) if self.label != 'admin': self.db.share_network_update( context, share_network['id'], provider_nw_dict) def _save_neutron_subnet_data(self, context, share_network): subnet_info = self.neutron_api.get_subnet( share_network['neutron_subnet_id']) subnet_values = { 'cidr': subnet_info['cidr'], 'ip_version': subnet_info['ip_version'] } share_network.update(subnet_values) if self.label != 'admin': self.db.share_network_update( context, share_network['id'], subnet_values) class NeutronSingleNetworkPlugin(NeutronNetworkPlugin): def __init__(self, *args, **kwargs): super(NeutronSingleNetworkPlugin, self).__init__(*args, **kwargs) CONF.register_opts( neutron_single_network_plugin_opts, group=self.neutron_api.config_group_name) self.net = self.neutron_api.configuration.neutron_net_id self.subnet = self.neutron_api.configuration.neutron_subnet_id self._verify_net_and_subnet() def allocate_network(self, context, share_server, share_network=None, **kwargs): if self.label != 'admin': share_network = self._update_share_network_net_data( context, share_network) else: share_network = { 'project_id': self.neutron_api.admin_project_id, 'neutron_net_id': self.net, 'neutron_subnet_id': self.subnet, } super(NeutronSingleNetworkPlugin, self).allocate_network( context, share_server, share_network, **kwargs) def _verify_net_and_subnet(self): data = dict(net=self.net, subnet=self.subnet) if self.net and self.subnet: net = self.neutron_api.get_network(self.net) if not (net.get('subnets') and data['subnet'] in net['subnets']): raise exception.NetworkBadConfigurationException( "Subnet '%(subnet)s' does not belong to " "network '%(net)s'." % data) else: raise exception.NetworkBadConfigurationException( "Neutron net and subnet are expected to be both set. " "Got: net=%(net)s and subnet=%(subnet)s." 
% data) def _update_share_network_net_data(self, context, share_network): upd = dict() if share_network.get('nova_net_id') is not None: raise exception.NetworkBadConfigurationException( "Share network has nova_net_id set.") if not share_network.get('neutron_net_id') == self.net: if share_network.get('neutron_net_id') is not None: raise exception.NetworkBadConfigurationException( "Using neutron net id different from None or value " "specified in the config is forbidden for " "NeutronSingleNetworkPlugin. Allowed values: (%(net)s, " "None), received value: %(err)s" % { "net": self.net, "err": share_network.get('neutron_net_id')}) upd['neutron_net_id'] = self.net if not share_network.get('neutron_subnet_id') == self.subnet: if share_network.get('neutron_subnet_id') is not None: raise exception.NetworkBadConfigurationException( "Using neutron subnet id different from None or value " "specified in the config is forbidden for " "NeutronSingleNetworkPlugin. Allowed values: (%(snet)s, " "None), received value: %(err)s" % { "snet": self.subnet, "err": share_network.get('neutron_subnet_id')}) upd['neutron_subnet_id'] = self.subnet if upd: share_network = self.db.share_network_update( context, share_network['id'], upd) return share_network manila-2.0.0/manila/network/neutron/api.py0000664000567000056710000003500712701407107021666 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2014 Mirantis Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneauth1 import loading as ks_loading from neutronclient.common import exceptions as neutron_client_exc from neutronclient.v2_0 import client as clientv20 from oslo_config import cfg from oslo_log import log from manila.common import client_auth from manila import context from manila import exception from manila.i18n import _LE from manila.network.neutron import constants as neutron_constants NEUTRON_GROUP = 'neutron' neutron_deprecated_opts = [ cfg.StrOpt( 'neutron_admin_username', default='neutron', deprecated_group='DEFAULT', deprecated_for_removal=True, deprecated_reason="This option isn't used any longer. Please use " "[neutron] username instead.", help='Username for connecting to neutron in admin context.'), cfg.StrOpt( 'neutron_admin_password', help='Password for connecting to neutron in admin context.', deprecated_group='DEFAULT', deprecated_for_removal=True, deprecated_reason="This option isn't used any longer. Please use " "[neutron] password instead.", secret=True), cfg.StrOpt( 'neutron_admin_project_name', default='service', deprecated_group='DEFAULT', deprecated_name='neutron_admin_tenant_name', deprecated_for_removal=True, deprecated_reason="This option isn't used any longer. Please use " "[neutron] project instead.", help='Project name for connecting to Neutron in admin context.'), cfg.StrOpt( 'neutron_admin_auth_url', default='http://localhost:5000/v2.0', deprecated_group='DEFAULT', deprecated_for_removal=True, deprecated_reason="This option isn't used any longer. 
Please use " "[neutron] auth_url instead.", help='Auth URL for connecting to neutron in admin context.'), ] neutron_opts = [ cfg.StrOpt( 'url', default='http://127.0.0.1:9696', deprecated_group="DEFAULT", deprecated_name="neutron_url", help='URL for connecting to neutron.'), cfg.IntOpt( 'url_timeout', default=30, deprecated_group="DEFAULT", deprecated_name="neutron_url_timeout", help='Timeout value for connecting to neutron in seconds.'), cfg.BoolOpt( 'api_insecure', default=False, deprecated_group="DEFAULT", help='If set, ignore any SSL validation issues.'), cfg.StrOpt( 'auth_strategy', default='keystone', deprecated_group="DEFAULT", help='Auth strategy for connecting to neutron in admin context.'), cfg.StrOpt( 'ca_certificates_file', deprecated_for_removal=True, deprecated_group="DEFAULT", help='Location of CA certificates file to use for ' 'neutron client requests.'), cfg.StrOpt( 'region_name', help='Region name for connecting to neutron in admin context') ] CONF = cfg.CONF LOG = log.getLogger(__name__) def list_opts(): return client_auth.AuthClientLoader.list_opts(NEUTRON_GROUP) class API(object): """API for interacting with the neutron 2.x API. :param configuration: instance of config or config group. """ def __init__(self, config_group_name=None): self.config_group_name = config_group_name or 'DEFAULT' ks_loading.register_session_conf_options(CONF, NEUTRON_GROUP) ks_loading.register_auth_conf_options(CONF, NEUTRON_GROUP) CONF.register_opts(neutron_opts, NEUTRON_GROUP) CONF.register_opts(neutron_deprecated_opts, group=self.config_group_name) self.configuration = getattr(CONF, self.config_group_name, CONF) self.last_neutron_extension_sync = None self.extensions = {} self.auth_obj = None @property def client(self): return self.get_client(context.get_admin_context()) def get_client(self, context): if not self.auth_obj: config = CONF[self.config_group_name] v2_deprecated_opts = { 'username': config.neutron_admin_username, 'password': config.neutron_admin_password, 'tenant_name': config.neutron_admin_project_name, 'auth_url': config.neutron_admin_auth_url, } self.auth_obj = client_auth.AuthClientLoader( client_class=clientv20.Client, exception_module=neutron_client_exc, cfg_group=NEUTRON_GROUP, deprecated_opts_for_v2=v2_deprecated_opts) return self.auth_obj.get_client(self, context) @property def admin_project_id(self): if self.client.httpclient.auth_token is None: try: self.client.httpclient.authenticate() except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) return self.client.httpclient.get_project_id() def get_all_admin_project_networks(self): search_opts = {'tenant_id': self.admin_project_id, 'shared': False} nets = self.client.list_networks(**search_opts).get('networks', []) return nets def create_port(self, tenant_id, network_id, host_id=None, subnet_id=None, fixed_ip=None, device_owner=None, device_id=None, mac_address=None, security_group_ids=None, dhcp_opts=None): try: port_req_body = {'port': {}} port_req_body['port']['network_id'] = network_id port_req_body['port']['admin_state_up'] = True port_req_body['port']['tenant_id'] = tenant_id if security_group_ids: port_req_body['port']['security_groups'] = security_group_ids if mac_address: port_req_body['port']['mac_address'] = mac_address if self._has_port_binding_extension() and host_id: port_req_body['port']['binding:host_id'] = host_id if dhcp_opts is not None: port_req_body['port']['extra_dhcp_opts'] = dhcp_opts if subnet_id: fixed_ip_dict = {'subnet_id': 
subnet_id} if fixed_ip: fixed_ip_dict.update({'ip_address': fixed_ip}) port_req_body['port']['fixed_ips'] = [fixed_ip_dict] if device_owner: port_req_body['port']['device_owner'] = device_owner if device_id: port_req_body['port']['device_id'] = device_id port = self.client.create_port(port_req_body).get('port', {}) return port except neutron_client_exc.NeutronClientException as e: LOG.exception(_LE('Neutron error creating port on network %s'), network_id) if e.status_code == 409: raise exception.PortLimitExceeded() raise exception.NetworkException(code=e.status_code, message=e.message) def delete_port(self, port_id): try: self.client.delete_port(port_id) except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) def delete_subnet(self, subnet_id): try: self.client.delete_subnet(subnet_id) except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) def list_ports(self, **search_opts): """List ports for the client based on search options.""" return self.client.list_ports(**search_opts).get('ports') def show_port(self, port_id): """Return the port for the client given the port id.""" try: return self.client.show_port(port_id).get('port') except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) def get_all_networks(self): """Get all networks for client.""" return self.client.list_networks().get('networks') def get_network(self, network_uuid): """Get specific network for client.""" try: network = self.client.show_network(network_uuid).get('network', {}) return network except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) def get_subnet(self, subnet_uuid): """Get specific subnet for client.""" try: return self.client.show_subnet(subnet_uuid).get('subnet', {}) except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) def list_extensions(self): extensions_list = self.client.list_extensions().get('extensions') return {ext['name']: ext for ext in extensions_list} def _has_port_binding_extension(self): if not self.extensions: self.extensions = self.list_extensions() return neutron_constants.PORTBINDING_EXT in self.extensions def router_create(self, tenant_id, name): router_req_body = {'router': {}} router_req_body['router']['tenant_id'] = tenant_id router_req_body['router']['name'] = name try: return self.client.create_router(router_req_body).get('router', {}) except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) def network_create(self, tenant_id, name): network_req_body = {'network': {}} network_req_body['network']['tenant_id'] = tenant_id network_req_body['network']['name'] = name try: return self.client.create_network( network_req_body).get('network', {}) except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) def subnet_create(self, tenant_id, net_id, name, cidr): subnet_req_body = {'subnet': {}} subnet_req_body['subnet']['tenant_id'] = tenant_id subnet_req_body['subnet']['name'] = name subnet_req_body['subnet']['network_id'] = net_id subnet_req_body['subnet']['cidr'] = cidr subnet_req_body['subnet']['ip_version'] = 4 try: return self.client.create_subnet( subnet_req_body).get('subnet', {}) except 
neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) def router_add_interface(self, router_id, subnet_id, port_id=None): body = {} if subnet_id: body['subnet_id'] = subnet_id if port_id: body['port_id'] = port_id try: self.client.add_interface_router(router_id, body) except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) def router_remove_interface(self, router_id, subnet_id, port_id=None): body = {} if subnet_id: body['subnet_id'] = subnet_id if port_id: body['port_id'] = port_id try: self.client.remove_interface_router(router_id, body) except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) def router_list(self): try: return self.client.list_routers().get('routers', {}) except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) def update_port_fixed_ips(self, port_id, fixed_ips): try: port_req_body = {'port': fixed_ips} port = self.client.update_port( port_id, port_req_body).get('port', {}) return port except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) def show_router(self, router_id): try: return self.client.show_router(router_id).get('router', {}) except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) def router_update_routes(self, router_id, routes): try: router_req_body = {'router': routes} port = self.client.update_router( router_id, router_req_body).get('router', {}) return port except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) def update_subnet(self, subnet_uuid, name): """Update specific subnet for client.""" subnet_req_body = {'subnet': {'name': name}} try: return self.client.update_subnet( subnet_uuid, subnet_req_body).get('subnet', {}) except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) manila-2.0.0/manila/network/__init__.py0000664000567000056710000000510312701407107021154 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_config import cfg from oslo_utils import importutils import six from manila.db import base as db_base from manila import exception from manila.i18n import _ network_opts = [ cfg.StrOpt( 'network_api_class', default='manila.network.neutron.' 'neutron_network_plugin.NeutronNetworkPlugin', deprecated_group='DEFAULT', help='The full class name of the Networking API class to use.'), ] CONF = cfg.CONF def API(config_group_name=None, label='user'): """Selects class and config group of network plugin. 
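    As an illustration (the config group name is hypothetical), a typical
    caller obtains a plugin instance and lets it manage allocations:

        from manila import network

        net_api = network.API(config_group_name='backend_alpha',
                              label='user')
        allocations = net_api.allocate_network(
            ctxt, share_server, share_network, count=1)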
:param config_group_name: name of config group to be used for registration of networking opts. :returns: instance of networking plugin class """ CONF.register_opts(network_opts, group=config_group_name) if config_group_name: network_api_class = getattr(CONF, config_group_name).network_api_class else: network_api_class = CONF.network_api_class cls = importutils.import_class(network_api_class) return cls(config_group_name=config_group_name, label=label) @six.add_metaclass(abc.ABCMeta) class NetworkBaseAPI(db_base.Base): """User network plugin for setting up main net interfaces.""" def __init__(self, db_driver=None): super(NetworkBaseAPI, self).__init__(db_driver=db_driver) def _verify_share_network(self, share_server_id, share_network): if share_network is None: msg = _("'Share network' is not provided for setting up " "network interfaces for 'Share server' " "'%s'.") % share_server_id raise exception.NetworkBadConfigurationException(reason=msg) @abc.abstractmethod def allocate_network(self, context, share_server, share_network=None, **kwargs): pass @abc.abstractmethod def deallocate_network(self, context, share_server_id): pass manila-2.0.0/manila/network/standalone_network_plugin.py0000664000567000056710000003152012701407107024676 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from oslo_config import cfg from oslo_log import log import six from manila.common import constants from manila import exception from manila.i18n import _ from manila import network from manila import utils standalone_network_plugin_opts = [ cfg.StrOpt( 'standalone_network_plugin_gateway', help="Gateway IPv4 address that should be used. Required.", deprecated_group='DEFAULT'), cfg.StrOpt( 'standalone_network_plugin_mask', help="Network mask that will be used. Can be either decimal " "like '24' or binary like '255.255.255.0'. Required.", deprecated_group='DEFAULT'), cfg.StrOpt( 'standalone_network_plugin_network_type', help="Network type, such as 'flat', 'vlan', 'vxlan' or 'gre'. " "Empty value is alias for 'flat'. " "It will be assigned to share-network and share drivers will be " "able to use this for network interfaces within provisioned " "share servers. Optional.", choices=['flat', 'vlan', 'vxlan', 'gre'], deprecated_group='DEFAULT'), cfg.IntOpt( 'standalone_network_plugin_segmentation_id', help="Set it if network has segmentation (VLAN, VXLAN, etc...). " "It will be assigned to share-network and share drivers will be " "able to use this for network interfaces within provisioned " "share servers. Optional. Example: 1001", deprecated_group='DEFAULT'), cfg.ListOpt( 'standalone_network_plugin_allowed_ip_ranges', help="Can be IP address, range of IP addresses or list of addresses " "or ranges. Contains addresses from IP network that are allowed " "to be used. If empty, then will be assumed that all host " "addresses from network can be used. Optional. 
" "Examples: 10.0.0.10 or 10.0.0.10-10.0.0.20 or " "10.0.0.10-10.0.0.20,10.0.0.30-10.0.0.40,10.0.0.50", deprecated_group='DEFAULT'), cfg.IntOpt( 'standalone_network_plugin_ip_version', default=4, help="IP version of network. Optional." "Allowed values are '4' and '6'. Default value is '4'.", deprecated_group='DEFAULT'), ] CONF = cfg.CONF LOG = log.getLogger(__name__) class StandaloneNetworkPlugin(network.NetworkBaseAPI): """Standalone network plugin for share drivers. This network plugin can be used with any network platform. It can serve flat networks as well as segmented. It does not require some specific network services in OpenStack like Neutron or Nova. The only thing that plugin does is reservation and release of IP addresses from some network. """ def __init__(self, config_group_name=None, db_driver=None, label='user'): super(StandaloneNetworkPlugin, self).__init__(db_driver=db_driver) self.config_group_name = config_group_name or 'DEFAULT' CONF.register_opts( standalone_network_plugin_opts, group=self.config_group_name) self.configuration = getattr(CONF, self.config_group_name, CONF) self._set_persistent_network_data() self._label = label LOG.debug( "\nStandalone network plugin data for config group " "'%(config_group)s': \n" "IP version - %(ip_version)s\n" "Used network - %(net)s\n" "Used gateway - %(gateway)s\n" "Used network type - %(network_type)s\n" "Used segmentation ID - %(segmentation_id)s\n" "Allowed CIDRs - %(cidrs)s\n" "Original allowed IP ranges - %(ip_ranges)s\n" "Reserved IP addresses - %(reserved)s\n", dict( config_group=self.config_group_name, ip_version=self.ip_version, net=six.text_type(self.net), gateway=self.gateway, network_type=self.network_type, segmentation_id=self.segmentation_id, cidrs=self.allowed_cidrs, ip_ranges=self.allowed_ip_ranges, reserved=self.reserved_addresses)) @property def label(self): return self._label def _set_persistent_network_data(self): """Sets persistent data for whole plugin.""" self.network_type = ( self.configuration.standalone_network_plugin_network_type) self.segmentation_id = ( self.configuration.standalone_network_plugin_segmentation_id) self.gateway = self.configuration.standalone_network_plugin_gateway self.mask = self.configuration.standalone_network_plugin_mask self.allowed_ip_ranges = ( self.configuration.standalone_network_plugin_allowed_ip_ranges) self.ip_version = int( self.configuration.standalone_network_plugin_ip_version) self.net = self._get_network() self.allowed_cidrs = self._get_list_of_allowed_addresses() self.reserved_addresses = ( six.text_type(self.net.network), self.gateway, six.text_type(self.net.broadcast)) def _get_network(self): """Returns IPNetwork object calculated from gateway and netmask.""" if not isinstance(self.gateway, six.string_types): raise exception.NetworkBadConfigurationException( _("Configuration option 'standalone_network_plugin_gateway' " "is required and has improper value '%s'.") % self.gateway) if not isinstance(self.mask, six.string_types): raise exception.NetworkBadConfigurationException( _("Configuration option 'standalone_network_plugin_mask' is " "required and has improper value '%s'.") % self.mask) try: return netaddr.IPNetwork(self.gateway + '/' + self.mask) except netaddr.AddrFormatError as e: raise exception.NetworkBadConfigurationException( reason=e) def _get_list_of_allowed_addresses(self): """Returns list of CIDRs that can be used for getting IP addresses. 
Reads information provided via configuration, such as gateway, netmask, segmentation ID and allowed IP ranges, then performs validation of provided data. :returns: list of CIDRs as text types. :raises: exception.NetworkBadConfigurationException """ cidrs = [] if self.allowed_ip_ranges: for ip_range in self.allowed_ip_ranges: ip_range_start = ip_range_end = None if utils.is_valid_ip_address(ip_range, self.ip_version): ip_range_start = ip_range_end = ip_range elif '-' in ip_range: ip_range_list = ip_range.split('-') if len(ip_range_list) == 2: ip_range_start = ip_range_list[0] ip_range_end = ip_range_list[1] for ip in ip_range_list: utils.is_valid_ip_address(ip, self.ip_version) else: msg = _("Wrong value for IP range " "'%s' was provided.") % ip_range raise exception.NetworkBadConfigurationException( reason=msg) else: msg = _("Config option " "'standalone_network_plugin_allowed_ip_ranges' " "has incorrect value " "'%s'") % self.allowed_ip_ranges raise exception.NetworkBadConfigurationException( reason=msg) range_instance = netaddr.IPRange(ip_range_start, ip_range_end) if range_instance not in self.net: data = dict( range=six.text_type(range_instance), net=six.text_type(self.net), gateway=self.gateway, netmask=self.net.netmask) msg = _("One of provided allowed IP ranges ('%(range)s') " "does not fit network '%(net)s' combined from " "gateway '%(gateway)s' and netmask " "'%(netmask)s'.") % data raise exception.NetworkBadConfigurationException( reason=msg) cidrs.extend( six.text_type(cidr) for cidr in range_instance.cidrs()) else: if self.net.version != self.ip_version: msg = _("Configured invalid IP version '%(conf_v)s', network " "has version ""'%(net_v)s'") % dict( conf_v=self.ip_version, net_v=self.net.version) raise exception.NetworkBadConfigurationException(reason=msg) cidrs.append(six.text_type(self.net)) return cidrs def _get_available_ips(self, context, amount): """Returns IP addresses from allowed IP range if there are unused IPs. :returns: IP addresses as list of text types :raises: exception.NetworkBadConfigurationException """ ips = [] if amount < 1: return ips iterator = netaddr.iter_unique_ips(*self.allowed_cidrs) for ip in iterator: ip = six.text_type(ip) if (ip in self.reserved_addresses or self.db.network_allocations_get_by_ip_address(context, ip)): continue else: ips.append(ip) if len(ips) == amount: return ips msg = _("No available IP addresses left in CIDRs %(cidrs)s. " "Requested amount of IPs to be provided '%(amount)s', " "available only '%(available)s'.") % { 'cidrs': self.allowed_cidrs, 'amount': amount, 'available': len(ips)} raise exception.NetworkBadConfigurationException(reason=msg) def _save_network_info(self, context, share_network): """Update share-network with plugin specific data.""" data = { 'network_type': self.network_type, 'segmentation_id': self.segmentation_id, 'cidr': six.text_type(self.net.cidr), 'ip_version': self.ip_version, } share_network.update(data) if self.label != 'admin': self.db.share_network_update(context, share_network['id'], data) @utils.synchronized( "allocate_network_for_standalone_network_plugin", external=True) def allocate_network(self, context, share_server, share_network=None, **kwargs): """Allocate network resources using one dedicated network. This one has interprocess lock to avoid concurrency in creation of share servers with same IP addresses using different share-networks. 
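        As a hedged illustration (the group name and addresses are
        hypothetical), a configuration such as:

            [standalone_net]
            standalone_network_plugin_gateway = 10.0.0.1
            standalone_network_plugin_mask = 255.255.255.0
            standalone_network_plugin_allowed_ip_ranges = 10.0.0.10-10.0.0.20
            standalone_network_plugin_network_type = vlan
            standalone_network_plugin_segmentation_id = 1001

        resolves to allowed CIDRs ['10.0.0.10/31', '10.0.0.12/30',
        '10.0.0.16/30', '10.0.0.20/32'] (netaddr.IPRange(...).cidrs() over
        the range), reserves 10.0.0.0, 10.0.0.1 and 10.0.0.255, and each call
        then hands out the next addresses from those CIDRs that are neither
        reserved nor already recorded as existing allocations in the DB.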
""" allocation_count = kwargs.get('count', 1) if self.label != 'admin': self._verify_share_network(share_server['id'], share_network) else: share_network = share_network or {} self._save_network_info(context, share_network) allocations = [] ip_addresses = self._get_available_ips(context, allocation_count) for ip_address in ip_addresses: data = { 'share_server_id': share_server['id'], 'ip_address': ip_address, 'status': constants.STATUS_ACTIVE, 'label': self.label, 'network_type': share_network['network_type'], 'segmentation_id': share_network['segmentation_id'], 'cidr': share_network['cidr'], 'ip_version': share_network['ip_version'], } allocations.append( self.db.network_allocation_create(context, data)) return allocations def deallocate_network(self, context, share_server_id): """Deallocate network resources for share server.""" allocations = self.db.network_allocations_get_for_share_server( context, share_server_id) for allocation in allocations: self.db.network_allocation_delete(context, allocation['id']) manila-2.0.0/manila/network/nova_network_plugin.py0000664000567000056710000002247412701407107023521 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from oslo_config import cfg from oslo_log import log import six from manila.common import constants from manila.compute import nova import manila.context from manila import exception from manila.i18n import _ from manila import network from manila import utils nova_single_network_plugin_opts = [ cfg.StrOpt( 'nova_single_network_plugin_net_id', help="Default Nova network that will be used for share servers. " "This opt is used only with class 'NovaSingleNetworkPlugin'.", deprecated_group='DEFAULT'), ] CONF = cfg.CONF LOG = log.getLogger(__name__) class NovaNetworkPlugin(network.NetworkBaseAPI): """Nova network plugin for share drivers. This plugin uses Nova networks provided within 'share-network' entities. """ def __init__(self, config_group_name=None, db_driver=None, label=None): super(NovaNetworkPlugin, self).__init__(db_driver=db_driver) self.config_group_name = config_group_name or 'DEFAULT' self._label = label or 'user' self.admin_context = manila.context.get_admin_context() self.nova_api = nova.API() @property def label(self): return self._label @utils.synchronized( "allocate_network_for_nova_network_plugin", external=True) def allocate_network(self, context, share_server, share_network, **kwargs): # NOTE(vponomaryov): This one is made as wrapper for inheritance # purposes to avoid deadlock. 
return self._allocate_network( context, share_server, share_network, **kwargs) def _allocate_network( self, context, share_server, share_network, **kwargs): """Allocate network resources using one Nova network.""" allocations = [] allocation_count = kwargs.get('count', 1) if allocation_count < 1: return allocations nova_net_id = share_network.get('nova_net_id') if not nova_net_id: raise exception.NetworkException( _("'nova_net_id' is not provided with share network.")) # NOTE(vponomaryov): nova network should be taken using admin context # because several required attrs of network are available # only for admins. nova_net = self.nova_api.network_get(self.admin_context, nova_net_id) self._save_network_info(context, nova_net, share_network) ip_addresses = self._get_available_ips( context, nova_net, allocation_count) for ip_address in ip_addresses: data = { 'share_server_id': share_server['id'], 'ip_address': ip_address, 'status': constants.STATUS_ACTIVE, 'label': self.label, 'cidr': share_network['cidr'], 'ip_version': share_network['ip_version'], 'segmentation_id': share_network['segmentation_id'], 'network_type': share_network['network_type'], } self.nova_api.fixed_ip_reserve(self.admin_context, ip_address) allocations.append( self.db.network_allocation_create(context, data)) return allocations def _get_available_ips(self, context, nova_net, amount): """Returns unused IP addresses from provided Nova network. :param context: RequestContext instance :param nova_net: dict -- dictionary with data of nova network :param amount: int - amount of IP addresses to return :returns: IP addresses as list of text types :raises: exception.NetworkBadConfigurationException """ cidr = nova_net['cidr'] or nova_net['cidr_v6'] reserved = ( six.text_type(netaddr.IPNetwork(cidr).network), nova_net['gateway'], nova_net['gateway_v6'], nova_net['dhcp_server'], nova_net['broadcast'], nova_net['vpn_private_address'], nova_net['vpn_public_address'], nova_net['dns1'], nova_net['dns2']) ips = [] iterator = netaddr.iter_unique_ips(cidr) for ip in iterator: ip = six.text_type(ip) if ip in reserved: # This IP address is reserved for service needs continue elif self.db.network_allocations_get_by_ip_address(context, ip): # Some existing share server already uses this IP address continue fixed_ip = self.nova_api.fixed_ip_get(self.admin_context, ip) if fixed_ip.get('host') or fixed_ip.get('hostname'): # Some Nova VM already uses this IP address continue ips.append(ip) if len(ips) == amount: return ips msg = _("No available IP addresses left in network '%(net_id)s' with " "CIDR %(cidr)s. 
Requested amount of IPs to be provided " "'%(amount)s', available only '%(available)s'") % dict( net_id=nova_net['id'], cidr=cidr, amount=amount, available=len(ips)) LOG.error(msg) raise exception.NetworkBadConfigurationException(reason=msg) def _save_network_info(self, context, nova_net, share_network): """Update 'share-network' with plugin specific data.""" data = { 'cidr': (nova_net['cidr'] or nova_net['cidr_v6']), 'ip_version': (4 if nova_net['cidr'] else 6), 'segmentation_id': nova_net['vlan'], 'network_type': ('vlan' if nova_net['vlan'] else 'flat'), } share_network.update(data) if self.label != 'admin': self.db.share_network_update(context, share_network['id'], data) def deallocate_network(self, context, share_server_id): """Deallocate network resources for share server.""" allocations = self.db.network_allocations_get_for_share_server( context, share_server_id) for allocation in allocations: self.db.network_allocation_delete(context, allocation['id']) self.nova_api.fixed_ip_unreserve( self.admin_context, allocation['ip_address']) class NovaSingleNetworkPlugin(NovaNetworkPlugin): """Nova network plugin for share drivers. This plugin uses only one network that is predefined within config option 'nova_single_network_plugin_net_id' and stores all required info in provided 'share-network' that, further, can be used by share drivers. """ def __init__(self, *args, **kwargs): super(NovaSingleNetworkPlugin, self).__init__(*args, **kwargs) CONF.register_opts( nova_single_network_plugin_opts, group=self.config_group_name) self.net_id = getattr(CONF, self.config_group_name, CONF).nova_single_network_plugin_net_id if not self.net_id: msg = _("Nova network is not set") LOG.error(msg) raise exception.NetworkBadConfigurationException(reason=msg) @utils.synchronized( "allocate_network_for_nova_network_plugin", external=True) def allocate_network(self, context, share_server, share_network, **kwargs): if self.label != 'admin': share_network = self._update_share_network_net_data( context, share_network) else: share_network = {'nova_net_id': self.net_id} return self._allocate_network( context, share_server, share_network, **kwargs) def _update_share_network_net_data(self, context, share_network): neutron_data = share_network.get( 'neutron_net_id', share_network.get('neutron_subnet_id')) if neutron_data: msg = _("'share-network' with id '%s' should not contain Neutron " "data. Either remove it or use another " "'share-network'") % share_network['id'] LOG.error(msg) raise exception.NetworkBadConfigurationException(reason=msg) nova_net_id = share_network.get('nova_net_id') if nova_net_id and nova_net_id != self.net_id: msg = _("'share-network' with id '%(sn_id)s' already contains " "Nova network id '%(provided)s' that is different from " "what is defined in config '%(allowed)s'. Either remove " "incorrect network id or set it the same") % dict( sn_id=share_network['id'], provided=nova_net_id, allowed=self.net_id) LOG.error(msg) raise exception.NetworkBadConfigurationException(reason=msg) elif not nova_net_id: share_network = self.db.share_network_update( context, share_network['id'], dict(nova_net_id=self.net_id)) return share_network manila-2.0.0/manila/i18n.py0000664000567000056710000000257012701407107016510 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
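# --- Editorial configuration sketch (not part of the upstream Manila source) ---
# How NovaSingleNetworkPlugin above might be wired up in manila.conf.  Only
# 'nova_single_network_plugin_net_id' is defined in this file; the
# 'network_api_class' selector option and the placeholder UUID are
# assumptions made for illustration.
#
#   [DEFAULT]
#   network_api_class = manila.network.nova_network_plugin.NovaSingleNetworkPlugin
#   nova_single_network_plugin_net_id = <nova-network-uuid>
# --- End editorial sketch ---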
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """oslo.i18n integration module. See http://docs.openstack.org/developer/oslo.i18n/usage.html . """ import oslo_i18n DOMAIN = 'manila' _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. _LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical def enable_lazy(): return oslo_i18n.enable_lazy() def translate(value, user_locale): return oslo_i18n.translate(value, user_locale) def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) manila-2.0.0/manila/context.py0000664000567000056710000001216612701407107017417 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack LLC. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """RequestContext: context for requests that persist through all of manila.""" import copy from oslo_context import context from oslo_log import log from oslo_utils import timeutils import six from manila.i18n import _, _LW from manila import policy LOG = log.getLogger(__name__) class RequestContext(context.RequestContext): """Security context and request information. Represents the user taking a given action within the system. """ def __init__(self, user_id, project_id, is_admin=None, read_deleted="no", roles=None, remote_address=None, timestamp=None, request_id=None, auth_token=None, overwrite=True, quota_class=None, service_catalog=None, **kwargs): """Initialize RequestContext. :param read_deleted: 'no' indicates deleted records are hidden, 'yes' indicates deleted records are visible, 'only' indicates that *only* deleted records are visible. :param overwrite: Set to False to ensure that the greenthread local copy of the index is not overwritten. :param kwargs: Extra arguments that might be present, but we ignore because they possibly came in from older rpc messages. 
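# --- Editorial example (illustrative sketch, not part of the upstream Manila source) ---
# Shows how the translation markers defined in manila/i18n.py above are meant
# to be used (the convention the hacking checks elsewhere in this tree
# enforce): '_' for user-facing/exception messages, '_LW'/'_LE'/'_LI'/'_LC'
# for log messages of the matching level, and plain strings for debug logs.
from oslo_log import log

from manila.i18n import _
from manila.i18n import _LW

LOG = log.getLogger(__name__)

def warn_about(resource_id):
    LOG.warning(_LW('Resource %s looks stale.'), resource_id)
    LOG.debug('raw detail for %s', resource_id)  # debug logs are never translated
    return _('Resource %s could not be refreshed.') % resource_id
# --- End editorial example ---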
""" user = kwargs.pop('user', None) tenant = kwargs.pop('tenant', None) if kwargs: LOG.warning(_LW('Arguments dropped when creating context: %s.'), str(kwargs)) super(RequestContext, self).__init__(auth_token=auth_token, user=user_id or user, tenant=project_id or tenant, is_admin=is_admin, request_id=request_id, overwrite=overwrite, roles=roles) self.user_id = self.user self.project_id = self.tenant if self.is_admin is None: self.is_admin = policy.check_is_admin(self.roles) elif self.is_admin and 'admin' not in self.roles: self.roles.append('admin') self.read_deleted = read_deleted self.remote_address = remote_address if not timestamp: timestamp = timeutils.utcnow() if isinstance(timestamp, six.string_types): timestamp = timeutils.parse_strtime(timestamp) self.timestamp = timestamp if service_catalog: self.service_catalog = [s for s in service_catalog if s.get('type') in ('compute', 'volume')] else: self.service_catalog = [] self.quota_class = quota_class def _get_read_deleted(self): return self._read_deleted def _set_read_deleted(self, read_deleted): if read_deleted not in ('no', 'yes', 'only'): raise ValueError(_("read_deleted can only be one of 'no', " "'yes' or 'only', not %r") % read_deleted) self._read_deleted = read_deleted def _del_read_deleted(self): del self._read_deleted read_deleted = property(_get_read_deleted, _set_read_deleted, _del_read_deleted) def to_dict(self): values = super(RequestContext, self).to_dict() values.update({ 'user_id': self.user_id, 'project_id': self.project_id, 'read_deleted': self.read_deleted, 'remote_address': self.remote_address, 'timestamp': self.timestamp.isoformat(), 'quota_class': self.quota_class, 'service_catalog': self.service_catalog}) return values @classmethod def from_dict(cls, values): return cls(**values) def elevated(self, read_deleted=None, overwrite=False): """Return a version of this context with admin flag set.""" ctx = copy.deepcopy(self) ctx.is_admin = True if 'admin' not in ctx.roles: ctx.roles.append('admin') if read_deleted is not None: ctx.read_deleted = read_deleted return ctx def get_admin_context(read_deleted="no"): return RequestContext(user_id=None, project_id=None, is_admin=True, read_deleted=read_deleted, overwrite=False) manila-2.0.0/manila/compute/0000775000567000056710000000000012701407265017034 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/compute/__init__.py0000664000567000056710000000217612701407107021146 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import oslo_config.cfg import oslo_utils.importutils _compute_opts = [ oslo_config.cfg.StrOpt('compute_api_class', default='manila.compute.nova.API', help='The full class name of the ' 'Compute API class to use.'), ] oslo_config.cfg.CONF.register_opts(_compute_opts) def API(): importutils = oslo_utils.importutils compute_api_class = oslo_config.cfg.CONF.compute_api_class cls = importutils.import_class(compute_api_class) return cls() manila-2.0.0/manila/compute/nova.py0000664000567000056710000003207212701407112020344 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all requests to Nova. """ from keystoneauth1 import loading as ks_loading from novaclient import client as nova_client from novaclient import exceptions as nova_exception from novaclient import utils from oslo_config import cfg from oslo_log import log import six from manila.common import client_auth from manila.common.config import core_opts from manila.db import base from manila import exception from manila.i18n import _ NOVA_GROUP = 'nova' nova_deprecated_opts = [ cfg.StrOpt('nova_admin_username', default='nova', help='Nova admin username.', deprecated_group='DEFAULT', deprecated_for_removal=True, deprecated_reason="This option isn't used any longer. Please " "use [nova] username instead."), cfg.StrOpt('nova_admin_password', help='Nova admin password.', deprecated_group='DEFAULT', deprecated_for_removal=True, deprecated_reason="This option isn't used any longer. Please " "use [nova] password instead."), cfg.StrOpt('nova_admin_tenant_name', default='service', help='Nova admin tenant name.', deprecated_group='DEFAULT', deprecated_for_removal=True, deprecated_reason="This option isn't used any longer. Please " "use [nova] tenant instead."), cfg.StrOpt('nova_admin_auth_url', default='http://localhost:5000/v2.0', help='Identity service URL.', deprecated_group='DEFAULT', deprecated_for_removal=True, deprecated_reason="This option isn't used any longer. Please " "use [nova] url instead."), cfg.StrOpt('nova_catalog_info', default='compute:nova:publicURL', help='Info to match when looking for nova in the service ' 'catalog. 
Format is separated values of the form: ' '::', deprecated_group='DEFAULT', deprecated_for_removal=True, deprecated_reason="This option isn't used any longer."), cfg.StrOpt('nova_catalog_admin_info', default='compute:nova:adminURL', help='Same as nova_catalog_info, but for admin endpoint.', deprecated_group='DEFAULT', deprecated_for_removal=True, deprecated_reason="This option isn't used any longer."), ] nova_opts = [ cfg.StrOpt('api_microversion', default='2.10', deprecated_group="DEFAULT", deprecated_name="nova_api_microversion", help='Version of Nova API to be used.'), cfg.StrOpt('ca_certificates_file', deprecated_group="DEFAULT", deprecated_name="nova_ca_certificates_file", help='Location of CA certificates file to use for nova client ' 'requests.'), cfg.BoolOpt('api_insecure', default=False, deprecated_group="DEFAULT", deprecated_name="nova_api_insecure", help='Allow to perform insecure SSL requests to nova.'), ] CONF = cfg.CONF CONF.register_opts(nova_deprecated_opts) CONF.register_opts(core_opts) CONF.register_opts(nova_opts, NOVA_GROUP) ks_loading.register_session_conf_options(CONF, NOVA_GROUP) ks_loading.register_auth_conf_options(CONF, NOVA_GROUP) LOG = log.getLogger(__name__) def list_opts(): return client_auth.AuthClientLoader.list_opts(NOVA_GROUP) auth_obj = None def novaclient(context): global auth_obj if not auth_obj: deprecated_opts_for_v2 = { 'username': CONF.nova_admin_username, 'password': CONF.nova_admin_password, 'tenant_name': CONF.nova_admin_tenant_name, 'auth_url': CONF.nova_admin_auth_url, } auth_obj = client_auth.AuthClientLoader( client_class=nova_client.Client, exception_module=nova_exception, cfg_group=NOVA_GROUP, deprecated_opts_for_v2=deprecated_opts_for_v2) return auth_obj.get_client(context, version=CONF[NOVA_GROUP].api_microversion, insecure=CONF[NOVA_GROUP].api_insecure, cacert=CONF[NOVA_GROUP].ca_certificates_file) def _untranslate_server_summary_view(server): """Maps keys for servers summary view.""" d = {} d['id'] = server.id d['status'] = server.status d['flavor'] = server.flavor['id'] d['name'] = server.name d['image'] = server.image['id'] d['created'] = server.created d['addresses'] = server.addresses d['networks'] = server.networks d['tenant_id'] = server.tenant_id d['user_id'] = server.user_id d['security_groups'] = getattr(server, 'security_groups', []) return d def _to_dict(obj): if isinstance(obj, dict): return obj elif hasattr(obj, 'to_dict'): return obj.to_dict() else: return obj.__dict__ def translate_server_exception(method): """Transforms the exception for the instance. Note: keeps its traceback intact. 
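# --- Editorial configuration sketch (not part of the upstream Manila source) ---
# The deprecated DEFAULT-group 'nova_admin_*' options above are superseded by
# a [nova] section.  'api_microversion', 'api_insecure' and
# 'ca_certificates_file' come from 'nova_opts' above; the keystoneauth
# session/auth option names are registered via ks_loading and are listed
# here only as assumptions.
#
#   [nova]
#   api_microversion = 2.10
#   api_insecure = False
#   # ca_certificates_file = /path/to/ca.pem
#   # plus keystoneauth options registered by ks_loading, e.g.
#   # auth_type / auth_url / username / password / project_name (assumed)
# --- End editorial sketch ---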
""" @six.wraps(method) def wrapper(self, ctx, instance_id, *args, **kwargs): try: res = method(self, ctx, instance_id, *args, **kwargs) return res except nova_exception.ClientException as e: if isinstance(e, nova_exception.NotFound): raise exception.InstanceNotFound(instance_id=instance_id) elif isinstance(e, nova_exception.BadRequest): raise exception.InvalidInput(reason=six.text_type(e)) else: raise exception.ManilaException(e) return wrapper class API(base.Base): """API for interacting with novaclient.""" def server_create(self, context, name, image, flavor, key_name=None, user_data=None, security_groups=None, block_device_mapping=None, block_device_mapping_v2=None, nics=None, availability_zone=None, instance_count=1, admin_pass=None, meta=None): return _untranslate_server_summary_view( novaclient(context).servers.create( name, image, flavor, userdata=user_data, security_groups=security_groups, key_name=key_name, block_device_mapping=block_device_mapping, block_device_mapping_v2=block_device_mapping_v2, nics=nics, availability_zone=availability_zone, min_count=instance_count, admin_pass=admin_pass, meta=meta) ) def server_delete(self, context, instance): novaclient(context).servers.delete(instance) @translate_server_exception def server_get(self, context, instance_id): return _untranslate_server_summary_view( novaclient(context).servers.get(instance_id) ) def server_get_by_name_or_id(self, context, instance_name_or_id): try: server = utils.find_resource( novaclient(context).servers, instance_name_or_id) except nova_exception.CommandError: # we did not find the server in the current tenant, # and proceed searching in all tenants try: server = utils.find_resource( novaclient(context).servers, instance_name_or_id, all_tenants=True) except nova_exception.CommandError as e: msg = _("Failed to get Nova VM. 
%s") % e raise exception.ManilaException(msg) return _untranslate_server_summary_view(server) @translate_server_exception def server_pause(self, context, instance_id): novaclient(context).servers.pause(instance_id) @translate_server_exception def server_unpause(self, context, instance_id): novaclient(context).servers.unpause(instance_id) @translate_server_exception def server_suspend(self, context, instance_id): novaclient(context).servers.suspend(instance_id) @translate_server_exception def server_resume(self, context, instance_id): novaclient(context).servers.resume(instance_id) @translate_server_exception def server_reboot(self, context, instance_id, soft_reboot=False): hardness = 'SOFT' if soft_reboot else 'HARD' novaclient(context).servers.reboot(instance_id, hardness) @translate_server_exception def server_rebuild(self, context, instance_id, image_id, password=None): return _untranslate_server_summary_view( novaclient(context).servers.rebuild(instance_id, image_id, password) ) @translate_server_exception def instance_volume_attach(self, context, instance_id, volume_id, device=None): if device == 'auto': device = None return novaclient(context).volumes.create_server_volume(instance_id, volume_id, device) @translate_server_exception def instance_volume_detach(self, context, instance_id, att_id): return novaclient(context).volumes.delete_server_volume(instance_id, att_id) @translate_server_exception def instance_volumes_list(self, context, instance_id): from manila.volume import cinder volumes = novaclient(context).volumes.get_server_volumes(instance_id) for volume in volumes: volume_data = cinder.cinderclient(context).volumes.get(volume.id) volume.name = volume_data.name return volumes @translate_server_exception def server_update(self, context, instance_id, name): return _untranslate_server_summary_view( novaclient(context).servers.update(instance_id, name=name) ) def update_server_volume(self, context, instance_id, attachment_id, new_volume_id): novaclient(context).volumes.update_server_volume(instance_id, attachment_id, new_volume_id) def keypair_create(self, context, name): return novaclient(context).keypairs.create(name) def keypair_import(self, context, name, public_key): return novaclient(context).keypairs.create(name, public_key) def keypair_delete(self, context, keypair_id): novaclient(context).keypairs.delete(keypair_id) def keypair_list(self, context): return novaclient(context).keypairs.list() def image_list(self, context): return novaclient(context).images.list() def add_security_group_to_server(self, context, server, security_group): return novaclient(context).servers.add_security_group(server, security_group) def security_group_create(self, context, name, description=""): return novaclient(context).security_groups.create(name, description) def security_group_get(self, context, group_id): return novaclient(context).security_groups.get(group_id) def security_group_list(self, context, search_opts=None): return novaclient(context).security_groups.list(search_opts) def security_group_rule_create(self, context, parent_group_id, ip_protocol=None, from_port=None, to_port=None, cidr=None, group_id=None): return novaclient(context).security_group_rules.create( parent_group_id, ip_protocol, from_port, to_port, cidr, group_id) def security_group_rule_delete(self, context, rule): return novaclient(context).security_group_rules.delete(rule) def fixed_ip_reserve(self, context, fixed_ip): return novaclient(context).fixed_ips.reserve(fixed_ip) def fixed_ip_unreserve(self, context, 
fixed_ip): return novaclient(context).fixed_ips.unreserve(fixed_ip) def fixed_ip_get(self, context, fixed_ip): return _to_dict(novaclient(context).fixed_ips.get(fixed_ip)) def network_get(self, context, network_id): """Return network data by its ID.""" return _to_dict(novaclient(context).networks.get(network_id)) manila-2.0.0/manila/hacking/0000775000567000056710000000000012701407265016764 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/hacking/__init__.py0000664000567000056710000000000012701407107021056 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/hacking/checks.py0000664000567000056710000002217512701407112020574 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012, Cloudscaling # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ast import re import pep8 """ Guidelines for writing new hacking checks - Use only for Manila specific tests. OpenStack general tests should be submitted to the common 'hacking' module. - Pick numbers in the range M3xx. Find the current test with the highest allocated number and then pick the next value. - Keep the test method code in the source file ordered based on the M3xx value. - List the new rule in the top level HACKING.rst file - Add test cases for each new rule to manila/tests/test_hacking.py """ UNDERSCORE_IMPORT_FILES = [] log_translation = re.compile( r"(.)*LOG\.(audit|error|info|critical|exception)\(\s*('|\")") log_translation_LC = re.compile( r"(.)*LOG\.(critical)\(\s*(_\(|'|\")") log_translation_LE = re.compile( r"(.)*LOG\.(error|exception)\(\s*(_\(|'|\")") log_translation_LI = re.compile( r"(.)*LOG\.(info)\(\s*(_\(|'|\")") log_translation_LW = re.compile( r"(.)*LOG\.(warning|warn)\(\s*(_\(|'|\")") translated_log = re.compile( r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)" "\(\s*_\(\s*('|\")") string_translation = re.compile(r"[^_]*_\(\s*('|\")") underscore_import_check = re.compile(r"(.)*import _$") underscore_import_check_multi = re.compile(r"(.)*import (.)*_, (.)*") # We need this for cases where they have created their own _ function. custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*") oslo_namespace_imports = re.compile(r"from[\s]*oslo[.](.*)") dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)") class BaseASTChecker(ast.NodeVisitor): """Provides a simple framework for writing AST-based checks. Subclasses should implement visit_* methods like any other AST visitor implementation. When they detect an error for a particular node the method should call ``self.add_error(offending_node)``. Details about where in the code the error occurred will be pulled from the node object. Subclasses should also provide a class variable named CHECK_DESC to be used for the human readable error message. """ CHECK_DESC = 'No check message specified' def __init__(self, tree, filename): """This object is created automatically by pep8. 
:param tree: an AST tree :param filename: name of the file being analyzed (ignored by our checks) """ self._tree = tree self._errors = [] def run(self): """Called automatically by pep8.""" self.visit(self._tree) return self._errors def add_error(self, node, message=None): """Add an error caused by a node to the list of errors for pep8.""" message = message or self.CHECK_DESC error = (node.lineno, node.col_offset, message, self.__class__) self._errors.append(error) def _check_call_names(self, call_node, names): if isinstance(call_node, ast.Call): if isinstance(call_node.func, ast.Name): if call_node.func.id in names: return True return False def no_translate_debug_logs(logical_line, filename): """Check for 'LOG.debug(_(' As per our translation policy, https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation we shouldn't translate debug level logs. * This check assumes that 'LOG' is a logger. * Use filename so we can start enforcing this in specific folders instead of needing to do so all at once. M319 """ if logical_line.startswith("LOG.debug(_("): yield(0, "M319 Don't translate debug level logs") def validate_log_translations(logical_line, physical_line, filename): # Translations are not required in the test and tempest # directories. if ("manila/tests" in filename or "manila_tempest_tests" in filename or "contrib/tempest" in filename): return if pep8.noqa(physical_line): return msg = "M327: LOG.critical messages require translations `_LC()`!" if log_translation_LC.match(logical_line): yield (0, msg) msg = ("M328: LOG.error and LOG.exception messages require translations " "`_LE()`!") if log_translation_LE.match(logical_line): yield (0, msg) msg = "M329: LOG.info messages require translations `_LI()`!" if log_translation_LI.match(logical_line): yield (0, msg) msg = "M330: LOG.warning messages require translations `_LW()`!" if log_translation_LW.match(logical_line): yield (0, msg) msg = "M331: Log messages require translations!" if log_translation.match(logical_line): yield (0, msg) def check_explicit_underscore_import(logical_line, filename): """Check for explicit import of the _ function We need to ensure that any files that are using the _() function to translate logs are explicitly importing the _ function. We can't trust unit test to catch whether the import has been added so we need to check for it here. """ # Build a list of the files that have _ imported. No further # checking needed once it is found. if filename in UNDERSCORE_IMPORT_FILES: pass elif (underscore_import_check.match(logical_line) or underscore_import_check_multi.match(logical_line) or custom_underscore_check.match(logical_line)): UNDERSCORE_IMPORT_FILES.append(filename) elif (translated_log.match(logical_line) or string_translation.match(logical_line)): yield(0, "M323: Found use of _() without explicit import of _ !") class CheckForStrExc(BaseASTChecker): """Checks for the use of str() on an exception. This currently only handles the case where str() is used in the scope of an exception handler. If the exception is passed into a function, returned from an assertRaises, or used on an exception created in the same scope, this does not catch it. """ CHECK_DESC = ('M325 str() cannot be used on an exception. 
' 'Remove or use six.text_type()') def __init__(self, tree, filename): super(CheckForStrExc, self).__init__(tree, filename) self.name = [] self.already_checked = [] def visit_TryExcept(self, node): for handler in node.handlers: if handler.name: self.name.append(handler.name.id) super(CheckForStrExc, self).generic_visit(node) self.name = self.name[:-1] else: super(CheckForStrExc, self).generic_visit(node) def visit_Call(self, node): if self._check_call_names(node, ['str']): if node not in self.already_checked: self.already_checked.append(node) if isinstance(node.args[0], ast.Name): if node.args[0].id in self.name: self.add_error(node.args[0]) super(CheckForStrExc, self).generic_visit(node) class CheckForTransAdd(BaseASTChecker): """Checks for the use of concatenation on a translated string. Translations should not be concatenated with other strings, but should instead include the string being added to the translated string to give the translators the most information. """ CHECK_DESC = ('M326 Translated messages cannot be concatenated. ' 'String should be included in translated message.') TRANS_FUNC = ['_', '_LI', '_LW', '_LE', '_LC'] def visit_BinOp(self, node): if isinstance(node.op, ast.Add): if self._check_call_names(node.left, self.TRANS_FUNC): self.add_error(node.left) elif self._check_call_names(node.right, self.TRANS_FUNC): self.add_error(node.right) super(CheckForTransAdd, self).generic_visit(node) def check_oslo_namespace_imports(logical_line, physical_line, filename): if pep8.noqa(physical_line): return if re.match(oslo_namespace_imports, logical_line): msg = ("M333: '%s' must be used instead of '%s'.") % ( logical_line.replace('oslo.', 'oslo_'), logical_line) yield(0, msg) def dict_constructor_with_list_copy(logical_line): msg = ("M336: Must use a dict comprehension instead of a dict constructor" " with a sequence of key-value pairs." ) if dict_constructor_with_list_copy_re.match(logical_line): yield (0, msg) def factory(register): register(validate_log_translations) register(check_explicit_underscore_import) register(no_translate_debug_logs) register(CheckForStrExc) register(CheckForTransAdd) register(check_oslo_namespace_imports) register(dict_constructor_with_list_copy) manila-2.0.0/manila/service.py0000664000567000056710000003306012701407107017367 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
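# --- Editorial example (illustrative sketch, not part of the upstream Manila source) ---
# Quick self-check of a few of the module-level hacking patterns defined in
# manila/hacking/checks.py above, showing the kind of line each one flags.
# It assumes the 'pep8' module imported by checks.py is installed; the
# AST-based checks (M325/M326) need a real flake8 run instead.
from manila.hacking import checks

assert checks.log_translation_LE.match("    LOG.error('boom')")
assert not checks.log_translation_LE.match("    LOG.error(_LE('boom'))")
assert checks.oslo_namespace_imports.match("from oslo.config import cfg")
assert checks.dict_constructor_with_list_copy_re.match(
    "d = dict([(k, v) for k, v in items])")
# --- End editorial example ---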
"""Generic Node base class for all workers that run on hosts.""" import inspect import os import random from oslo_config import cfg from oslo_log import log import oslo_messaging as messaging from oslo_service import loopingcall from oslo_service import service from oslo_utils import importutils from manila import context from manila import db from manila import exception from manila.i18n import _ from manila.i18n import _LE from manila.i18n import _LI from manila.i18n import _LW from manila import rpc from manila import version from manila import wsgi LOG = log.getLogger(__name__) service_opts = [ cfg.IntOpt('report_interval', default=10, help='Seconds between nodes reporting state to datastore.'), cfg.IntOpt('periodic_interval', default=60, help='Seconds between running periodic tasks.'), cfg.IntOpt('periodic_fuzzy_delay', default=60, help='Range of seconds to randomly delay when starting the ' 'periodic task scheduler to reduce stampeding. ' '(Disable by setting to 0)'), cfg.StrOpt('osapi_share_listen', default="::", help='IP address for OpenStack Share API to listen on.'), cfg.PortOpt('osapi_share_listen_port', default=8786, help='Port for OpenStack Share API to listen on.'), cfg.IntOpt('osapi_share_workers', default=1, help='Number of workers for OpenStack Share API service.'), ] CONF = cfg.CONF CONF.register_opts(service_opts) class Service(service.Service): """Service object for binaries running on hosts. A service takes a manager and enables rpc by listening to queues based on topic. It also periodically runs tasks on the manager and reports it state to the database services table. """ def __init__(self, host, binary, topic, manager, report_interval=None, periodic_interval=None, periodic_fuzzy_delay=None, service_name=None, *args, **kwargs): super(Service, self).__init__() if not rpc.initialized(): rpc.init(CONF) self.host = host self.binary = binary self.topic = topic self.manager_class_name = manager manager_class = importutils.import_class(self.manager_class_name) self.manager = manager_class(host=self.host, service_name=service_name, *args, **kwargs) self.report_interval = report_interval self.periodic_interval = periodic_interval self.periodic_fuzzy_delay = periodic_fuzzy_delay self.saved_args, self.saved_kwargs = args, kwargs self.timers = [] def start(self): version_string = version.version_string() LOG.info(_LI('Starting %(topic)s node (version %(version_string)s)'), {'topic': self.topic, 'version_string': version_string}) self.model_disconnected = False ctxt = context.get_admin_context() try: service_ref = db.service_get_by_args(ctxt, self.host, self.binary) self.service_id = service_ref['id'] except exception.NotFound: self._create_service_ref(ctxt) LOG.debug("Creating RPC server for service %s.", self.topic) target = messaging.Target(topic=self.topic, server=self.host) endpoints = [self.manager] endpoints.extend(self.manager.additional_endpoints) self.rpcserver = rpc.get_server(target, endpoints) self.rpcserver.start() self.manager.init_host() if self.report_interval: pulse = loopingcall.FixedIntervalLoopingCall(self.report_state) pulse.start(interval=self.report_interval, initial_delay=self.report_interval) self.timers.append(pulse) if self.periodic_interval: if self.periodic_fuzzy_delay: initial_delay = random.randint(0, self.periodic_fuzzy_delay) else: initial_delay = None periodic = loopingcall.FixedIntervalLoopingCall( self.periodic_tasks) periodic.start(interval=self.periodic_interval, initial_delay=initial_delay) self.timers.append(periodic) def 
_create_service_ref(self, context): zone = CONF.storage_availability_zone service_ref = db.service_create(context, {'host': self.host, 'binary': self.binary, 'topic': self.topic, 'report_count': 0, 'availability_zone': zone}) self.service_id = service_ref['id'] def __getattr__(self, key): manager = self.__dict__.get('manager', None) return getattr(manager, key) @classmethod def create(cls, host=None, binary=None, topic=None, manager=None, report_interval=None, periodic_interval=None, periodic_fuzzy_delay=None, service_name=None): """Instantiates class and passes back application object. :param host: defaults to CONF.host :param binary: defaults to basename of executable :param topic: defaults to bin_name - 'manila-' part :param manager: defaults to CONF._manager :param report_interval: defaults to CONF.report_interval :param periodic_interval: defaults to CONF.periodic_interval :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay """ if not host: host = CONF.host if not binary: binary = os.path.basename(inspect.stack()[-1][1]) if not topic: topic = binary if not manager: subtopic = topic.rpartition('manila-')[2] manager = CONF.get('%s_manager' % subtopic, None) if report_interval is None: report_interval = CONF.report_interval if periodic_interval is None: periodic_interval = CONF.periodic_interval if periodic_fuzzy_delay is None: periodic_fuzzy_delay = CONF.periodic_fuzzy_delay service_obj = cls(host, binary, topic, manager, report_interval=report_interval, periodic_interval=periodic_interval, periodic_fuzzy_delay=periodic_fuzzy_delay, service_name=service_name) return service_obj def kill(self): """Destroy the service object in the datastore.""" self.stop() try: db.service_destroy(context.get_admin_context(), self.service_id) except exception.NotFound: LOG.warning(_LW('Service killed that has no database entry.')) def stop(self): # Try to shut the connection down, but if we get any sort of # errors, go ahead and ignore them.. as we're shutting down anyway try: self.rpcserver.stop() except Exception: pass for x in self.timers: try: x.stop() except Exception: pass self.timers = [] super(Service, self).stop() def wait(self): for x in self.timers: try: x.wait() except Exception: pass def periodic_tasks(self, raise_on_error=False): """Tasks to be run at a periodic interval.""" ctxt = context.get_admin_context() self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error) def report_state(self): """Update the state of this service in the datastore.""" ctxt = context.get_admin_context() zone = CONF.storage_availability_zone state_catalog = {} try: try: service_ref = db.service_get(ctxt, self.service_id) except exception.NotFound: LOG.debug('The service database object disappeared, ' 'Recreating it.') self._create_service_ref(ctxt) service_ref = db.service_get(ctxt, self.service_id) state_catalog['report_count'] = service_ref['report_count'] + 1 if zone != service_ref['availability_zone']['name']: state_catalog['availability_zone'] = zone db.service_update(ctxt, self.service_id, state_catalog) # TODO(termie): make this pattern be more elegant. 
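# --- Editorial example (illustrative sketch, not part of the upstream Manila source) ---
# Minimal sketch of the looping-call pattern Service.start() uses above: a
# FixedIntervalLoopingCall wraps a callable and re-runs it every 'interval'
# seconds, optionally after a randomized initial delay (the
# 'periodic_fuzzy_delay' behaviour).  The callable below is a stand-in.
import random

from oslo_service import loopingcall

def _report_state_stub():
    print('reporting service state')

pulse = loopingcall.FixedIntervalLoopingCall(_report_state_stub)
pulse.start(interval=10, initial_delay=random.randint(0, 60))
# ... later, on shutdown:
# pulse.stop()
# pulse.wait()
# --- End editorial example ---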
if getattr(self, 'model_disconnected', False): self.model_disconnected = False LOG.error(_LE('Recovered model server connection!')) # TODO(vish): this should probably only catch connection errors except Exception: # pylint: disable=W0702 if not getattr(self, 'model_disconnected', False): self.model_disconnected = True LOG.exception(_LE('model server went away')) class WSGIService(service.ServiceBase): """Provides ability to launch API from a 'paste' configuration.""" def __init__(self, name, loader=None): """Initialize, but do not start the WSGI server. :param name: The name of the WSGI server given to the loader. :param loader: Loads the WSGI application using the given name. :returns: None """ self.name = name self.manager = self._get_manager() self.loader = loader or wsgi.Loader() if not rpc.initialized(): rpc.init(CONF) self.app = self.loader.load_app(name) self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0") self.port = getattr(CONF, '%s_listen_port' % name, 0) self.workers = getattr(CONF, '%s_workers' % name, None) if self.workers is not None and self.workers < 1: LOG.warning( _LW("Value of config option %(name)s_workers must be integer " "greater than 1. Input value ignored.") % {'name': name}) # Reset workers to default self.workers = None self.server = wsgi.Server(name, self.app, host=self.host, port=self.port) def _get_manager(self): """Initialize a Manager object appropriate for this service. Use the service name to look up a Manager subclass from the configuration and initialize an instance. If no class name is configured, just return None. :returns: a Manager instance, or None. """ fl = '%s_manager' % self.name if fl not in CONF: return None manager_class_name = CONF.get(fl, None) if not manager_class_name: return None manager_class = importutils.import_class(manager_class_name) return manager_class() def start(self): """Start serving this service using loaded configuration. Also, retrieve updated port number in case '0' was passed in, which indicates a random port should be used. :returns: None """ if self.manager: self.manager.init_host() self.server.start() self.port = self.server.port def stop(self): """Stop serving this API. :returns: None """ self.server.stop() def wait(self): """Wait for the service to stop serving this API. :returns: None """ self.server.wait() def reset(self): """Reset server greenpool size to default. 
:returns: None """ self.server.reset() def process_launcher(): return service.ProcessLauncher(CONF) # NOTE(vish): the global launcher is to maintain the existing # functionality of calling service.serve + # service.wait _launcher = None def serve(server, workers=None): global _launcher if _launcher: raise RuntimeError(_('serve() can only be called once')) _launcher = service.launch(CONF, server, workers=workers) def wait(): LOG.debug('Full set of CONF:') for flag in CONF: flag_get = CONF.get(flag, None) # hide flag contents from log if contains a password # should use secret flag when switch over to openstack-common if ("_password" in flag or "_key" in flag or (flag == "sql_connection" and "mysql:" in flag_get)): LOG.debug('%(flag)s : FLAG SET ', {"flag": flag}) else: LOG.debug('%(flag)s : %(flag_get)s', {"flag": flag, "flag_get": flag_get}) try: _launcher.wait() except KeyboardInterrupt: _launcher.stop() rpc.cleanup() manila-2.0.0/manila/exception.py0000664000567000056710000005143512701407107017733 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Manila base exception handling. Includes decorator for re-raising Manila-type exceptions. SHOULD include dedicated exception logging. """ import re from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log import six import webob.exc from manila.i18n import _ from manila.i18n import _LE LOG = log.getLogger(__name__) exc_log_opts = [ cfg.BoolOpt('fatal_exception_format_errors', default=False, help='Whether to make exception message format errors fatal.'), ] CONF = cfg.CONF CONF.register_opts(exc_log_opts) ProcessExecutionError = processutils.ProcessExecutionError class ConvertedException(webob.exc.WSGIHTTPException): def __init__(self, code=400, title="", explanation=""): self.code = code self.title = title self.explanation = explanation super(ConvertedException, self).__init__() class Error(Exception): pass class ManilaException(Exception): """Base Manila Exception To correctly use this class, inherit from it and define a 'message' property. That message will get printf'd with the keyword arguments provided to the constructor. 
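# --- Editorial example (illustrative sketch, not part of the upstream Manila source) ---
# Demonstrates the convention the ManilaException docstring above describes:
# a subclass defines a 'message' template and callers supply the template's
# keyword arguments to the constructor.  'WidgetNotFound' is a made-up class
# used only for illustration.
from manila import exception
from manila.i18n import _

class WidgetNotFound(exception.ManilaException):
    message = _("Widget %(widget_id)s could not be found.")
    code = 404

try:
    raise WidgetNotFound(widget_id='abc123')
except exception.ManilaException as err:
    formatted = err.msg  # "Widget abc123 could not be found."
# --- End editorial example ---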
""" message = _("An unknown exception occurred.") code = 500 headers = {} safe = False def __init__(self, message=None, detail_data={}, **kwargs): self.kwargs = kwargs self.detail_data = detail_data if 'code' not in self.kwargs: try: self.kwargs['code'] = self.code except AttributeError: pass for k, v in self.kwargs.items(): if isinstance(v, Exception): self.kwargs[k] = six.text_type(v) if not message: try: message = self.message % kwargs except Exception as e: # kwargs doesn't match a variable in the message # log the issue and the kwargs LOG.exception(_LE('Exception in string format operation.')) for name, value in kwargs.items(): LOG.error(_LE("%(name)s: %(value)s"), { 'name': name, 'value': value}) if CONF.fatal_exception_format_errors: raise e else: # at least get the core message out if something happened message = self.message elif isinstance(message, Exception): message = six.text_type(message) if re.match('.*[^\.]\.\.$', message): message = message[:-1] self.msg = message super(ManilaException, self).__init__(message) class NetworkException(ManilaException): message = _("Exception due to network failure.") class NetworkBadConfigurationException(NetworkException): message = _("Bad network configuration: %(reason)s.") class BadConfigurationException(ManilaException): message = _("Bad configuration: %(reason)s.") class NotAuthorized(ManilaException): message = _("Not authorized.") code = 403 class AdminRequired(NotAuthorized): message = _("User does not have admin privileges.") class PolicyNotAuthorized(NotAuthorized): message = _("Policy doesn't allow %(action)s to be performed.") class Conflict(ManilaException): message = _("%(err)s") code = 409 class Invalid(ManilaException): message = _("Unacceptable parameters.") code = 400 class InvalidRequest(Invalid): message = _("The request is invalid.") class InvalidResults(Invalid): message = _("The results are invalid.") class InvalidInput(Invalid): message = _("Invalid input received: %(reason)s.") class InvalidContentType(Invalid): message = _("Invalid content type %(content_type)s.") class InvalidHost(Invalid): message = _("Invalid host: %(reason)s") # Cannot be templated as the error syntax varies. # msg needs to be constructed when raised. class InvalidParameterValue(Invalid): message = _("%(err)s") class InvalidUUID(Invalid): message = _("Expected a uuid but received %(uuid)s.") class InvalidDriverMode(Invalid): message = _("Invalid driver mode: %(driver_mode)s.") class InvalidAPIVersionString(Invalid): message = _("API Version String %(version)s is of invalid format. Must " "be of format MajorNum.MinorNum.") class VersionNotFoundForAPIMethod(Invalid): message = _("API version %(version)s is not supported on this method.") class InvalidGlobalAPIVersion(Invalid): message = _("Version %(req_ver)s is not supported by the API. 
Minimum " "is %(min_ver)s and maximum is %(max_ver)s.") class InvalidCapacity(Invalid): message = _("Invalid capacity: %(name)s = %(value)s.") class NotFound(ManilaException): message = _("Resource could not be found.") code = 404 safe = True class Found(ManilaException): message = _("Resource was found.") code = 302 safe = True class InUse(ManilaException): message = _("Resource is in use.") class AvailabilityZoneNotFound(NotFound): message = _("Availability zone %(id)s could not be found.") class ShareNetworkNotFound(NotFound): message = _("Share network %(share_network_id)s could not be found.") class ShareServerNotFound(NotFound): message = _("Share server %(share_server_id)s could not be found.") class ShareServerNotFoundByFilters(ShareServerNotFound): message = _("Share server could not be found by " "filters: %(filters_description)s.") class ShareServerInUse(InUse): message = _("Share server %(share_server_id)s is in use.") class InvalidShareServer(Invalid): message = _("Share server %(share_server_id)s is not valid.") class ShareMigrationFailed(ManilaException): message = _("Share migration failed: %(reason)s") class ShareDataCopyFailed(ManilaException): message = _("Share Data copy failed: %(reason)s") class ShareDataCopyCancelled(ManilaException): message = _("Copy of contents from share instance %(src_instance)s " "to share instance %(dest_instance)s was cancelled.") class ServiceIPNotFound(ManilaException): message = _("Service IP for instance not found: %(reason)s") class AdminIPNotFound(ManilaException): message = _("Admin port IP for service instance not found: %(reason)s") class ShareServerNotCreated(ManilaException): message = _("Share server %(share_server_id)s failed on creation.") class ServiceNotFound(NotFound): message = _("Service %(service_id)s could not be found.") class ServiceIsDown(Invalid): message = _("Service %(service)s is down.") class HostNotFound(NotFound): message = _("Host %(host)s could not be found.") class SchedulerHostFilterNotFound(NotFound): message = _("Scheduler host filter %(filter_name)s could not be found.") class SchedulerHostWeigherNotFound(NotFound): message = _("Scheduler host weigher %(weigher_name)s could not be found.") class HostBinaryNotFound(NotFound): message = _("Could not find binary %(binary)s on host %(host)s.") class InvalidReservationExpiration(Invalid): message = _("Invalid reservation expiration %(expire)s.") class InvalidQuotaValue(Invalid): message = _("Change would make usage less than 0 for the following " "resources: %(unders)s.") class QuotaNotFound(NotFound): message = _("Quota could not be found.") class QuotaExists(ManilaException): message = _("Quota exists for project %(project_id)s, " "resource %(resource)s.") class QuotaResourceUnknown(QuotaNotFound): message = _("Unknown quota resources %(unknown)s.") class ProjectUserQuotaNotFound(QuotaNotFound): message = _("Quota for user %(user_id)s in project %(project_id)s " "could not be found.") class ProjectQuotaNotFound(QuotaNotFound): message = _("Quota for project %(project_id)s could not be found.") class QuotaClassNotFound(QuotaNotFound): message = _("Quota class %(class_name)s could not be found.") class QuotaUsageNotFound(QuotaNotFound): message = _("Quota usage for project %(project_id)s could not be found.") class ReservationNotFound(QuotaNotFound): message = _("Quota reservation %(uuid)s could not be found.") class OverQuota(ManilaException): message = _("Quota exceeded for resources: %(overs)s.") class MigrationNotFound(NotFound): message = _("Migration 
%(migration_id)s could not be found.") class MigrationNotFoundByStatus(MigrationNotFound): message = _("Migration not found for instance %(instance_id)s " "with status %(status)s.") class FileNotFound(NotFound): message = _("File %(file_path)s could not be found.") class MigrationError(ManilaException): message = _("Migration error: %(reason)s.") class MalformedRequestBody(ManilaException): message = _("Malformed message body: %(reason)s.") class ConfigNotFound(NotFound): message = _("Could not find config at %(path)s.") class PasteAppNotFound(NotFound): message = _("Could not load paste app '%(name)s' from %(path)s.") class NoValidHost(ManilaException): message = _("No valid host was found. %(reason)s.") class WillNotSchedule(ManilaException): message = _("Host %(host)s is not up or doesn't exist.") class QuotaError(ManilaException): message = _("Quota exceeded: code=%(code)s.") code = 413 headers = {'Retry-After': 0} safe = True class ShareSizeExceedsAvailableQuota(QuotaError): message = _("Requested share exceeds allowed gigabytes quota.") class SnapshotSizeExceedsAvailableQuota(QuotaError): message = _("Requested snapshot exceeds allowed gigabytes quota.") class ShareLimitExceeded(QuotaError): message = _("Maximum number of shares allowed (%(allowed)d) exceeded.") class SnapshotLimitExceeded(QuotaError): message = _("Maximum number of snapshots allowed (%(allowed)d) exceeded.") class ShareNetworksLimitExceeded(QuotaError): message = _("Maximum number of share-networks " "allowed (%(allowed)d) exceeded.") class GlusterfsException(ManilaException): message = _("Unknown Gluster exception.") class InvalidShare(Invalid): message = _("Invalid share: %(reason)s.") class ShareBusyException(Invalid): message = _("Share is busy with an active task: %(reason)s.") class InvalidShareInstance(Invalid): message = _("Invalid share instance: %(reason)s.") class ManageInvalidShare(InvalidShare): message = _("Manage existing share failed due to " "invalid share: %(reason)s") class UnmanageInvalidShare(InvalidShare): message = _("Unmanage existing share failed due to " "invalid share: %(reason)s") class PortLimitExceeded(QuotaError): message = _("Maximum number of ports exceeded.") class ShareAccessExists(ManilaException): message = _("Share access %(access_type)s:%(access)s exists.") class InvalidShareAccess(Invalid): message = _("Invalid access rule: %(reason)s") class InvalidShareAccessLevel(Invalid): message = _("Invalid or unsupported share access level: %(level)s.") class ShareBackendException(ManilaException): message = _("Share backend error: %(msg)s.") class ExportLocationNotFound(NotFound): message = _("Export location %(uuid)s could not be found.") class ShareNotFound(NotFound): message = _("Share %(share_id)s could not be found.") class ShareSnapshotNotFound(NotFound): message = _("Snapshot %(snapshot_id)s could not be found.") class ShareSnapshotInstanceNotFound(NotFound): message = _("Snapshot instance %(instance_id)s could not be found.") class ShareSnapshotNotSupported(ManilaException): message = _("Share %(share_name)s does not support snapshots.") class ShareSnapshotIsBusy(ManilaException): message = _("Deleting snapshot %(snapshot_name)s that has " "dependent shares.") class InvalidShareSnapshot(Invalid): message = _("Invalid share snapshot: %(reason)s.") class ManageInvalidShareSnapshot(InvalidShareSnapshot): message = _("Manage existing share snapshot failed due to " "invalid share snapshot: %(reason)s.") class UnmanageInvalidShareSnapshot(InvalidShareSnapshot): message = _("Unmanage 
existing share snapshot failed due to " "invalid share snapshot: %(reason)s.") class ShareMetadataNotFound(NotFound): message = _("Metadata item is not found.") class InvalidShareMetadata(Invalid): message = _("Invalid metadata.") class InvalidShareMetadataSize(Invalid): message = _("Invalid metadata size.") class SecurityServiceNotFound(NotFound): message = _("Security service %(security_service_id)s could not be found.") class ShareNetworkSecurityServiceAssociationError(ManilaException): message = _("Failed to associate share network %(share_network_id)s" " and security service %(security_service_id)s: %(reason)s.") class ShareNetworkSecurityServiceDissociationError(ManilaException): message = _("Failed to dissociate share network %(share_network_id)s" " and security service %(security_service_id)s: %(reason)s.") class InvalidVolume(Invalid): message = _("Invalid volume.") class InvalidShareType(Invalid): message = _("Invalid share type: %(reason)s.") class InvalidExtraSpec(Invalid): message = _("Invalid extra_spec: %(reason)s.") class VolumeNotFound(NotFound): message = _("Volume %(volume_id)s could not be found.") class VolumeSnapshotNotFound(NotFound): message = _("Snapshot %(snapshot_id)s could not be found.") class ShareTypeNotFound(NotFound): message = _("Share type %(share_type_id)s could not be found.") class ShareTypeAccessNotFound(NotFound): message = _("Share type access not found for %(share_type_id)s / " "%(project_id)s combination.") class ShareTypeNotFoundByName(ShareTypeNotFound): message = _("Share type with name %(share_type_name)s " "could not be found.") class ShareTypeExtraSpecsNotFound(NotFound): message = _("Share Type %(share_type_id)s has no extra specs with " "key %(extra_specs_key)s.") class ShareTypeInUse(ManilaException): message = _("Share Type %(share_type_id)s deletion is not allowed with " "shares present with the type.") class ShareTypeExists(ManilaException): message = _("Share Type %(id)s already exists.") class ShareTypeAccessExists(ManilaException): message = _("Share type access for %(share_type_id)s / " "%(project_id)s combination already exists.") class ShareTypeCreateFailed(ManilaException): message = _("Cannot create share_type with " "name %(name)s and specs %(extra_specs)s.") class ManageExistingShareTypeMismatch(ManilaException): message = _("Manage existing share failed due to share type mismatch: " "%(reason)s") class ShareExtendingError(ManilaException): message = _("Share %(share_id)s could not be extended due to error " "in the driver: %(reason)s") class ShareShrinkingError(ManilaException): message = _("Share %(share_id)s could not be shrunk due to error " "in the driver: %(reason)s") class ShareShrinkingPossibleDataLoss(ManilaException): message = _("Share %(share_id)s could not be shrunk due to " "possible data loss") class InstanceNotFound(NotFound): message = _("Instance %(instance_id)s could not be found.") class BridgeDoesNotExist(ManilaException): message = _("Bridge %(bridge)s does not exist.") class ServiceInstanceException(ManilaException): message = _("Exception in service instance manager occurred.") class ServiceInstanceUnavailable(ServiceInstanceException): message = _("Service instance is not available.") class StorageResourceException(ManilaException): message = _("Storage resource exception.") class StorageResourceNotFound(StorageResourceException): message = _("Storage resource %(name)s not found.") code = 404 class SnapshotResourceNotFound(StorageResourceNotFound): message = _("Snapshot %(name)s not found.") class 
SnapshotUnavailable(StorageResourceException): message = _("Snapshot %(name)s info not available.") class NetAppException(ManilaException): message = _("Exception due to NetApp failure.") class VserverNotFound(NetAppException): message = _("Vserver %(vserver)s not found.") class VserverNotSpecified(NetAppException): message = _("Vserver not specified.") class EMCVnxXMLAPIError(Invalid): message = _("%(err)s") class EMCVnxLockRequiredException(ManilaException): message = _("Unable to acquire lock(s).") class EMCVnxInvalidMoverID(ManilaException): message = _("Invalid mover or vdm %(id)s.") class HPE3ParInvalidClient(Invalid): message = _("%(err)s") class HPE3ParInvalid(Invalid): message = _("%(err)s") class HPE3ParUnexpectedError(ManilaException): message = _("%(err)s") class GPFSException(ManilaException): message = _("GPFS exception occurred.") class GPFSGaneshaException(ManilaException): message = _("GPFS Ganesha exception occurred.") class GaneshaCommandFailure(ProcessExecutionError): _description = _("Ganesha management command failed.") def __init__(self, **kw): if 'description' not in kw: kw['description'] = self._description super(GaneshaCommandFailure, self).__init__(**kw) class InvalidSqliteDB(Invalid): message = _("Invalid Sqlite database.") class SSHException(ManilaException): message = _("Exception in SSH protocol negotiation or logic.") class HDFSException(ManilaException): message = _("HDFS exception occurred!") class ZFSonLinuxException(ManilaException): message = _("ZFSonLinux exception occurred: %(msg)s") class QBException(ManilaException): message = _("Quobyte exception occurred: %(msg)s") class QBRpcException(ManilaException): """Quobyte backend specific exception.""" message = _("Quobyte JsonRpc call to backend raised " "an exception: %(result)s, Quobyte error" " code %(qbcode)s") class SSHInjectionThreat(ManilaException): message = _("SSH command injection detected: %(command)s") class HNASBackendException(ManilaException): message = _("HNAS Backend Exception: %(msg)s") class HNASConnException(ManilaException): message = _("HNAS Connection Exception: %(msg)s") class HNASItemNotFoundException(StorageResourceNotFound): message = _("HNAS Item Not Found Exception: %(msg)s") class HNASNothingToCloneException(ManilaException): message = _("HNAS Nothing To Clone Exception: %(msg)s") # ConsistencyGroup class ConsistencyGroupNotFound(NotFound): message = _("ConsistencyGroup %(consistency_group_id)s could not be " "found.") class CGSnapshotNotFound(NotFound): message = _("Consistency group snapshot %(cgsnapshot_id)s could not be " "found.") class CGSnapshotMemberNotFound(NotFound): message = _("CG snapshot %(member_id)s could not be found.") class InvalidConsistencyGroup(Invalid): message = _("Invalid ConsistencyGroup: %(reason)s") class InvalidCGSnapshot(Invalid): message = _("Invalid CGSnapshot: %(reason)s") class DriverNotInitialized(ManilaException): message = _("Share driver '%(driver)s' not initialized.") class ShareResourceNotFound(StorageResourceNotFound): message = _("Share id %(share_id)s could not be found " "in storage backend.") class ShareUmountException(ManilaException): message = _("Failed to unmount share: %(reason)s") class ShareMountException(ManilaException): message = _("Failed to mount share: %(reason)s") class ShareCopyDataException(ManilaException): message = _("Failed to copy data: %(reason)s") # Replication class ReplicationException(ManilaException): message = _("Unable to perform a replication action: %(reason)s.") class 
ShareReplicaNotFound(NotFound): message = _("Share Replica %(replica_id)s could not be found.") # Tegile Storage drivers class TegileAPIException(ShareBackendException): message = _("Unexpected response from Tegile IntelliFlash API: " "%(response)s") class StorageCommunicationException(ShareBackendException): message = _("Could not communicate with storage array.") manila-2.0.0/manila/scheduler/0000775000567000056710000000000012701407265017336 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/scheduler/host_manager.py0000664000567000056710000005726712701407107022373 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack, LLC. # Copyright (c) 2015 Rushil Chugh # Copyright (c) 2015 Clinton Knight # Copyright (c) 2015 EMC Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Manage hosts in the current zone. """ import re try: from UserDict import IterableUserDict # noqa except ImportError: from collections import UserDict as IterableUserDict # noqa from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils import six from manila import db from manila import exception from manila.i18n import _LI, _LW from manila.scheduler.filters import base_host as base_host_filter from manila.scheduler.weighers import base_host as base_host_weigher from manila.share import utils as share_utils from manila import utils host_manager_opts = [ cfg.ListOpt('scheduler_default_filters', default=[ 'AvailabilityZoneFilter', 'CapacityFilter', 'CapabilitiesFilter', 'ConsistencyGroupFilter', 'ShareReplicationFilter', ], help='Which filter class names to use for filtering hosts ' 'when not specified in the request.'), cfg.ListOpt('scheduler_default_weighers', default=[ 'CapacityWeigher' ], help='Which weigher class names to use for weighing hosts.') ] CONF = cfg.CONF CONF.register_opts(host_manager_opts) CONF.import_opt('max_over_subscription_ratio', 'manila.share.driver') LOG = log.getLogger(__name__) class ReadOnlyDict(IterableUserDict): """A read-only dict.""" def __init__(self, source=None): self.data = {} self.update(source) def __setitem__(self, key, item): raise TypeError def __delitem__(self, key): raise TypeError def clear(self): raise TypeError def pop(self, key, *args): raise TypeError def popitem(self): raise TypeError def update(self, source=None): if source is None: return elif isinstance(source, IterableUserDict): self.data = source.data elif isinstance(source, type({})): self.data = source else: raise TypeError class HostState(object): """Mutable and immutable information tracked for a host.""" def __init__(self, host, capabilities=None, service=None): self.capabilities = None self.service = None self.host = host self.update_capabilities(capabilities, service) self.share_backend_name = None self.vendor_name = None self.driver_version = 0 self.storage_protocol = None self.qos = False # Mutable available resources. # These will change as resources are virtually "consumed". 
self.total_capacity_gb = 0 self.free_capacity_gb = None self.reserved_percentage = 0 self.allocated_capacity_gb = 0 # NOTE(xyang): The apparent allocated space indicating how much # capacity has been provisioned. This could be the sum of sizes # of all shares on a backend, which could be greater than or # equal to the allocated_capacity_gb. self.provisioned_capacity_gb = 0 self.max_over_subscription_ratio = 1.0 self.thin_provisioning = False self.driver_handles_share_servers = False self.snapshot_support = True self.consistency_group_support = False self.dedupe = False self.compression = False self.replication_type = None self.replication_domain = None # PoolState for all pools self.pools = {} self.updated = None def update_capabilities(self, capabilities=None, service=None): # Read-only capability dicts if capabilities is None: capabilities = {} self.capabilities = ReadOnlyDict(capabilities) if service is None: service = {} self.service = ReadOnlyDict(service) def update_from_share_capability(self, capability, service=None): """Update information about a host from its share_node info. 'capability' is the status info reported by share backend, a typical capability looks like this: capability = { 'share_backend_name': 'Local NFS', #\ 'vendor_name': 'OpenStack', # backend level 'driver_version': '1.0', # mandatory/fixed 'storage_protocol': 'NFS', #/ stats&capabilities 'active_shares': 10, #\ 'IOPS_provisioned': 30000, # optional custom 'fancy_capability_1': 'eat', # stats & capabilities 'fancy_capability_2': 'drink', #/ 'pools': [ {'pool_name': '1st pool', #\ 'total_capacity_gb': 500, # mandatory stats for 'free_capacity_gb': 230, # pools 'allocated_capacity_gb': 270, # | 'qos': 'False', # | 'reserved_percentage': 0, #/ 'dying_disks': 100, #\ 'super_hero_1': 'spider-man', # optional custom 'super_hero_2': 'flash', # stats & capabilities 'super_hero_3': 'neoncat' #/ }, {'pool_name': '2nd pool', 'total_capacity_gb': 1024, 'free_capacity_gb': 1024, 'allocated_capacity_gb': 0, 'qos': 'False', 'reserved_percentage': 0, 'dying_disks': 200, 'super_hero_1': 'superman', 'super_hero_2': ' ', 'super_hero_2': 'Hulk', } ] } """ self.update_capabilities(capability, service) if capability: if self.updated and self.updated > capability['timestamp']: return # Update backend level info self.update_backend(capability) # Update pool level info self.update_pools(capability, service) def update_pools(self, capability, service): """Update storage pools information from backend reported info.""" if not capability: return pools = capability.get('pools', None) active_pools = set() if pools and isinstance(pools, list): # Update all pools stats according to information from list # of pools in share capacity for pool_cap in pools: pool_name = pool_cap['pool_name'] self._append_backend_info(pool_cap) cur_pool = self.pools.get(pool_name, None) if not cur_pool: # Add new pool cur_pool = PoolState(self.host, pool_cap, pool_name) self.pools[pool_name] = cur_pool cur_pool.update_from_share_capability(pool_cap, service) active_pools.add(pool_name) elif pools is None: # To handle legacy driver that doesn't report pool # information in the capability, we have to prepare # a pool from backend level info, or to update the one # we created in self.pools. 
pool_name = self.share_backend_name if pool_name is None: # To get DEFAULT_POOL_NAME pool_name = share_utils.extract_host(self.host, 'pool', True) if len(self.pools) == 0: # No pool was there single_pool = PoolState(self.host, capability, pool_name) self._append_backend_info(capability) self.pools[pool_name] = single_pool else: # This is a update from legacy driver try: single_pool = self.pools[pool_name] except KeyError: single_pool = PoolState(self.host, capability, pool_name) self._append_backend_info(capability) self.pools[pool_name] = single_pool single_pool.update_from_share_capability(capability, service) active_pools.add(pool_name) # Remove non-active pools from self.pools nonactive_pools = set(self.pools.keys()) - active_pools for pool in nonactive_pools: LOG.debug("Removing non-active pool %(pool)s @ %(host)s " "from scheduler cache.", {'pool': pool, 'host': self.host}) del self.pools[pool] def _append_backend_info(self, pool_cap): # Fill backend level info to pool if needed. if not pool_cap.get('share_backend_name'): pool_cap['share_backend_name'] = self.share_backend_name if not pool_cap.get('storage_protocol'): pool_cap['storage_protocol'] = self.storage_protocol if not pool_cap.get('vendor_name'): pool_cap['vendor_name'] = self.vendor_name if not pool_cap.get('driver_version'): pool_cap['driver_version'] = self.driver_version if not pool_cap.get('timestamp'): pool_cap['timestamp'] = self.updated if not pool_cap.get('storage_protocol'): pool_cap['storage_protocol'] = self.storage_protocol if 'driver_handles_share_servers' not in pool_cap: pool_cap['driver_handles_share_servers'] = ( self.driver_handles_share_servers) if 'snapshot_support' not in pool_cap: pool_cap['snapshot_support'] = self.snapshot_support if not pool_cap.get('consistency_group_support'): pool_cap['consistency_group_support'] = \ self.consistency_group_support if 'dedupe' not in pool_cap: pool_cap['dedupe'] = self.dedupe if 'compression' not in pool_cap: pool_cap['compression'] = self.compression if not pool_cap.get('replication_type'): pool_cap['replication_type'] = self.replication_type if not pool_cap.get('replication_domain'): pool_cap['replication_domain'] = self.replication_domain def update_backend(self, capability): self.share_backend_name = capability.get('share_backend_name') self.vendor_name = capability.get('vendor_name') self.driver_version = capability.get('driver_version') self.storage_protocol = capability.get('storage_protocol') self.driver_handles_share_servers = capability.get( 'driver_handles_share_servers') self.snapshot_support = capability.get('snapshot_support') self.consistency_group_support = capability.get( 'consistency_group_support', False) self.updated = capability['timestamp'] self.replication_type = capability.get('replication_type') self.replication_domain = capability.get('replication_domain') def consume_from_share(self, share): """Incrementally update host state from an share.""" if (isinstance(self.free_capacity_gb, six.string_types) and self.free_capacity_gb != 'unknown'): raise exception.InvalidCapacity( name='free_capacity_gb', value=six.text_type(self.free_capacity_gb) ) if self.free_capacity_gb != 'unknown': self.free_capacity_gb -= share['size'] self.updated = timeutils.utcnow() def __repr__(self): return ("host: '%(host)s', free_capacity_gb: %(free)s, " "pools: %(pools)s" % {'host': self.host, 'free': self.free_capacity_gb, 'pools': self.pools} ) class PoolState(HostState): def __init__(self, host, capabilities, pool_name): new_host = share_utils.append_host(host, 
pool_name) super(PoolState, self).__init__(new_host, capabilities) self.pool_name = pool_name # No pools in pool self.pools = None def update_from_share_capability(self, capability, service=None): """Update information about a pool from its share_node info.""" self.update_capabilities(capability, service) if capability: if self.updated and self.updated > capability['timestamp']: return self.update_backend(capability) self.total_capacity_gb = capability['total_capacity_gb'] self.free_capacity_gb = capability['free_capacity_gb'] self.allocated_capacity_gb = capability.get( 'allocated_capacity_gb', 0) self.qos = capability.get('qos', False) self.reserved_percentage = capability['reserved_percentage'] # NOTE(xyang): provisioned_capacity_gb is the apparent total # capacity of all the shares created on a backend, which is # greater than or equal to allocated_capacity_gb, which is the # apparent total capacity of all the shares created on a backend # in Manila. Using allocated_capacity_gb as the default of # provisioned_capacity_gb if it is not set. self.provisioned_capacity_gb = capability.get( 'provisioned_capacity_gb', self.allocated_capacity_gb) self.max_over_subscription_ratio = capability.get( 'max_over_subscription_ratio', CONF.max_over_subscription_ratio) self.thin_provisioning = capability.get( 'thin_provisioning', False) self.dedupe = capability.get( 'dedupe', False) self.compression = capability.get( 'compression', False) self.replication_type = capability.get( 'replication_type', self.replication_type) self.replication_domain = capability.get( 'replication_domain') def update_pools(self, capability): # Do nothing, since we don't have pools within pool, yet pass class HostManager(object): """Base HostManager class.""" host_state_cls = HostState def __init__(self): self.service_states = {} # { : {: {cap k : v}}} self.host_state_map = {} self.filter_handler = base_host_filter.HostFilterHandler( 'manila.scheduler.filters') self.filter_classes = self.filter_handler.get_all_classes() self.weight_handler = base_host_weigher.HostWeightHandler( 'manila.scheduler.weighers') self.weight_classes = self.weight_handler.get_all_classes() def _choose_host_filters(self, filter_cls_names): """Choose acceptable filters. Since the caller may specify which filters to use we need to have an authoritative list of what is permissible. This function checks the filter names against a predefined set of acceptable filters. """ if filter_cls_names is None: filter_cls_names = CONF.scheduler_default_filters if not isinstance(filter_cls_names, (list, tuple)): filter_cls_names = [filter_cls_names] good_filters = [] bad_filters = [] for filter_name in filter_cls_names: found_class = False for cls in self.filter_classes: if cls.__name__ == filter_name: found_class = True good_filters.append(cls) break if not found_class: bad_filters.append(filter_name) if bad_filters: msg = ", ".join(bad_filters) raise exception.SchedulerHostFilterNotFound(filter_name=msg) return good_filters def _choose_host_weighers(self, weight_cls_names): """Choose acceptable weighers. Since the caller may specify which weighers to use, we need to have an authoritative list of what is permissible. This function checks the weigher names against a predefined set of acceptable weighers. 
""" if weight_cls_names is None: weight_cls_names = CONF.scheduler_default_weighers if not isinstance(weight_cls_names, (list, tuple)): weight_cls_names = [weight_cls_names] good_weighers = [] bad_weighers = [] for weigher_name in weight_cls_names: found_class = False for cls in self.weight_classes: if cls.__name__ == weigher_name: good_weighers.append(cls) found_class = True break if not found_class: bad_weighers.append(weigher_name) if bad_weighers: msg = ", ".join(bad_weighers) raise exception.SchedulerHostWeigherNotFound(weigher_name=msg) return good_weighers def get_filtered_hosts(self, hosts, filter_properties, filter_class_names=None): """Filter hosts and return only ones passing all filters.""" filter_classes = self._choose_host_filters(filter_class_names) return self.filter_handler.get_filtered_objects(filter_classes, hosts, filter_properties) def get_weighed_hosts(self, hosts, weight_properties, weigher_class_names=None): """Weigh the hosts.""" weigher_classes = self._choose_host_weighers(weigher_class_names) weight_properties['server_pools_mapping'] = {} for backend, info in self.service_states.items(): weight_properties['server_pools_mapping'].update( info.get('server_pools_mapping', {})) return self.weight_handler.get_weighed_objects(weigher_classes, hosts, weight_properties) def update_service_capabilities(self, service_name, host, capabilities): """Update the per-service capabilities based on this notification.""" if service_name not in ('share',): LOG.debug('Ignoring %(service_name)s service update ' 'from %(host)s', {'service_name': service_name, 'host': host}) return # Copy the capabilities, so we don't modify the original dict capability_copy = dict(capabilities) capability_copy["timestamp"] = timeutils.utcnow() # Reported time self.service_states[host] = capability_copy LOG.debug("Received %(service_name)s service update from " "%(host)s: %(cap)s" % {'service_name': service_name, 'host': host, 'cap': capabilities}) def _update_host_state_map(self, context): # Get resource usage across the available share nodes: topic = CONF.share_topic share_services = db.service_get_all_by_topic(context, topic) active_hosts = set() for service in share_services: host = service['host'] # Warn about down services and remove them from host_state_map if not utils.service_is_up(service) or service['disabled']: LOG.warning(_LW("Share service is down. (host: %s).") % host) continue # Create and register host_state if not in host_state_map capabilities = self.service_states.get(host, None) host_state = self.host_state_map.get(host) if not host_state: host_state = self.host_state_cls( host, capabilities=capabilities, service=dict(service.items())) self.host_state_map[host] = host_state # Update capabilities and attributes in host_state host_state.update_from_share_capability( capabilities, service=dict(service.items())) active_hosts.add(host) # remove non-active hosts from host_state_map nonactive_hosts = set(self.host_state_map.keys()) - active_hosts for host in nonactive_hosts: LOG.info(_LI("Removing non-active host: %(host)s from" "scheduler cache."), {'host': host}) self.host_state_map.pop(host, None) def get_all_host_states_share(self, context): """Returns a dict of all the hosts the HostManager knows about. Each of the consumable resources in HostState are populated with capabilities scheduler received from RPC. 
For example: {'192.168.1.100': HostState(), ...} """ self._update_host_state_map(context) # Build a pool_state map and return that map instead of host_state_map all_pools = {} for host, state in self.host_state_map.items(): for key in state.pools: pool = state.pools[key] # Use host.pool_name to make sure key is unique pool_key = '.'.join([host, pool.pool_name]) all_pools[pool_key] = pool return six.itervalues(all_pools) def get_pools(self, context, filters=None): """Returns a dict of all pools on all hosts HostManager knows about.""" self._update_host_state_map(context) all_pools = [] for host, host_state in self.host_state_map.items(): for pool in host_state.pools.values(): fully_qualified_pool_name = share_utils.append_host( host, pool.pool_name) host_name = share_utils.extract_host( fully_qualified_pool_name, level='host') backend_name = share_utils.extract_host( fully_qualified_pool_name, level='backend').split('@')[1] \ if '@' in fully_qualified_pool_name else None pool_name = share_utils.extract_host( fully_qualified_pool_name, level='pool') new_pool = { 'name': fully_qualified_pool_name, 'host': host_name, 'backend': backend_name, 'pool': pool_name, 'capabilities': pool.capabilities, } if self._passes_filters(new_pool, filters): all_pools.append(new_pool) return all_pools def _passes_filters(self, dict_to_check, filter_dict): """Applies a set of regex filters to a dictionary. If no filter keys are supplied, the data passes unfiltered and the method returns True. Otherwise, each key in the filter (filter_dict) must be present in the data (dict_to_check) and the filter values are applied as regex expressions to the data values. If any of the filter values fail to match their corresponding data values, the method returns False. But if all filters match, the method returns True. """ if not filter_dict: return True for filter_key, filter_value in filter_dict.items(): if filter_key not in dict_to_check: return False if not re.match(filter_value, dict_to_check.get(filter_key)): return False return True manila-2.0.0/manila/scheduler/scheduler_options.py0000664000567000056710000000703412701407107023440 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SchedulerOptions monitors a local .json file for changes and loads it if needed. This file is converted to a data structure and passed into the filtering and weighing functions which can use it for dynamic configuration. """ import datetime import os from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import timeutils from manila.i18n import _LE scheduler_json_config_location_opt = cfg.StrOpt( 'scheduler_json_config_location', default='', help='Absolute path to scheduler configuration JSON file.') CONF = cfg.CONF CONF.register_opt(scheduler_json_config_location_opt) LOG = log.getLogger(__name__) class SchedulerOptions(object): """Monitor and load local .json file for filtering and weighing. 
SchedulerOptions monitors a local .json file for changes and loads it if needed. This file is converted to a data structure and passed into the filtering and weighing functions which can use it for dynamic configuration. """ def __init__(self): super(SchedulerOptions, self).__init__() self.data = {} self.last_modified = None self.last_checked = None def _get_file_handle(self, filename): """Get file handle. Broken out for testing.""" return open(filename) def _get_file_timestamp(self, filename): """Get the last modified datetime. Broken out for testing.""" try: return os.path.getmtime(filename) except os.error as e: LOG.exception(_LE("Could not stat scheduler options file " "%(filename)s: '%(e)s'"), {"filename": filename, "e": e}) raise def _load_file(self, handle): """Decode the JSON file. Broken out for testing.""" try: return jsonutils.load(handle) except ValueError as e: LOG.exception(_LE("Could not decode scheduler options: " "'%(e)s'"), {"e": e}) return {} def _get_time_now(self): """Get current UTC. Broken out for testing.""" return timeutils.utcnow() def get_configuration(self, filename=None): """Check the json file for changes and load it if needed.""" if not filename: filename = CONF.scheduler_json_config_location if not filename: return self.data if self.last_checked: now = self._get_time_now() if now - self.last_checked < datetime.timedelta(minutes=5): return self.data last_modified = self._get_file_timestamp(filename) if (not last_modified or not self.last_modified or last_modified > self.last_modified): self.data = self._load_file(self._get_file_handle(filename)) self.last_modified = last_modified if not self.data: self.data = {} return self.data manila-2.0.0/manila/scheduler/__init__.py0000664000567000056710000000000012701407107021430 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/scheduler/drivers/0000775000567000056710000000000012701407265021014 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/scheduler/drivers/simple.py0000664000567000056710000000621512701407107022656 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 OpenStack, LLC. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
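# A configuration sketch (hypothetical deployment choice): this naive
# scheduler can still be selected explicitly in manila.conf with
#
#     scheduler_driver = manila.scheduler.drivers.simple.SimpleScheduler
#
# in which case schedule_create_share() below walks the hosts (least loaded
# first) and picks the first one that is up, not disabled, and stays under
# CONF.max_gigabytes after adding the new share's size.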
""" Simple Scheduler """ from oslo_config import cfg from manila import db from manila import exception from manila.i18n import _ from manila.scheduler.drivers import base from manila.scheduler.drivers import chance from manila import utils simple_scheduler_opts = [ cfg.IntOpt("max_gigabytes", default=10000, help="Maximum number of volume gigabytes to allow per host."), ] CONF = cfg.CONF CONF.register_opts(simple_scheduler_opts) class SimpleScheduler(chance.ChanceScheduler): """Implements Naive Scheduler that tries to find least loaded host.""" def schedule_create_share(self, context, request_spec, filter_properties): """Picks a host that is up and has the fewest shares.""" # TODO(rushiagr) - pick only hosts that run shares elevated = context.elevated() share_id = request_spec.get('share_id') snapshot_id = request_spec.get('snapshot_id') share_properties = request_spec.get('share_properties') share_size = share_properties.get('size') instance_properties = request_spec.get('share_instance_properties', {}) availability_zone_id = instance_properties.get('availability_zone_id') results = db.service_get_all_share_sorted(elevated) if availability_zone_id: results = [(service_g, gigs) for (service_g, gigs) in results if (service_g['availability_zone_id'] == availability_zone_id)] for result in results: (service, share_gigabytes) = result if share_gigabytes + share_size > CONF.max_gigabytes: msg = _("Not enough allocatable share gigabytes remaining") raise exception.NoValidHost(reason=msg) if utils.service_is_up(service) and not service['disabled']: updated_share = base.share_update_db(context, share_id, service['host']) self.share_rpcapi.create_share_instance( context, updated_share.instance, service['host'], request_spec, None, snapshot_id=snapshot_id) return None msg = _("Is the appropriate service running?") raise exception.NoValidHost(reason=msg) manila-2.0.0/manila/scheduler/drivers/__init__.py0000664000567000056710000000000012701407107023106 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/scheduler/drivers/chance.py0000664000567000056710000000515512701407107022610 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 OpenStack, LLC. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Chance (Random) Scheduler implementation """ import random from oslo_config import cfg from manila import exception from manila.i18n import _ from manila.scheduler.drivers import base CONF = cfg.CONF class ChanceScheduler(base.Scheduler): """Implements Scheduler as a random node selector.""" def _filter_hosts(self, request_spec, hosts, **kwargs): """Filter a list of hosts based on request_spec.""" filter_properties = kwargs.get('filter_properties', {}) ignore_hosts = filter_properties.get('ignore_hosts', []) hosts = [host for host in hosts if host not in ignore_hosts] return hosts def _schedule(self, context, topic, request_spec, **kwargs): """Picks a host that is up at random.""" elevated = context.elevated() hosts = self.hosts_up(elevated, topic) if not hosts: msg = _("Is the appropriate service running?") raise exception.NoValidHost(reason=msg) hosts = self._filter_hosts(request_spec, hosts, **kwargs) if not hosts: msg = _("Could not find another host") raise exception.NoValidHost(reason=msg) return hosts[int(random.random() * len(hosts))] def schedule_create_share(self, context, request_spec, filter_properties): """Picks a host that is up at random.""" topic = CONF.share_topic host = self._schedule(context, topic, request_spec, filter_properties=filter_properties) share_id = request_spec['share_id'] snapshot_id = request_spec['snapshot_id'] updated_share = base.share_update_db(context, share_id, host) self.share_rpcapi.create_share_instance( context, updated_share.instance, host, request_spec, filter_properties, snapshot_id ) manila-2.0.0/manila/scheduler/drivers/filter.py0000664000567000056710000004306512701407107022656 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Intel Corporation # Copyright (c) 2011 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ The FilterScheduler is for creating shares. You can customize this scheduler by specifying your own share Filters and Weighing Functions. """ from oslo_config import cfg from oslo_log import log from manila import exception from manila.i18n import _ from manila.i18n import _LE, _LI from manila.scheduler.drivers import base from manila.scheduler import scheduler_options from manila.share import share_types CONF = cfg.CONF LOG = log.getLogger(__name__) class FilterScheduler(base.Scheduler): """Scheduler that can be used for filtering and weighing.""" def __init__(self, *args, **kwargs): super(FilterScheduler, self).__init__(*args, **kwargs) self.cost_function_cache = None self.options = scheduler_options.SchedulerOptions() self.max_attempts = self._max_attempts() def _get_configuration_options(self): """Fetch options dictionary. Broken out for testing.""" return self.options.get_configuration() def get_pools(self, context, filters): return self.host_manager.get_pools(context, filters) def _post_select_populate_filter_properties(self, filter_properties, host_state): """Add additional information to filter properties. 
Add additional information to the filter properties after a host has been selected by the scheduling process. """ # Add a retry entry for the selected volume backend: self._add_retry_host(filter_properties, host_state.host) def _add_retry_host(self, filter_properties, host): """Add retry entry for the selected volume backend. In the event that the request gets re-scheduled, this entry will signal that the given backend has already been tried. """ retry = filter_properties.get('retry') if not retry: return hosts = retry['hosts'] hosts.append(host) def _max_attempts(self): max_attempts = CONF.scheduler_max_attempts if max_attempts < 1: msg = _("Invalid value for 'scheduler_max_attempts', " "must be >=1") raise exception.InvalidParameterValue(err=msg) return max_attempts def schedule_create_share(self, context, request_spec, filter_properties): weighed_host = self._schedule_share(context, request_spec, filter_properties) if not weighed_host: raise exception.NoValidHost(reason="") host = weighed_host.obj.host share_id = request_spec['share_id'] snapshot_id = request_spec['snapshot_id'] updated_share = base.share_update_db(context, share_id, host) self._post_select_populate_filter_properties(filter_properties, weighed_host.obj) # context is not serializable filter_properties.pop('context', None) self.share_rpcapi.create_share_instance( context, updated_share.instance, host, request_spec=request_spec, filter_properties=filter_properties, snapshot_id=snapshot_id ) def schedule_create_replica(self, context, request_spec, filter_properties): share_replica_id = request_spec['share_instance_properties'].get('id') weighed_host = self._schedule_share( context, request_spec, filter_properties) if not weighed_host: msg = _('Failed to find a weighted host for scheduling share ' 'replica %s.') raise exception.NoValidHost(reason=msg % share_replica_id) host = weighed_host.obj.host updated_share_replica = base.share_replica_update_db( context, share_replica_id, host) self._post_select_populate_filter_properties(filter_properties, weighed_host.obj) # context is not serializable filter_properties.pop('context', None) self.share_rpcapi.create_share_replica( context, updated_share_replica, host, request_spec=request_spec, filter_properties=filter_properties) def _format_filter_properties(self, context, filter_properties, request_spec): elevated = context.elevated() share_properties = request_spec['share_properties'] share_instance_properties = (request_spec.get( 'share_instance_properties', {})) # Since Manila is using mixed filters from Oslo and it's own, which # takes 'resource_XX' and 'volume_XX' as input respectively, copying # 'volume_XX' to 'resource_XX' will make both filters happy. 
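# For example (hypothetical request values): share_properties
# {'size': 10, 'user_id': 'u1'} merged with share_instance_properties
# {'availability_zone_id': 'az1'} yields resource_properties
# {'size': 10, 'user_id': 'u1', 'availability_zone_id': 'az1'},
# which keeps both the Oslo 'resource_*' filters and Manila's own
# filters satisfied.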
resource_properties = share_properties.copy() resource_properties.update(share_instance_properties.copy()) share_type = request_spec.get("share_type", {}) if not share_type: msg = _("You must create a share type in advance," " and specify in request body or" " set default_share_type in manila.conf.") LOG.error(msg) raise exception.InvalidParameterValue(err=msg) extra_specs = share_type.get('extra_specs', {}) if extra_specs: for extra_spec_name in share_types.get_boolean_extra_specs(): extra_spec = extra_specs.get(extra_spec_name) if extra_spec is not None: if not extra_spec.startswith(""): extra_spec = " %s" % extra_spec share_type['extra_specs'][extra_spec_name] = extra_spec resource_type = request_spec.get("share_type") or {} request_spec.update({'resource_properties': resource_properties}) config_options = self._get_configuration_options() # NOTE(ameade): If a consistency group is specified, pass the # consistency group support level to the ConsistencyGroupFilter # (host, pool, or False) cg_support = None cg = request_spec.get('consistency_group') if cg: temp_hosts = self.host_manager.get_all_host_states_share(elevated) cg_host = next((host for host in temp_hosts if host.host == cg.get('host')), None) if cg_host: cg_support = cg_host.consistency_group_support # NOTE(gouthamr): If 'active_replica_host' is present in the request # spec, pass that host's 'replication_domain' to the # ShareReplication filter. active_replica_host = request_spec.get('active_replica_host') replication_domain = None if active_replica_host: temp_hosts = self.host_manager.get_all_host_states_share(elevated) ar_host = next((host for host in temp_hosts if host.host == active_replica_host), None) if ar_host: replication_domain = ar_host.replication_domain if filter_properties is None: filter_properties = {} self._populate_retry_share(filter_properties, resource_properties) filter_properties.update({'context': context, 'request_spec': request_spec, 'config_options': config_options, 'share_type': share_type, 'resource_type': resource_type, 'cg_support': cg_support, 'consistency_group': cg, 'replication_domain': replication_domain, }) self.populate_filter_properties_share(request_spec, filter_properties) return filter_properties, share_properties def _schedule_share(self, context, request_spec, filter_properties=None): """Returns a list of hosts that meet the required specs. The list is ordered by their fitness. """ elevated = context.elevated() filter_properties, share_properties = self._format_filter_properties( context, filter_properties, request_spec) # Find our local list of acceptable hosts by filtering and # weighing our options. we virtually consume resources on # it so subsequent selections can adjust accordingly. # Note: remember, we are using an iterator here. So only # traverse this list once. hosts = self.host_manager.get_all_host_states_share(elevated) # Filter local hosts based on requirements ... hosts = self.host_manager.get_filtered_hosts(hosts, filter_properties) if not hosts: return None LOG.debug("Filtered share %(hosts)s", {"hosts": hosts}) # weighted_host = WeightedHost() ... the best # host for the job. 
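# Illustration (hypothetical hosts and weights): get_weighed_hosts() returns
# the surviving hosts ordered best-first, e.g. host1@backendA#pool1 with
# weight 230.0 ahead of host2@backendB#pool1 with weight 120.0, and the
# first entry is taken as best_host right below.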
weighed_hosts = self.host_manager.get_weighed_hosts(hosts, filter_properties) best_host = weighed_hosts[0] LOG.debug("Choosing for share: %(best_host)s", {"best_host": best_host}) # NOTE(rushiagr): updating the available space parameters at same place best_host.obj.consume_from_share(share_properties) return best_host def _populate_retry_share(self, filter_properties, properties): """Populate filter properties with retry history. Populate filter properties with history of retries for this request. If maximum retries is exceeded, raise NoValidHost. """ max_attempts = self.max_attempts retry = filter_properties.pop('retry', {}) if max_attempts == 1: # re-scheduling is disabled. return # retry is enabled, update attempt count: if retry: retry['num_attempts'] += 1 else: retry = { 'num_attempts': 1, 'hosts': [] # list of share service hosts tried } filter_properties['retry'] = retry share_id = properties.get('share_id') self._log_share_error(share_id, retry) if retry['num_attempts'] > max_attempts: msg = _("Exceeded max scheduling attempts %(max_attempts)d for " "share %(share_id)s") % { "max_attempts": max_attempts, "share_id": share_id } raise exception.NoValidHost(reason=msg) def _log_share_error(self, share_id, retry): """Log any exceptions from a previous share create operation. If the request contained an exception from a previous share create operation, log it to aid debugging. """ exc = retry.pop('exc', None) # string-ified exception from share if not exc: return # no exception info from a previous attempt, skip hosts = retry.get('hosts') if not hosts: return # no previously attempted hosts, skip last_host = hosts[-1] LOG.error(_LE("Error scheduling %(share_id)s from last share-service: " "%(last_host)s : %(exc)s"), { "share_id": share_id, "last_host": last_host, "exc": "exc" }) def populate_filter_properties_share(self, request_spec, filter_properties): """Stuff things into filter_properties. Can be overridden in a subclass to add more data. """ shr = request_spec['share_properties'] inst = request_spec['share_instance_properties'] filter_properties['size'] = shr['size'] filter_properties['availability_zone_id'] = ( inst.get('availability_zone_id') ) filter_properties['user_id'] = shr.get('user_id') filter_properties['metadata'] = shr.get('metadata') def schedule_create_consistency_group(self, context, group_id, request_spec, filter_properties): LOG.info(_LI("Scheduling consistency group %s") % group_id) host = self._get_best_host_for_consistency_group( context, request_spec) if not host: msg = _("No hosts available for consistency group %s") % group_id raise exception.NoValidHost(reason=msg) msg = _LI("Chose host %(host)s for create_consistency_group %(cg_id)s") LOG.info(msg % {'host': host, 'cg_id': group_id}) updated_group = base.cg_update_db(context, group_id, host) self.share_rpcapi.create_consistency_group(context, updated_group, host) def _get_weighted_hosts_for_share_type(self, context, request_spec, share_type): config_options = self._get_configuration_options() # NOTE(ameade): Find our local list of acceptable hosts by # filtering and weighing our options. We virtually consume # resources on it so subsequent selections can adjust accordingly. # NOTE(ameade): Remember, we are using an iterator here. So only # traverse this list once. 
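# Note (sketch of the consistency-group flow): this helper is invoked once
# per share type in the requested group; _get_weighted_candidates_cg() then
# intersects the per-type results so that only hosts able to serve every
# share type of the group remain candidates.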
all_hosts = self.host_manager.get_all_host_states_share(context) if not all_hosts: return [] share_type['extra_specs'] = share_type.get('extra_specs', {}) if share_type['extra_specs']: for spec_name in share_types.get_undeletable_extra_specs(): extra_spec = share_type['extra_specs'].get(spec_name) if extra_spec is not None: share_type['extra_specs'][spec_name] = ( " %s" % extra_spec) # Only allow pools that support consistency groups share_type['extra_specs']['consistency_group_support'] = ( " host pool") filter_properties = { 'context': context, 'request_spec': request_spec, 'config_options': config_options, 'share_type': share_type, 'resource_type': share_type, 'size': 0, } # Filter local hosts based on requirements ... hosts = self.host_manager.get_filtered_hosts(all_hosts, filter_properties) if not hosts: return [] LOG.debug("Filtered %s" % hosts) # weighted_host = WeightedHost() ... the best host for the job. weighed_hosts = self.host_manager.get_weighed_hosts( hosts, filter_properties) if not weighed_hosts: return [] return weighed_hosts def _get_weighted_candidates_cg(self, context, request_spec): """Finds hosts that support the consistency group. Returns a list of hosts that meet the required specs, ordered by their fitness. """ elevated = context.elevated() shr_types = request_spec.get("share_types") weighed_hosts = [] for iteration_count, share_type in enumerate(shr_types): temp_weighed_hosts = self._get_weighted_hosts_for_share_type( elevated, request_spec, share_type) # NOTE(ameade): Take the intersection of hosts so we have one that # can support all share types of the CG if iteration_count == 0: weighed_hosts = temp_weighed_hosts else: new_weighed_hosts = [] for host1 in weighed_hosts: for host2 in temp_weighed_hosts: if host1.obj.host == host2.obj.host: new_weighed_hosts.append(host1) weighed_hosts = new_weighed_hosts if not weighed_hosts: return [] return weighed_hosts def _get_best_host_for_consistency_group(self, context, request_spec): weighed_hosts = self._get_weighted_candidates_cg( context, request_spec) if not weighed_hosts: return None return weighed_hosts[0].obj.host def host_passes_filters(self, context, host, request_spec, filter_properties): elevated = context.elevated() filter_properties, share_properties = self._format_filter_properties( context, filter_properties, request_spec) hosts = self.host_manager.get_all_host_states_share(elevated) hosts = self.host_manager.get_filtered_hosts(hosts, filter_properties) hosts = self.host_manager.get_weighed_hosts(hosts, filter_properties) for tgt_host in hosts: if tgt_host.obj.host == host: return tgt_host.obj msg = (_('Cannot place share %(id)s on %(host)s') % {'id': request_spec['share_id'], 'host': host}) raise exception.NoValidHost(reason=msg) manila-2.0.0/manila/scheduler/drivers/base.py0000664000567000056710000001202612701407107022274 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 OpenStack, LLC. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Scheduler base class that all Schedulers should inherit from """ from oslo_config import cfg from oslo_utils import importutils from oslo_utils import timeutils from manila import db from manila.i18n import _ from manila.share import rpcapi as share_rpcapi from manila import utils scheduler_driver_opts = [ cfg.StrOpt('scheduler_host_manager', default='manila.scheduler.host_manager.HostManager', help='The scheduler host manager class to use.'), cfg.IntOpt('scheduler_max_attempts', default=3, help='Maximum number of attempts to schedule a share.'), ] CONF = cfg.CONF CONF.register_opts(scheduler_driver_opts) def share_update_db(context, share_id, host): '''Set the host and set the scheduled_at field of a share. :returns: A Share with the updated fields set properly. ''' now = timeutils.utcnow() values = {'host': host, 'scheduled_at': now} return db.share_update(context, share_id, values) def share_replica_update_db(context, share_replica_id, host): """Set the host and the scheduled_at field of a share replica. :returns: A Share Replica with the updated fields set. """ now = timeutils.utcnow() values = {'host': host, 'scheduled_at': now} return db.share_replica_update(context, share_replica_id, values) def cg_update_db(context, cg_id, host): '''Set the host and set the updated_at field of a consistency group. :returns: A CG with the updated fields set properly. ''' now = timeutils.utcnow() values = {'host': host, 'updated_at': now} return db.consistency_group_update(context, cg_id, values) class Scheduler(object): """The base class that all Scheduler classes should inherit from.""" def __init__(self): self.host_manager = importutils.import_object( CONF.scheduler_host_manager) self.share_rpcapi = share_rpcapi.ShareAPI() def get_host_list(self): """Get a list of hosts from the HostManager.""" return self.host_manager.get_host_list() def get_service_capabilities(self): """Get the normalized set of capabilities for the services.""" return self.host_manager.get_service_capabilities() def update_service_capabilities(self, service_name, host, capabilities): """Process a capability update from a service node.""" self.host_manager.update_service_capabilities(service_name, host, capabilities) def hosts_up(self, context, topic): """Return the list of hosts that have a running service for topic.""" services = db.service_get_all_by_topic(context, topic) return [service['host'] for service in services if utils.service_is_up(service)] def schedule(self, context, topic, method, *_args, **_kwargs): """Must override schedule method for scheduler to work.""" raise NotImplementedError(_("Must implement a fallback schedule")) def schedule_create_share(self, context, request_spec, filter_properties): """Must override schedule method for scheduler to work.""" raise NotImplementedError(_("Must implement schedule_create_share")) def schedule_create_consistency_group(self, context, group_id, request_spec, filter_properties): """Must override schedule method for scheduler to work.""" raise NotImplementedError(_( "Must implement schedule_create_consistency_group")) def get_pools(self, context, filters): """Must override schedule method for scheduler to work.""" raise NotImplementedError(_("Must implement get_pools")) def host_passes_filters(self, context, host, request_spec, filter_properties): """Must override schedule method for migration to work.""" raise NotImplementedError(_("Must implement 
host_passes_filters")) def schedule_create_replica(self, context, request_spec, filter_properties): """Must override schedule method for create replica to work.""" raise NotImplementedError(_("Must implement schedule_create_replica")) manila-2.0.0/manila/scheduler/manager.py0000664000567000056710000002550412701407107021323 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 OpenStack, LLC. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Scheduler Service """ from oslo_config import cfg from oslo_log import log from oslo_utils import excutils from oslo_utils import importutils import six from manila.common import constants from manila import context from manila import db from manila import exception from manila.i18n import _LE, _LW from manila import manager from manila import rpc from manila.share import rpcapi as share_rpcapi LOG = log.getLogger(__name__) scheduler_driver_opt = cfg.StrOpt('scheduler_driver', default='manila.scheduler.drivers.' 'filter.FilterScheduler', help='Default scheduler driver to use.') CONF = cfg.CONF CONF.register_opt(scheduler_driver_opt) # Drivers that need to change module paths or class names can add their # old/new path here to maintain backward compatibility. 
MAPPING = { 'manila.scheduler.chance.ChanceScheduler': 'manila.scheduler.drivers.chance.ChanceScheduler', 'manila.scheduler.filter_scheduler.FilterScheduler': 'manila.scheduler.drivers.filter.FilterScheduler', 'manila.scheduler.simple.SimpleScheduler': 'manila.scheduler.drivers.simple.SimpleScheduler', } class SchedulerManager(manager.Manager): """Chooses a host to create shares.""" RPC_API_VERSION = '1.6' def __init__(self, scheduler_driver=None, service_name=None, *args, **kwargs): if not scheduler_driver: scheduler_driver = CONF.scheduler_driver if scheduler_driver in MAPPING: msg_args = { 'old': scheduler_driver, 'new': MAPPING[scheduler_driver], } LOG.warning(_LW("Scheduler driver path %(old)s is deprecated, " "update your configuration to the new path " "%(new)s"), msg_args) scheduler_driver = MAPPING[scheduler_driver] self.driver = importutils.import_object(scheduler_driver) super(SchedulerManager, self).__init__(*args, **kwargs) def init_host(self): ctxt = context.get_admin_context() self.request_service_capabilities(ctxt) def get_host_list(self, context): """Get a list of hosts from the HostManager.""" return self.driver.get_host_list() def get_service_capabilities(self, context): """Get the normalized set of capabilities for this zone.""" return self.driver.get_service_capabilities() def update_service_capabilities(self, context, service_name=None, host=None, capabilities=None, **kwargs): """Process a capability update from a service node.""" if capabilities is None: capabilities = {} self.driver.update_service_capabilities(service_name, host, capabilities) def create_share_instance(self, context, request_spec=None, filter_properties=None): try: self.driver.schedule_create_share(context, request_spec, filter_properties) except exception.NoValidHost as ex: self._set_share_state_and_notify('create_share', {'status': constants.STATUS_ERROR}, context, ex, request_spec) except Exception as ex: with excutils.save_and_reraise_exception(): self._set_share_state_and_notify('create_share', {'status': constants.STATUS_ERROR}, context, ex, request_spec) def get_pools(self, context, filters=None): """Get active pools from the scheduler's cache.""" return self.driver.get_pools(context, filters) def manage_share(self, context, share_id, driver_options, request_spec, filter_properties=None): """Ensure that the host exists and can accept the share.""" def _manage_share_set_error(self, context, ex, request_spec): self._set_share_state_and_notify( 'manage_share', {'status': constants.STATUS_MANAGE_ERROR}, context, ex, request_spec) share_ref = db.share_get(context, share_id) try: self.driver.host_passes_filters( context, share_ref['host'], request_spec, filter_properties) except Exception as ex: with excutils.save_and_reraise_exception(): _manage_share_set_error(self, context, ex, request_spec) else: share_rpcapi.ShareAPI().manage_share(context, share_ref, driver_options) def migrate_share_to_host(self, context, share_id, host, force_host_copy, notify, request_spec, filter_properties=None): """Ensure that the host exists and can accept the share.""" def _migrate_share_set_error(self, context, ex, request_spec): self._set_share_state_and_notify( 'migrate_share_to_host', {'task_state': constants.TASK_STATE_MIGRATION_ERROR}, context, ex, request_spec) try: tgt_host = self.driver.host_passes_filters(context, host, request_spec, filter_properties) except exception.NoValidHost as ex: with excutils.save_and_reraise_exception(): _migrate_share_set_error(self, context, ex, request_spec) except Exception as 
ex: with excutils.save_and_reraise_exception(): _migrate_share_set_error(self, context, ex, request_spec) else: share_ref = db.share_get(context, share_id) try: share_rpcapi.ShareAPI().migration_start( context, share_ref, tgt_host, force_host_copy, notify) except Exception as ex: with excutils.save_and_reraise_exception(): _migrate_share_set_error(self, context, ex, request_spec) def _set_share_state_and_notify(self, method, state, context, ex, request_spec): LOG.error(_LE("Failed to schedule %(method)s: %(ex)s"), {"method": method, "ex": six.text_type(ex)}) properties = request_spec.get('share_properties', {}) share_id = request_spec.get('share_id', None) if share_id: db.share_update(context, share_id, state) payload = dict(request_spec=request_spec, share_properties=properties, share_id=share_id, state=state, method=method, reason=ex) rpc.get_notifier("scheduler").error( context, 'scheduler.' + method, payload) def request_service_capabilities(self, context): share_rpcapi.ShareAPI().publish_service_capabilities(context) def _set_cg_error_state(self, method, context, ex, request_spec): LOG.warning(_LW("Failed to schedule_%(method)s: %(ex)s"), {"method": method, "ex": ex}) cg_state = {'status': constants.STATUS_ERROR} consistency_group_id = request_spec.get('consistency_group_id') if consistency_group_id: db.consistency_group_update(context, consistency_group_id, cg_state) # TODO(ameade): add notifications def create_consistency_group(self, context, cg_id, request_spec=None, filter_properties=None): try: self.driver.schedule_create_consistency_group(context, cg_id, request_spec, filter_properties) except exception.NoValidHost as ex: self._set_cg_error_state('create_consistency_group', context, ex, request_spec) except Exception as ex: with excutils.save_and_reraise_exception(): self._set_cg_error_state('create_consistency_group', context, ex, request_spec) def _set_share_replica_error_state(self, context, method, exc, request_spec): LOG.warning(_LW("Failed to schedule_%(method)s: %(exc)s"), {'method': method, 'exc': exc}) status_updates = { 'status': constants.STATUS_ERROR, 'replica_state': constants.STATUS_ERROR, } share_replica_id = request_spec.get( 'share_instance_properties').get('id') # Set any snapshot instances to 'error'. replica_snapshots = db.share_snapshot_instance_get_all_with_filters( context, {'share_instance_ids': share_replica_id}) for snapshot_instance in replica_snapshots: db.share_snapshot_instance_update( context, snapshot_instance['id'], {'status': constants.STATUS_ERROR}) db.share_replica_update(context, share_replica_id, status_updates) def create_share_replica(self, context, request_spec=None, filter_properties=None): try: self.driver.schedule_create_replica(context, request_spec, filter_properties) except exception.NoValidHost as exc: self._set_share_replica_error_state( context, 'create_share_replica', exc, request_spec) except Exception as exc: with excutils.save_and_reraise_exception(): self._set_share_replica_error_state( context, 'create_share_replica', exc, request_spec) manila-2.0.0/manila/scheduler/base_handler.py0000664000567000056710000000342112701407107022312 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011-2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A common base for handling extension classes. Used by BaseFilterHandler and BaseWeightHandler """ import inspect from stevedore import extension class BaseHandler(object): """Base class to handle loading filter and weight classes.""" def __init__(self, modifier_class_type, modifier_namespace): self.namespace = modifier_namespace self.modifier_class_type = modifier_class_type self.extension_manager = extension.ExtensionManager(modifier_namespace) def _is_correct_class(self, cls): """Check if an object is the correct type. Return whether an object is a class of the correct type and is not prefixed with an underscore. """ return (inspect.isclass(cls) and not cls.__name__.startswith('_') and issubclass(cls, self.modifier_class_type)) def get_all_classes(self): # We use a set, as some classes may have an entrypoint of their own, # and also be returned by a function such as 'all_filters' for example return [ext.plugin for ext in self.extension_manager if self._is_correct_class(ext.plugin)] manila-2.0.0/manila/scheduler/rpcapi.py0000664000567000056710000001141212701407112021154 0ustar jenkinsjenkins00000000000000# Copyright 2012, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Client side of the scheduler manager RPC API. """ from oslo_config import cfg import oslo_messaging as messaging from oslo_serialization import jsonutils from manila import rpc CONF = cfg.CONF class SchedulerAPI(object): """Client side of the scheduler rpc API. API version history: 1.0 - Initial version. 
1.1 - Add get_pools method 1.2 - Introduce Share Instances: Replace create_share() - > create_share_instance() 1.3 - Add create_consistency_group method 1.4 - Add migrate_share_to_host method 1.5 - Add create_share_replica 1.6 - Add manage_share """ RPC_API_VERSION = '1.6' def __init__(self): super(SchedulerAPI, self).__init__() target = messaging.Target(topic=CONF.scheduler_topic, version=self.RPC_API_VERSION) self.client = rpc.get_client(target, version_cap='1.6') def create_share_instance(self, context, request_spec=None, filter_properties=None): request_spec_p = jsonutils.to_primitive(request_spec) call_context = self.client.prepare(version='1.2') return call_context.cast(context, 'create_share_instance', request_spec=request_spec_p, filter_properties=filter_properties) def update_service_capabilities(self, context, service_name, host, capabilities): call_context = self.client.prepare(fanout=True, version='1.0') call_context.cast(context, 'update_service_capabilities', service_name=service_name, host=host, capabilities=capabilities) def get_pools(self, context, filters=None): call_context = self.client.prepare(version='1.1') return call_context.call(context, 'get_pools', filters=filters) def create_consistency_group(self, context, cg_id, request_spec=None, filter_properties=None): request_spec_p = jsonutils.to_primitive(request_spec) call_context = self.client.prepare(version='1.3') return call_context.cast(context, 'create_consistency_group', cg_id=cg_id, request_spec=request_spec_p, filter_properties=filter_properties) def migrate_share_to_host(self, context, share_id, host, force_host_copy, notify, request_spec=None, filter_properties=None): call_context = self.client.prepare(version='1.4') request_spec_p = jsonutils.to_primitive(request_spec) return call_context.call(context, 'migrate_share_to_host', share_id=share_id, host=host, force_host_copy=force_host_copy, notify=notify, request_spec=request_spec_p, filter_properties=filter_properties) def create_share_replica(self, context, request_spec=None, filter_properties=None): request_spec_p = jsonutils.to_primitive(request_spec) call_context = self.client.prepare(version='1.5') return call_context.cast(context, 'create_share_replica', request_spec=request_spec_p, filter_properties=filter_properties) def manage_share(self, context, share_id, driver_options, request_spec=None, filter_properties=None): call_context = self.client.prepare(version='1.6') return call_context.call(context, 'manage_share', share_id=share_id, driver_options=driver_options, request_spec=request_spec, filter_properties=filter_properties) manila-2.0.0/manila/scheduler/filters/0000775000567000056710000000000012701407265021006 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/scheduler/filters/retry.py0000664000567000056710000000306612701407107022525 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
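# A minimal sketch of the 'retry' structure this filter consumes; the values
# below are assumed for illustration only. The scheduler adds previously
# attempted hosts to filter_properties when re-scheduling is enabled:
#
#     filter_properties = {
#         'retry': {
#             'hosts': ['ubuntu@generic1#GENERIC1'],  # hosts already tried
#             'num_attempts': 2,
#         },
#     }
#
# With such input, host_passes() below returns False for
# 'ubuntu@generic1#GENERIC1' and True for any host not tried yet.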
from oslo_log import log from manila.scheduler.filters import base_host LOG = log.getLogger(__name__) class RetryFilter(base_host.BaseHostFilter): """Filter out already tried nodes for scheduling purposes.""" def host_passes(self, host_state, filter_properties): """Skip nodes that have already been attempted.""" retry = filter_properties.get('retry') if not retry: # Re-scheduling is disabled LOG.debug("Re-scheduling is disabled") return True hosts = retry.get('hosts', []) host = host_state.host passes = host not in hosts pass_msg = "passes" if passes else "fails" LOG.debug("Host %(host)s %(pass_msg)s. Previously tried hosts: " "%(hosts)s", {"host": host, "pass_msg": pass_msg, "hosts": hosts}) # Host passes if it's not in the list of previously attempted hosts: return passes manila-2.0.0/manila/scheduler/filters/__init__.py0000664000567000056710000000000012701407107023100 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/scheduler/filters/share_replication.py0000664000567000056710000000705712701407107025057 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Goutham Pacha Ravi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from manila.scheduler.filters import base_host LOG = log.getLogger(__name__) class ShareReplicationFilter(base_host.BaseHostFilter): """ShareReplicationFilter filters hosts based on replication support.""" def host_passes(self, host_state, filter_properties): """Return True if 'active' replica's host can replicate with host. Design of this filter: - Share replication is symmetric. All backends that can replicate between each other must share the same 'replication_domain'. - For scheduling a share that can be replicated in the future, this filter checks for 'replication_domain' capability. - For scheduling a replica, it checks for the 'replication_domain' compatibility. """ active_replica_host = filter_properties.get('request_spec', {}).get( 'active_replica_host') existing_replica_hosts = filter_properties.get('request_spec', {}).get( 'all_replica_hosts', '').split(',') replication_type = filter_properties.get('resource_type', {}).get( 'extra_specs', {}).get('replication_type') active_replica_replication_domain = filter_properties.get( 'replication_domain') host_replication_domain = host_state.replication_domain if replication_type is None: # NOTE(gouthamr): You're probably not creating a replicated # share or a replica, then this host obviously passes. return True elif host_replication_domain is None: msg = "Replication is not enabled on host %s." LOG.debug(msg, host_state.host) return False elif active_replica_host is None: # 'replication_type' filtering will be handled by the # capabilities filter, since it is a share-type extra-spec. 
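            # When a replica is being scheduled (illustrative, assumed
            # values), the request_spec carries the fields consumed below,
            # for example:
            #     'active_replica_host': 'ubuntu@alpha#ALPHA'
            #     'all_replica_hosts': 'ubuntu@alpha#ALPHA,ubuntu@beta#BETA'
            # and filter_properties['replication_domain'] holds the domain
            # reported by the 'active' replica's backend; only hosts in the
            # same domain that do not already appear in 'all_replica_hosts'
            # pass.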
return True # Scheduler filtering by replication_domain for a replica if active_replica_replication_domain != host_replication_domain: msg = ("The replication domain of Host %(host)s is " "'%(host_domain)s' and it does not match the replication " "domain of the 'active' replica's host: " "%(active_replica_host)s, which is '%(arh_domain)s'. ") kwargs = { "host": host_state.host, "host_domain": host_replication_domain, "active_replica_host": active_replica_host, "arh_domain": active_replica_replication_domain, } LOG.debug(msg, kwargs) return False # Check host string for already created replicas if host_state.host in existing_replica_hosts: msg = ("Skipping host %s since it already hosts a replica for " "this share.") LOG.debug(msg, host_state.host) return False return True manila-2.0.0/manila/scheduler/filters/capabilities.py0000664000567000056710000000673412701407107024016 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from manila.scheduler.filters import base_host from manila.scheduler.filters import extra_specs_ops LOG = logging.getLogger(__name__) class CapabilitiesFilter(base_host.BaseHostFilter): """HostFilter to work with resource (instance & volume) type records.""" def _satisfies_extra_specs(self, capabilities, resource_type): """Compare capabilities against extra specs. Check that the capabilities provided by the services satisfy the extra specs associated with the resource type. """ extra_specs = resource_type.get('extra_specs', []) if not extra_specs: return True for key, req in extra_specs.items(): # Either not scoped format, or in capabilities scope scope = key.split(':') # Ignore scoped (such as vendor-specific) capabilities if len(scope) > 1 and scope[0] != "capabilities": continue # Strip off prefix if spec started with 'capabilities:' elif scope[0] == "capabilities": del scope[0] cap = capabilities for index in range(len(scope)): try: cap = cap.get(scope[index]) except AttributeError: cap = None if cap is None: LOG.debug("Host doesn't provide capability '%(cap)s' " "listed in the extra specs", {'cap': scope[index]}) return False # Make all capability values a list so we can handle lists cap_list = [cap] if not isinstance(cap, list) else cap # Loop through capability values looking for any match for cap_value in cap_list: if extra_specs_ops.match(cap_value, req): break else: # Nothing matched, so bail out LOG.debug('Share type extra spec requirement ' '"%(key)s=%(req)s" does not match reported ' 'capability "%(cap)s"', {'key': key, 'req': req, 'cap': cap}) return False return True def host_passes(self, host_state, filter_properties): """Return a list of hosts that can create resource_type.""" # Note(zhiteng) Currently only Cinder and Nova are using # this filter, so the resource type is either instance or # volume. 
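        # Illustrative example (assumed values) of the comparison performed
        # by _satisfies_extra_specs(): a share type carrying extra_specs
        #     {'capabilities:dedupe': '<is> True',
        #      'thin_provisioning': '<is> True'}
        # passes a host reporting capabilities
        #     {'dedupe': True, 'thin_provisioning': True}
        # while vendor-scoped specs such as 'netapp:raid_type' are ignored
        # here.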
resource_type = filter_properties.get('resource_type') if not self._satisfies_extra_specs(host_state.capabilities, resource_type): LOG.debug("%(host_state)s fails resource_type extra_specs " "requirements", {'host_state': host_state}) return False return True manila-2.0.0/manila/scheduler/filters/ignore_attempted_hosts.py0000664000567000056710000000363212701407107026131 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from manila.scheduler.filters import base_host LOG = logging.getLogger(__name__) class IgnoreAttemptedHostsFilter(base_host.BaseHostFilter): """Filter out previously attempted hosts A host passes this filter if it has not already been attempted for scheduling. The scheduler needs to add previously attempted hosts to the 'retry' key of filter_properties in order for this to work correctly. For example:: { 'retry': { 'hosts': ['host1', 'host2'], 'num_attempts': 3, } } """ def host_passes(self, host_state, filter_properties): """Skip nodes that have already been attempted.""" attempted = filter_properties.get('retry') if not attempted: # Re-scheduling is disabled LOG.debug("Re-scheduling is disabled.") return True hosts = attempted.get('hosts', []) host = host_state.host passes = host not in hosts pass_msg = "passes" if passes else "fails" LOG.debug("Host %(host)s %(pass_msg)s. Previously tried hosts: " "%(hosts)s" % {'host': host, 'pass_msg': pass_msg, 'hosts': hosts}) return passes manila-2.0.0/manila/scheduler/filters/base.py0000664000567000056710000000721012701407107022265 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011-2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Filter support """ import logging from manila.i18n import _LI from manila.scheduler import base_handler LOG = logging.getLogger(__name__) class BaseFilter(object): """Base class for all filter classes.""" def _filter_one(self, obj, filter_properties): """Check if an object passes a filter. Return True if it passes the filter, False otherwise. Override this in a subclass. """ return True def filter_all(self, filter_obj_list, filter_properties): """Yield objects that pass the filter. Can be overridden in a subclass, if you need to base filtering decisions on all objects. Otherwise, one can just override _filter_one() to filter a single object. 
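        A minimal subclass (an illustrative sketch; the class name is
        assumed) therefore only needs to provide _filter_one, for example::

            class EnabledOnlyFilter(BaseFilter):
                def _filter_one(self, obj, filter_properties):
                    # Keep only objects that report themselves as enabled.
                    return getattr(obj, 'enabled', False)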
""" for obj in filter_obj_list: if self._filter_one(obj, filter_properties): yield obj # Set to true in a subclass if a filter only needs to be run once # for each request rather than for each instance run_filter_once_per_request = False def run_filter_for_index(self, index): """Check if filter needs to be run for the "index-th" instance. Return True if the filter needs to be run for the "index-th" instance in a request. Only need to override this if a filter needs anything other than "first only" or "all" behaviour. """ return not (self.run_filter_once_per_request and index > 0) class BaseFilterHandler(base_handler.BaseHandler): """Base class to handle loading filter classes. This class should be subclassed where one needs to use filters. """ def get_filtered_objects(self, filter_classes, objs, filter_properties, index=0): """Get objects after filter :param filter_classes: filters that will be used to filter the objects :param objs: objects that will be filtered :param filter_properties: client filter properties :param index: This value needs to be increased in the caller function of get_filtered_objects when handling each resource. """ list_objs = list(objs) LOG.debug("Starting with %d host(s)", len(list_objs)) for filter_cls in filter_classes: cls_name = filter_cls.__name__ filter_class = filter_cls() if filter_class.run_filter_for_index(index): objs = filter_class.filter_all(list_objs, filter_properties) if objs is None: LOG.debug("Filter %(cls_name)s says to stop filtering", {'cls_name': cls_name}) return list_objs = list(objs) msg = (_LI("Filter %(cls_name)s returned %(obj_len)d host(s)") % {'cls_name': cls_name, 'obj_len': len(list_objs)}) if not list_objs: LOG.info(msg) break LOG.debug(msg) return list_objs manila-2.0.0/manila/scheduler/filters/base_host.py0000664000567000056710000000252012701407107023321 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Scheduler host filters """ from manila.scheduler.filters import base class BaseHostFilter(base.BaseFilter): """Base class for host filters.""" def _filter_one(self, obj, filter_properties): """Return True if the object passes the filter, otherwise False.""" return self.host_passes(obj, filter_properties) def host_passes(self, host_state, filter_properties): """Return True if the HostState passes the filter, otherwise False. Override this in a subclass. """ raise NotImplementedError() class HostFilterHandler(base.BaseFilterHandler): def __init__(self, namespace): super(HostFilterHandler, self).__init__(BaseHostFilter, namespace) manila-2.0.0/manila/scheduler/filters/extra_specs_ops.py0000664000567000056710000000465412701407107024565 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import operator from oslo_utils import strutils # 1. The following operations are supported: # =, s==, s!=, s>=, s>, s<=, s<, <in>, <is>, <or>, ==, !=, >=, <= # 2. Note that <or> is handled in a different way below. # 3. If the first word in the extra_specs is not one of the operators, # it is ignored. _op_methods = {'=': lambda x, y: float(x) >= float(y), '<in>': lambda x, y: y in x, '<is>': lambda x, y: (strutils.bool_from_string(x) is strutils.bool_from_string(y)), '==': lambda x, y: float(x) == float(y), '!=': lambda x, y: float(x) != float(y), '>=': lambda x, y: float(x) >= float(y), '<=': lambda x, y: float(x) <= float(y), 's==': operator.eq, 's!=': operator.ne, 's<': operator.lt, 's<=': operator.le, 's>': operator.gt, 's>=': operator.ge} def match(value, req): words = req.split() op = method = None if words: op = words.pop(0) method = _op_methods.get(op) if op != '<or>' and not method: if type(value) is bool: return value == strutils.bool_from_string( req, strict=False, default=req) else: return value == req if value is None: return False if op == '<or>': # Ex: <or> v1 <or> v2 <or> v3 while True: if words.pop(0) == value: return True if not words: break op = words.pop(0) # remove a keyword <or> if not words: break return False try: if words and method(value, words[0]): return True except ValueError: pass return False manila-2.0.0/manila/scheduler/filters/json.py0000664000567000056710000001163212701407107022327 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import operator from oslo_serialization import jsonutils import six from manila.scheduler.filters import base_host class JsonFilter(base_host.BaseHostFilter): """Host Filter to allow simple JSON-based grammar for selecting hosts.""" def _op_compare(self, args, op): """Check if operator can compare the first arg with the others. Returns True if the specified operator can successfully compare the first item in the args with all the rest. Will return False if only one item is in the list.
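        These helpers back the JSON grammar evaluated by host_passes()
        below; for instance, a scheduler hint such as (illustrative, values
        assumed)::

            query = '[">=", "$free_capacity_gb", 100]'

        keeps only hosts whose free_capacity_gb attribute is at least 100.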
""" if len(args) < 2: return False if op is operator.contains: bad = args[0] not in args[1:] else: bad = [arg for arg in args[1:] if not op(args[0], arg)] return not bool(bad) def _equals(self, args): """First term is == all the other terms.""" return self._op_compare(args, operator.eq) def _less_than(self, args): """First term is < all the other terms.""" return self._op_compare(args, operator.lt) def _greater_than(self, args): """First term is > all the other terms.""" return self._op_compare(args, operator.gt) def _in(self, args): """First term is in set of remaining terms.""" return self._op_compare(args, operator.contains) def _less_than_equal(self, args): """First term is <= all the other terms.""" return self._op_compare(args, operator.le) def _greater_than_equal(self, args): """First term is >= all the other terms.""" return self._op_compare(args, operator.ge) def _not(self, args): """Flip each of the arguments.""" return [not arg for arg in args] def _or(self, args): """True if any arg is True.""" return any(args) def _and(self, args): """True if all args are True.""" return all(args) commands = { '=': _equals, '<': _less_than, '>': _greater_than, 'in': _in, '<=': _less_than_equal, '>=': _greater_than_equal, 'not': _not, 'or': _or, 'and': _and, } def _parse_string(self, string, host_state): """Parse string. Strings prefixed with $ are capability lookups in the form '$variable' where 'variable' is an attribute in the HostState class. If $variable is a dictionary, you may use: $variable.dictkey """ if not string: return None if not string.startswith("$"): return string path = string[1:].split(".") obj = getattr(host_state, path[0], None) if obj is None: return None for item in path[1:]: obj = obj.get(item) if obj is None: return None return obj def _process_filter(self, query, host_state): """Recursively parse the query structure.""" if not query: return True cmd = query[0] method = self.commands[cmd] cooked_args = [] for arg in query[1:]: if isinstance(arg, list): arg = self._process_filter(arg, host_state) elif isinstance(arg, six.string_types): arg = self._parse_string(arg, host_state) if arg is not None: cooked_args.append(arg) result = method(self, cooked_args) return result def host_passes(self, host_state, filter_properties): """Filters hosts. Return a list of hosts that can fulfill the requirements specified in the query. """ # TODO(zhiteng) Add description for filter_properties structure # and scheduler_hints. try: query = filter_properties['scheduler_hints']['query'] except KeyError: query = None if not query: return True # NOTE(comstud): Not checking capabilities or service for # enabled/disabled so that a provided json filter can decide result = self._process_filter(jsonutils.loads(query), host_state) if isinstance(result, list): # If any succeeded, include the host result = any(result) if result: # Filter it out. return True return False manila-2.0.0/manila/scheduler/filters/consistency_group.py0000664000567000056710000000403312701407107025130 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Alex Meade # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from manila.scheduler.filters import base_host from manila.share import utils as share_utils LOG = log.getLogger(__name__) class ConsistencyGroupFilter(base_host.BaseHostFilter): """ConsistencyGroupFilter filters host based on compatibility with CG.""" def host_passes(self, host_state, filter_properties): """Return True if host will work with desired consistency group.""" cg = filter_properties.get('consistency_group') cg_support = filter_properties.get('cg_support') # NOTE(ameade): If creating a share not in a CG, then of course the # host is valid for the cg. if not cg: return True # NOTE(ameade): If the CG host can only support shares on the same # pool, then the only valid pool is that one. if cg_support == 'pool' and cg.get('host') == host_state.host: return True # NOTE(ameade): If the CG host can support shares on the same host, # then any pool on that backend will work. elif cg_support == 'host': cg_backend = share_utils.extract_host(cg['host']) host_backend = share_utils.extract_host(host_state.host) return cg_backend == host_backend LOG.debug("Host %(host)s is not compatible with consistency " "group %(cg)s" % {"host": host_state.host, "cg": cg['id']}) return False manila-2.0.0/manila/scheduler/filters/availability_zone.py0000664000567000056710000000241012701407107025055 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011-2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.scheduler.filters import base_host class AvailabilityZoneFilter(base_host.BaseHostFilter): """Filters Hosts by availability zone.""" # Availability zones do not change within a request run_filter_once_per_request = True def host_passes(self, host_state, filter_properties): spec = filter_properties.get('request_spec', {}) props = spec.get('resource_properties', {}) availability_zone_id = props.get('availability_zone_id') if availability_zone_id: return (availability_zone_id == host_state.service['availability_zone_id']) return True manila-2.0.0/manila/scheduler/filters/capacity.py0000664000567000056710000001240112701407107023146 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 Intel # Copyright (c) 2012 OpenStack, LLC. # Copyright (c) 2015 EMC Corporation # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
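# Worked example with assumed numbers for the checks implemented below:
# with total_capacity_gb=100, free_capacity_gb=30 and reserved_percentage=10,
# the usable free space is floor(30 - 100 * 0.10) = 20 GB, so a 15 GB share
# passes while a 25 GB share is rejected. With thin provisioning enabled and
# max_over_subscription_ratio=2.0, the adjusted virtual free space becomes
# 20 * 2.0 = 40 GB, provided the provisioned-capacity ratio check also
# passes.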
import math from oslo_log import log from manila.i18n import _LE from manila.i18n import _LW from manila.scheduler.filters import base_host LOG = log.getLogger(__name__) class CapacityFilter(base_host.BaseHostFilter): """CapacityFilter filters based on share host's capacity utilization.""" def host_passes(self, host_state, filter_properties): """Return True if host has sufficient capacity.""" share_size = filter_properties.get('size') if host_state.free_capacity_gb is None: # Fail Safe LOG.error(_LE("Free capacity not set: " "share node info collection broken.")) return False free_space = host_state.free_capacity_gb total_space = host_state.total_capacity_gb reserved = float(host_state.reserved_percentage) / 100 if free_space == 'unknown': # NOTE(zhiteng) for those back-ends cannot report actual # available capacity, we assume it is able to serve the # request. Even if it was not, the retry mechanism is # able to handle the failure by rescheduling return True elif total_space == 'unknown': # NOTE(xyang): If total_space is 'unknown' and # reserved is 0, we assume the back-ends can serve the request. # If total_space is 'unknown' and reserved # is not 0, we cannot calculate the reserved space. # float(total_space) will throw an exception. total*reserved # also won't work. So the back-ends cannot serve the request. return reserved == 0 and share_size <= free_space total = float(total_space) if total <= 0: LOG.warning(_LW("Insufficient free space for share creation. " "Total capacity is %(total).2f on host %(host)s."), {"total": total, "host": host_state.host}) return False # NOTE(xyang): Calculate how much free space is left after taking # into account the reserved space. free = math.floor(free_space - total * reserved) msg_args = {"host": host_state.host, "requested": share_size, "available": free} LOG.debug("Space information for share creation " "on host %(host)s (requested / avail): " "%(requested)s/%(available)s", msg_args) # NOTE(xyang): Only evaluate using max_over_subscription_ratio # if thin_provisioning is True. Check if the ratio of # provisioned capacity over total capacity would exceed # subscription ratio. # If max_over_subscription_ratio = 1, the provisioned_ratio # should still be limited by the max_over_subscription_ratio; # otherwise, it could result in infinite provisioning. if (host_state.thin_provisioning and host_state.max_over_subscription_ratio >= 1): provisioned_ratio = ((host_state.provisioned_capacity_gb + share_size) / total) if provisioned_ratio > host_state.max_over_subscription_ratio: LOG.warning(_LW( "Insufficient free space for thin provisioning. " "The ratio of provisioned capacity over total capacity " "%(provisioned_ratio).2f would exceed the maximum over " "subscription ratio %(oversub_ratio).2f on host " "%(host)s."), {"provisioned_ratio": provisioned_ratio, "oversub_ratio": host_state.max_over_subscription_ratio, "host": host_state.host}) return False else: # NOTE(xyang): Adjust free_virtual calculation based on # free and max_over_subscription_ratio. adjusted_free_virtual = ( free * host_state.max_over_subscription_ratio) return adjusted_free_virtual >= share_size elif host_state.thin_provisioning: LOG.error(_LE("Invalid max_over_subscription_ratio: %(ratio)s. 
" "Valid value should be >= 1."), {"ratio": host_state.max_over_subscription_ratio}) return False if free < share_size: LOG.warning(_LW("Insufficient free space for share creation " "on host %(host)s (requested / avail): " "%(requested)s/%(available)s"), msg_args) return False return True manila-2.0.0/manila/scheduler/weighers/0000775000567000056710000000000012701407265021153 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/scheduler/weighers/__init__.py0000664000567000056710000000000012701407107023245 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/scheduler/weighers/base.py0000664000567000056710000001074112701407107022435 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011-2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Pluggable Weighing support """ import abc import six from manila.scheduler import base_handler def normalize(weight_list, minval=None, maxval=None): """Normalize the values in a list between 0 and 1.0. The normalization is made regarding the lower and upper values present in weight_list. If the minval and/or maxval parameters are set, these values will be used instead of the minimum and maximum from the list. If all the values are equal, they are normalized to 0. """ if not weight_list: return () if maxval is None: maxval = max(weight_list) if minval is None: minval = min(weight_list) maxval = float(maxval) minval = float(minval) if minval == maxval: return [0] * len(weight_list) range_ = maxval - minval return ((i - minval) / range_ for i in weight_list) class WeighedObject(object): """Object with weight information.""" def __init__(self, obj, weight): self.obj = obj self.weight = weight def __repr__(self): return "" % (self.obj, self.weight) @six.add_metaclass(abc.ABCMeta) class BaseWeigher(object): """Base class for pluggable weighers. The attributes maxval and minval can be specified to set up the maximum and minimum values for the weighed objects. These values will then be taken into account in the normalization step, instead of taking the values from the calculated weighers. """ minval = None maxval = None def weight_multiplier(self): """How weighted this weigher should be. Override this method in a subclass, so that the returned value is read from a configuration option to permit operators specify a multiplier for the weigher. """ return 1.0 @abc.abstractmethod def _weigh_object(self, obj, weight_properties): """Override in a subclass to specify a weight for a specific object.""" def weigh_objects(self, weighed_obj_list, weight_properties): """Weigh multiple objects. Override in a subclass if you need access to all objects in order to calculate weighers. Do not modify the weight of an object here, just return a list of weighers. """ # Calculate the weighers weights = [] for obj in weighed_obj_list: weight = self._weigh_object(obj.obj, weight_properties) # Record the min and max values if they are None. 
If they anything # but none we assume that the weigher has set them if self.minval is None: self.minval = weight if self.maxval is None: self.maxval = weight if weight < self.minval: self.minval = weight elif weight > self.maxval: self.maxval = weight weights.append(weight) return weights class BaseWeightHandler(base_handler.BaseHandler): object_class = WeighedObject def get_weighed_objects(self, weigher_classes, obj_list, weighing_properties): """Return a sorted (descending), normalized list of WeighedObjects.""" if not obj_list: return [] weighed_objs = [self.object_class(obj, 0.0) for obj in obj_list] for weigher_cls in weigher_classes: weigher = weigher_cls() weights = weigher.weigh_objects(weighed_objs, weighing_properties) # Normalize the weighers weights = normalize(weights, minval=weigher.minval, maxval=weigher.maxval) for i, weight in enumerate(weights): obj = weighed_objs[i] obj.weight += weigher.weight_multiplier() * weight return sorted(weighed_objs, key=lambda x: x.weight, reverse=True) manila-2.0.0/manila/scheduler/weighers/base_host.py0000664000567000056710000000241212701407107023466 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Scheduler host weighers """ from manila.scheduler.weighers import base class WeighedHost(base.WeighedObject): def to_dict(self): return { 'weight': self.weight, 'host': self.obj.host, } def __repr__(self): return ("WeighedHost [host: %s, weight: %s]" % (self.obj.host, self.weight)) class BaseHostWeigher(base.BaseWeigher): """Base class for host weighers.""" pass class HostWeightHandler(base.BaseWeightHandler): object_class = WeighedHost def __init__(self, namespace): super(HostWeightHandler, self).__init__(BaseHostWeigher, namespace) manila-2.0.0/manila/scheduler/weighers/pool.py0000664000567000056710000000364312701407107022477 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from manila import context from manila.db import api as db_api from manila.scheduler.weighers import base_host from manila.share import utils pool_weight_opts = [ cfg.FloatOpt('pool_weight_multiplier', default=1.0, help='Multiplier used for weighing pools which have ' 'existing share servers. 
Negative numbers mean to spread' ' vs stack.'), ] CONF = cfg.CONF CONF.register_opts(pool_weight_opts) class PoolWeigher(base_host.BaseHostWeigher): def weight_multiplier(self): """Override the weight multiplier.""" return CONF.pool_weight_multiplier def _weigh_object(self, host_state, weight_properties): """Pools with existing share server win.""" pool_mapping = weight_properties.get('server_pools_mapping', {}) if not pool_mapping: return 0 ctx = context.get_admin_context() host = utils.extract_host(host_state.host, 'backend') servers = db_api.share_server_get_all_by_host(ctx, host) pool = utils.extract_host(host_state.host, 'pool') for server in servers: if any(pool == p['pool_name'] for p in pool_mapping.get( server['id'], [])): return 1 return 0 manila-2.0.0/manila/scheduler/weighers/capacity.py0000664000567000056710000000775712701407107023335 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack, LLC. # Copyright (c) 2015 EMC Corporation # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Capacity Weigher. Weigh hosts by their virtual or actual free capacity. For thin provisioning, weigh hosts by their virtual free capacity calculated by the total capacity multiplied by the max over subscription ratio and subtracting the provisioned capacity; Otherwise, weigh hosts by their actual free capacity, taking into account the reserved space. The default is to spread shares across all hosts evenly. If you prefer stacking, you can set the 'capacity_weight_multiplier' option to a negative number and the weighing has the opposite effect of the default. """ import math from oslo_config import cfg from manila.scheduler.weighers import base_host capacity_weight_opts = [ cfg.FloatOpt('capacity_weight_multiplier', default=1.0, help='Multiplier used for weighing share capacity. ' 'Negative numbers mean to stack vs spread.'), ] CONF = cfg.CONF CONF.register_opts(capacity_weight_opts) class CapacityWeigher(base_host.BaseHostWeigher): def weight_multiplier(self): """Override the weight multiplier.""" return CONF.capacity_weight_multiplier def _weigh_object(self, host_state, weight_properties): """Higher weighers win. We want spreading to be the default.""" reserved = float(host_state.reserved_percentage) / 100 free_space = host_state.free_capacity_gb total_space = host_state.total_capacity_gb if 'unknown' in (total_space, free_space): # NOTE(u_glide): "unknown" capacity always sorts to the bottom if CONF.capacity_weight_multiplier > 0: free = float('-inf') else: free = float('inf') else: total = float(total_space) if host_state.thin_provisioning: # NOTE(xyang): Calculate virtual free capacity for thin # provisioning. free = math.floor( total * host_state.max_over_subscription_ratio - host_state.provisioned_capacity_gb - total * reserved) else: # NOTE(xyang): Calculate how much free space is left after # taking into account the reserved space. 
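                # For example (assumed numbers): total=100,
                # reserved_percentage=10 and free_space=30 give a weight
                # basis of floor(30 - 100 * 0.10) = 20 here, whereas the
                # thin-provisioning branch above with
                # max_over_subscription_ratio=2.0 and
                # provisioned_capacity_gb=50 would yield
                # floor(100 * 2.0 - 50 - 100 * 0.10) = 140.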
free = math.floor(free_space - total * reserved) return free def weigh_objects(self, weighed_obj_list, weight_properties): weights = super(CapacityWeigher, self).weigh_objects(weighed_obj_list, weight_properties) # NOTE(u_glide): Replace -inf with (minimum - 1) and # inf with (maximum + 1) to avoid errors in # manila.scheduler.weighers.base.normalize() method if self.minval == float('-inf'): self.minval = self.maxval for val in weights: if float('-inf') < val < self.minval: self.minval = val self.minval -= 1 return [self.minval if w == float('-inf') else w for w in weights] elif self.maxval == float('inf'): self.maxval = self.minval for val in weights: if self.maxval < val < float('inf'): self.maxval = val self.maxval += 1 return [self.maxval if w == float('inf') else w for w in weights] else: return weights manila-2.0.0/manila/common/0000775000567000056710000000000012701407265016650 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/common/constants.py0000664000567000056710000001243612701407107021237 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. STATUS_NEW = 'new' STATUS_CREATING = 'creating' STATUS_DELETING = 'deleting' STATUS_DELETED = 'deleted' STATUS_ERROR = 'error' STATUS_ERROR_DELETING = 'error_deleting' STATUS_AVAILABLE = 'available' STATUS_ACTIVE = 'active' STATUS_INACTIVE = 'inactive' STATUS_OUT_OF_SYNC = 'out_of_sync' STATUS_UPDATING = 'updating' STATUS_UPDATING_MULTIPLE = 'updating_multiple' STATUS_MANAGING = 'manage_starting' STATUS_MANAGE_ERROR = 'manage_error' STATUS_UNMANAGING = 'unmanage_starting' STATUS_UNMANAGE_ERROR = 'unmanage_error' STATUS_UNMANAGED = 'unmanaged' STATUS_EXTENDING = 'extending' STATUS_EXTENDING_ERROR = 'extending_error' STATUS_SHRINKING = 'shrinking' STATUS_SHRINKING_ERROR = 'shrinking_error' STATUS_MIGRATING = 'migrating' STATUS_MIGRATING_TO = 'migrating_to' STATUS_SHRINKING_POSSIBLE_DATA_LOSS_ERROR = ( 'shrinking_possible_data_loss_error' ) STATUS_REPLICATION_CHANGE = 'replication_change' TASK_STATE_MIGRATION_STARTING = 'migration_starting' TASK_STATE_MIGRATION_IN_PROGRESS = 'migration_in_progress' TASK_STATE_MIGRATION_COMPLETING = 'migration_completing' TASK_STATE_MIGRATION_SUCCESS = 'migration_success' TASK_STATE_MIGRATION_ERROR = 'migration_error' TASK_STATE_MIGRATION_CANCELLED = 'migration_cancelled' TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS = 'migration_driver_in_progress' TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE = 'migration_driver_phase1_done' TASK_STATE_DATA_COPYING_STARTING = 'data_copying_starting' TASK_STATE_DATA_COPYING_IN_PROGRESS = 'data_copying_in_progress' TASK_STATE_DATA_COPYING_COMPLETING = 'data_copying_completing' TASK_STATE_DATA_COPYING_COMPLETED = 'data_copying_completed' TASK_STATE_DATA_COPYING_CANCELLED = 'data_copying_cancelled' TASK_STATE_DATA_COPYING_ERROR = 'data_copying_error' BUSY_TASK_STATES = ( TASK_STATE_MIGRATION_STARTING, TASK_STATE_MIGRATION_IN_PROGRESS, TASK_STATE_MIGRATION_COMPLETING, TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, 
TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, TASK_STATE_DATA_COPYING_STARTING, TASK_STATE_DATA_COPYING_IN_PROGRESS, TASK_STATE_DATA_COPYING_COMPLETING, TASK_STATE_DATA_COPYING_COMPLETED, ) BUSY_COPYING_STATES = ( TASK_STATE_DATA_COPYING_STARTING, TASK_STATE_DATA_COPYING_IN_PROGRESS, TASK_STATE_DATA_COPYING_COMPLETING, ) TRANSITIONAL_STATUSES = ( STATUS_CREATING, STATUS_DELETING, STATUS_MANAGING, STATUS_UNMANAGING, STATUS_EXTENDING, STATUS_SHRINKING, STATUS_MIGRATING, STATUS_MIGRATING_TO, ) UPDATING_RULES_STATUSES = ( STATUS_UPDATING, STATUS_UPDATING_MULTIPLE, ) SUPPORTED_SHARE_PROTOCOLS = ( 'NFS', 'CIFS', 'GLUSTERFS', 'HDFS', 'CEPHFS') SECURITY_SERVICES_ALLOWED_TYPES = ['active_directory', 'ldap', 'kerberos'] NFS_EXPORTS_FILE = '/etc/exports' NFS_EXPORTS_FILE_TEMP = '/var/lib/nfs/etab' MOUNT_FILE = '/etc/fstab' MOUNT_FILE_TEMP = '/etc/mtab' # Below represented ports are ranges (from, to) CIFS_PORTS = ( ("tcp", (445, 445)), ("tcp", (137, 139)), ("udp", (137, 139)), ("udp", (445, 445)), ) NFS_PORTS = ( ("tcp", (2049, 2049)), ("udp", (2049, 2049)), ) SSH_PORTS = ( ("tcp", (22, 22)), ) PING_PORTS = ( ("icmp", (-1, -1)), ) WINRM_PORTS = ( ("tcp", (5985, 5986)), ) SERVICE_INSTANCE_SECGROUP_DATA = ( CIFS_PORTS + NFS_PORTS + SSH_PORTS + PING_PORTS + WINRM_PORTS) ACCESS_LEVEL_RW = 'rw' ACCESS_LEVEL_RO = 'ro' ACCESS_LEVELS = ( ACCESS_LEVEL_RW, ACCESS_LEVEL_RO, ) TASK_STATE_STATUSES = ( TASK_STATE_MIGRATION_STARTING, TASK_STATE_MIGRATION_IN_PROGRESS, TASK_STATE_MIGRATION_COMPLETING, TASK_STATE_MIGRATION_SUCCESS, TASK_STATE_MIGRATION_ERROR, TASK_STATE_MIGRATION_CANCELLED, TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, TASK_STATE_DATA_COPYING_STARTING, TASK_STATE_DATA_COPYING_IN_PROGRESS, TASK_STATE_DATA_COPYING_COMPLETING, TASK_STATE_DATA_COPYING_COMPLETED, TASK_STATE_DATA_COPYING_CANCELLED, TASK_STATE_DATA_COPYING_ERROR ) REPLICA_STATE_ACTIVE = 'active' REPLICA_STATE_IN_SYNC = 'in_sync' REPLICA_STATE_OUT_OF_SYNC = 'out_of_sync' class ExtraSpecs(object): # Extra specs key names DRIVER_HANDLES_SHARE_SERVERS = "driver_handles_share_servers" SNAPSHOT_SUPPORT = "snapshot_support" REPLICATION_TYPE_SPEC = "replication_type" # Extra specs containers REQUIRED = ( DRIVER_HANDLES_SHARE_SERVERS, ) UNDELETABLE = ( DRIVER_HANDLES_SHARE_SERVERS, SNAPSHOT_SUPPORT, ) # NOTE(cknight): Some extra specs are necessary parts of the Manila API and # should be visible to non-admin users. UNDELETABLE specs are user-visible. TENANT_VISIBLE = UNDELETABLE + (REPLICATION_TYPE_SPEC, ) BOOLEAN = ( DRIVER_HANDLES_SHARE_SERVERS, SNAPSHOT_SUPPORT, ) manila-2.0.0/manila/common/__init__.py0000664000567000056710000000000012701407107020742 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/common/config.py0000664000567000056710000002240312701407112020457 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Command-line flag library. Emulates gflags by wrapping cfg.ConfigOpts. The idea is to move fully to cfg eventually, and this wrapper is a stepping stone. """ import socket from oslo_config import cfg from oslo_log import log from oslo_middleware import cors from oslo_utils import netutils import six from manila.common import constants from manila import exception from manila.i18n import _ CONF = cfg.CONF log.register_options(CONF) core_opts = [ cfg.StrOpt('api_paste_config', default="api-paste.ini", help='File name for the paste.deploy config for manila-api.'), cfg.StrOpt('state_path', default='/var/lib/manila', help="Top-level directory for maintaining manila's state."), cfg.StrOpt('os_region_name', help='Region name of this node.'), ] debug_opts = [ ] CONF.register_cli_opts(core_opts) CONF.register_cli_opts(debug_opts) global_opts = [ cfg.StrOpt('my_ip', default=netutils.get_my_ipv4(), help='IP address of this host.'), cfg.StrOpt('scheduler_topic', default='manila-scheduler', help='The topic scheduler nodes listen on.'), cfg.StrOpt('share_topic', default='manila-share', help='The topic share nodes listen on.'), cfg.StrOpt('data_topic', default='manila-data', help='The topic data nodes listen on.'), cfg.BoolOpt('enable_v1_api', default=False, help=_('Deploy v1 of the Manila API. This option is ' 'deprecated, is not used, and will be removed ' 'in a future release.')), cfg.BoolOpt('enable_v2_api', default=False, help=_('Deploy v2 of the Manila API. This option is ' 'deprecated, is not used, and will be removed ' 'in a future release.')), cfg.BoolOpt('api_rate_limit', default=True, help='Whether to rate limit the API.'), cfg.ListOpt('osapi_share_ext_list', default=[], help='Specify list of extensions to load when using osapi_' 'share_extension option with manila.api.contrib.' 'select_extensions.'), cfg.ListOpt('osapi_share_extension', default=['manila.api.contrib.standard_extensions'], help='The osapi share extensions to load.'), cfg.StrOpt('sqlite_db', default='manila.sqlite', help='The filename to use with sqlite.'), cfg.BoolOpt('sqlite_synchronous', default=True, help='If passed, use synchronous mode for sqlite.'), cfg.IntOpt('sql_idle_timeout', default=3600, help='Timeout before idle SQL connections are reaped.'), cfg.IntOpt('sql_max_retries', default=10, help='Maximum database connection retries during startup. ' '(setting -1 implies an infinite retry count).'), cfg.IntOpt('sql_retry_interval', default=10, help='Interval between retries of opening a SQL connection.'), cfg.StrOpt('scheduler_manager', default='manila.scheduler.manager.SchedulerManager', help='Full class name for the scheduler manager.'), cfg.StrOpt('share_manager', default='manila.share.manager.ShareManager', help='Full class name for the share manager.'), cfg.StrOpt('data_manager', default='manila.data.manager.DataManager', help='Full class name for the data manager.'), cfg.StrOpt('host', default=socket.gethostname(), help='Name of this node. This can be an opaque identifier. 
' 'It is not necessarily a hostname, FQDN, or IP address.'), # NOTE(vish): default to nova for compatibility with nova installs cfg.StrOpt('storage_availability_zone', default='nova', help='Availability zone of this node.'), cfg.StrOpt('default_share_type', help='Default share type to use.'), cfg.ListOpt('memcached_servers', help='Memcached servers or None for in process cache.'), cfg.StrOpt('share_usage_audit_period', default='month', help='Time period to generate share usages for. ' 'Time period must be hour, day, month or year.'), cfg.StrOpt('root_helper', default='sudo', help='Deprecated: command to use for running commands as ' 'root.'), cfg.StrOpt('rootwrap_config', help='Path to the rootwrap configuration file to use for ' 'running commands as root.'), cfg.BoolOpt('monkey_patch', default=False, help='Whether to log monkey patching.'), cfg.ListOpt('monkey_patch_modules', default=[], help='List of modules or decorators to monkey patch.'), cfg.IntOpt('service_down_time', default=60, help='Maximum time since last check-in for up service.'), cfg.StrOpt('share_api_class', default='manila.share.api.API', help='The full class name of the share API class to use.'), cfg.StrOpt('auth_strategy', default='keystone', help='The strategy to use for auth. Supports noauth, keystone, ' 'and deprecated.'), cfg.ListOpt('enabled_share_backends', help='A list of share backend names to use. These backend ' 'names should be backed by a unique [CONFIG] group ' 'with its options.'), cfg.ListOpt('enabled_share_protocols', default=['NFS', 'CIFS'], help="Specify list of protocols to be allowed for share " "creation. Available values are '%s'" % six.text_type( constants.SUPPORTED_SHARE_PROTOCOLS)), ] CONF.register_opts(global_opts) def verify_share_protocols(): """Perfom verification of 'enabled_share_protocols'.""" msg = None supported_protocols = constants.SUPPORTED_SHARE_PROTOCOLS data = dict(supported=', '.join(supported_protocols)) if CONF.enabled_share_protocols: for share_proto in CONF.enabled_share_protocols: if share_proto.upper() not in supported_protocols: data.update({'share_proto': share_proto}) msg = ("Unsupported share protocol '%(share_proto)s' " "is set as enabled. Available values are " "%(supported)s. ") break else: msg = ("No share protocols were specified as enabled. " "Available values are %(supported)s. ") if msg: msg += ("Please specify one or more protocols using " "configuration option 'enabled_share_protocols'.") # NOTE(vponomaryov): use translation to unicode explicitly, # because of 'lazy' translations. 
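        # For example (assumed value), enabled_share_protocols = ['NFS',
        # 'FTP'] reaches this point because 'FTP' is not in
        # SUPPORTED_SHARE_PROTOCOLS, while the default ['NFS', 'CIFS']
        # never sets msg and raises nothing.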
msg = six.text_type(_(msg) % data) # noqa H701 raise exception.ManilaException(message=msg) def set_middleware_defaults(): """Update default configuration options for oslo.middleware.""" # CORS Defaults # TODO(krotscheck): Update with https://review.openstack.org/#/c/285368/ cfg.set_defaults(cors.CORS_OPTS, allow_headers=['X-Auth-Token', 'X-OpenStack-Request-ID', 'X-Openstack-Manila-Api-Version', 'X-OpenStack-Manila-API-Experimental', 'X-Identity-Status', 'X-Roles', 'X-Service-Catalog', 'X-User-Id', 'X-Tenant-Id'], expose_headers=['X-Auth-Token', 'X-OpenStack-Request-ID', 'X-Openstack-Manila-Api-Version', 'X-OpenStack-Manila-API-Experimental', 'X-Subject-Token', 'X-Service-Token'], allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'] ) manila-2.0.0/manila/common/client_auth.py0000664000567000056710000000754712701407107021531 0ustar jenkinsjenkins00000000000000# Copyright 2016 SAP SE # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from keystoneauth1 import loading as ks_loading from keystoneauth1.loading._plugins.identity import v2 from oslo_config import cfg from oslo_log import log from manila import exception from manila.i18n import _ from manila.i18n import _LW CONF = cfg.CONF LOG = log.getLogger(__name__) """Helper class to support keystone v2 and v3 for clients Builds auth and session context before instantiation of the actual client. In order to build this context a dedicated config group is needed to load all needed parameters dynamically. 
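A rough usage sketch (the client class, exception module, and config group
name below are placeholders, not values defined in this module)::

    from manila.common import client_auth

    AUTH_OBJ = client_auth.AuthClientLoader(
        client_class=some_client.Client,
        exception_module=some_client_exceptions,
        cfg_group='some_service')

    def get_some_client(context):
        # The session and auth plugin are built lazily on the first call.
        return AUTH_OBJ.get_client(context, admin=True)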
""" class AuthClientLoader(object): def __init__(self, client_class, exception_module, cfg_group, deprecated_opts_for_v2=None): self.client_class = client_class self.exception_module = exception_module self.group = cfg_group self.admin_auth = None self.conf = CONF self.session = None self.auth_plugin = None self.deprecated_opts_for_v2 = deprecated_opts_for_v2 @staticmethod def list_opts(group): """Generates a list of config option for a given group :param group: group name :return: list of auth default configuration """ opts = copy.deepcopy(ks_loading.register_session_conf_options( CONF, group)) opts.insert(0, ks_loading.get_auth_common_conf_options()[0]) for plugin_option in ks_loading.get_auth_plugin_conf_options( 'password'): found = False for option in opts: if option.name == plugin_option.name: found = True break if not found: opts.append(plugin_option) opts.sort(key=lambda x: x.name) return [(group, opts)] def _load_auth_plugin(self): if self.admin_auth: return self.admin_auth self.auth_plugin = ks_loading.load_auth_from_conf_options( CONF, self.group) if self.deprecated_opts_for_v2 and not self.auth_plugin: LOG.warn(_LW("Not specifying auth options is deprecated")) self.auth_plugin = v2.Password().load_from_options( **self.deprecated_opts_for_v2) if self.auth_plugin: return self.auth_plugin msg = _('Cannot load auth plugin for %s') % self.group raise self.exception_module.Unauthorized(message=msg) def get_client(self, context, admin=False, **kwargs): """Get's the client with the correct auth/session context """ auth_plugin = None if not self.session: self.session = ks_loading.load_session_from_conf_options( self.conf, self.group) if admin or (context.is_admin and not context.auth_token): if not self.admin_auth: self.admin_auth = self._load_auth_plugin() auth_plugin = self.admin_auth else: # NOTE(mkoderer): Manila basically needs admin clients for # it's actions. If needed this must be enhanced later raise exception.ManilaException( _("Client (%s) is not flagged as admin") % self.group) return self.client_class(session=self.session, auth=auth_plugin, **kwargs) manila-2.0.0/manila/version.py0000664000567000056710000000157412701407107017421 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from pbr import version as pbr_version MANILA_VENDOR = "OpenStack Foundation" MANILA_PRODUCT = "OpenStack Manila" MANILA_PACKAGE = None # OS distro package version suffix loaded = False version_info = pbr_version.VersionInfo('manila') version_string = version_info.version_string manila-2.0.0/manila/db/0000775000567000056710000000000012701407265015745 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/db/__init__.py0000664000567000056710000000144212701407107020052 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ DB abstraction for Manila """ from manila.db.api import * # noqa manila-2.0.0/manila/db/sqlalchemy/0000775000567000056710000000000012701407265020107 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/db/sqlalchemy/__init__.py0000664000567000056710000000000012701407107022201 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/db/sqlalchemy/query.py0000664000567000056710000000301312701407107021616 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db.sqlalchemy import orm import sqlalchemy from manila.common import constants class Query(orm.Query): def soft_delete(self, synchronize_session='evaluate', update_status=False, status_field_name='status'): if update_status: setattr(self, status_field_name, constants.STATUS_DELETED) return super(Query, self).soft_delete(synchronize_session) def get_maker(engine, autocommit=True, expire_on_commit=False): """Return a SQLAlchemy sessionmaker using the given engine.""" return sqlalchemy.orm.sessionmaker(bind=engine, class_=orm.Session, autocommit=autocommit, expire_on_commit=expire_on_commit, query_cls=Query) # NOTE(uglide): Monkey patch oslo_db get_maker() function to use custom Query orm.get_maker = get_maker manila-2.0.0/manila/db/sqlalchemy/models.py0000664000567000056710000011045512701407107021745 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SQLAlchemy models for Manila data. 
""" from oslo_config import cfg from oslo_db.sqlalchemy import models from oslo_log import log from sqlalchemy import Column, Integer, String, schema from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import orm from sqlalchemy import ForeignKey, DateTime, Boolean, Enum from manila.common import constants CONF = cfg.CONF BASE = declarative_base() LOG = log.getLogger(__name__) class ManilaBase(models.ModelBase, models.TimestampMixin, models.SoftDeleteMixin): """Base class for Manila Models.""" __table_args__ = {'mysql_engine': 'InnoDB'} metadata = None def to_dict(self): model_dict = {} for k, v in self.items(): if not issubclass(type(v), ManilaBase): model_dict[k] = v return model_dict def soft_delete(self, session, update_status=False, status_field_name='status'): """Mark this object as deleted.""" if update_status: setattr(self, status_field_name, constants.STATUS_DELETED) return super(ManilaBase, self).soft_delete(session) class Service(BASE, ManilaBase): """Represents a running service on a host.""" __tablename__ = 'services' id = Column(Integer, primary_key=True) host = Column(String(255)) # , ForeignKey('hosts.id')) binary = Column(String(255)) topic = Column(String(255)) report_count = Column(Integer, nullable=False, default=0) disabled = Column(Boolean, default=False) availability_zone_id = Column(String(36), ForeignKey('availability_zones.id'), nullable=True) availability_zone = orm.relationship( "AvailabilityZone", lazy='immediate', primaryjoin=( 'and_(' 'Service.availability_zone_id == ' 'AvailabilityZone.id, ' 'AvailabilityZone.deleted == \'False\')' ) ) class ManilaNode(BASE, ManilaBase): """Represents a running manila service on a host.""" __tablename__ = 'manila_nodes' id = Column(Integer, primary_key=True) service_id = Column(Integer, ForeignKey('services.id'), nullable=True) class Quota(BASE, ManilaBase): """Represents a single quota override for a project. If there is no row for a given project id and resource, then the default for the quota class is used. If there is no row for a given quota class and resource, then the default for the deployment is used. If the row is present but the hard limit is Null, then the resource is unlimited. """ __tablename__ = 'quotas' id = Column(Integer, primary_key=True) project_id = Column(String(255), index=True) resource = Column(String(255)) hard_limit = Column(Integer, nullable=True) class ProjectUserQuota(BASE, ManilaBase): """Represents a single quota override for a user with in a project.""" __tablename__ = 'project_user_quotas' id = Column(Integer, primary_key=True, nullable=False) project_id = Column(String(255), nullable=False) user_id = Column(String(255), nullable=False) resource = Column(String(255), nullable=False) hard_limit = Column(Integer) class QuotaClass(BASE, ManilaBase): """Represents a single quota override for a quota class. If there is no row for a given quota class and resource, then the default for the deployment is used. If the row is present but the hard limit is Null, then the resource is unlimited. 
""" __tablename__ = 'quota_classes' id = Column(Integer, primary_key=True) class_name = Column(String(255), index=True) resource = Column(String(255)) hard_limit = Column(Integer, nullable=True) class QuotaUsage(BASE, ManilaBase): """Represents the current usage for a given resource.""" __tablename__ = 'quota_usages' id = Column(Integer, primary_key=True) project_id = Column(String(255), index=True) user_id = Column(String(255)) resource = Column(String(255)) in_use = Column(Integer) reserved = Column(Integer) @property def total(self): return self.in_use + self.reserved until_refresh = Column(Integer, nullable=True) class Reservation(BASE, ManilaBase): """Represents a resource reservation for quotas.""" __tablename__ = 'reservations' id = Column(Integer, primary_key=True) uuid = Column(String(36), nullable=False) usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False) project_id = Column(String(255), index=True) user_id = Column(String(255)) resource = Column(String(255)) delta = Column(Integer) expire = Column(DateTime, nullable=False) # usage = orm.relationship( # "QuotaUsage", # foreign_keys=usage_id, # primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,' # 'QuotaUsage.deleted == 0)') class Share(BASE, ManilaBase): """Represents an NFS and CIFS shares.""" __tablename__ = 'shares' _extra_keys = ['name', 'export_location', 'export_locations', 'status', 'host', 'share_server_id', 'share_network_id', 'availability_zone', 'access_rules_status'] @property def name(self): return CONF.share_name_template % self.id @property def export_location(self): if len(self.instances) > 0: return self.instance.export_location @property def is_busy(self): # Make sure share is not busy, i.e., not part of a migration if self.task_state in constants.BUSY_TASK_STATES: return True return False @property def export_locations(self): # TODO(gouthamr): Return AZ specific export locations for replicated # shares. # NOTE(gouthamr): For a replicated share, export locations of the # 'active' instances are chosen, if 'available'. all_export_locations = [] select_instances = list(filter( lambda x: x['replica_state'] == constants.REPLICA_STATE_ACTIVE, self.instances)) or self.instances for instance in select_instances: if instance['status'] == constants.STATUS_AVAILABLE: for export_location in instance.export_locations: all_export_locations.append(export_location['path']) return all_export_locations def __getattr__(self, item): deprecated_properties = ('host', 'share_server_id', 'share_network_id', 'availability_zone') proxified_properties = ('status',) + deprecated_properties if item in deprecated_properties: msg = ("Property '%s' is deprecated. Please use appropriate " "property from share instance." % item) LOG.warning(msg) if item in proxified_properties: return getattr(self.instance, item, None) raise AttributeError(item) @property def share_server_id(self): return self.__getattr__('share_server_id') @property def has_replicas(self): if len(self.instances) > 1: # NOTE(gouthamr): The 'primary' instance of a replicated share # has a 'replica_state' set to 'active'. Only the secondary replica # instances need to be regarded as true 'replicas' by users. replicas = (list(filter(lambda x: x['replica_state'] is not None, self.instances))) return len(replicas) > 1 return False @property def instance(self): # NOTE(gouthamr): The order of preference: status 'replication_change', # followed by 'available' and 'error'. 
If replicated share and # not undergoing a 'replication_change', only 'active' instances are # preferred. result = None if len(self.instances) > 0: order = (constants.STATUS_REPLICATION_CHANGE, constants.STATUS_MIGRATING, constants.STATUS_AVAILABLE, constants.STATUS_ERROR) other_statuses = ( [x['status'] for x in self.instances if x['status'] not in order and x['status'] not in constants.TRANSITIONAL_STATUSES] ) order = (order + tuple(other_statuses) + constants.TRANSITIONAL_STATUSES) sorted_instances = sorted( self.instances, key=lambda x: order.index(x['status'])) select_instances = sorted_instances if (select_instances[0]['status'] != constants.STATUS_REPLICATION_CHANGE): select_instances = ( list(filter(lambda x: x['replica_state'] == constants.REPLICA_STATE_ACTIVE, sorted_instances)) or sorted_instances ) result = select_instances[0] return result @property def access_rules_status(self): return get_access_rules_status(self.instances) id = Column(String(36), primary_key=True) deleted = Column(String(36), default='False') user_id = Column(String(255)) project_id = Column(String(255)) size = Column(Integer) display_name = Column(String(255)) display_description = Column(String(255)) snapshot_id = Column(String(36)) snapshot_support = Column(Boolean, default=True) replication_type = Column(String(255), nullable=True) share_proto = Column(String(255)) share_type_id = Column(String(36), ForeignKey('share_types.id'), nullable=True) is_public = Column(Boolean, default=False) consistency_group_id = Column(String(36), ForeignKey('consistency_groups.id'), nullable=True) source_cgsnapshot_member_id = Column(String(36), nullable=True) task_state = Column(String(255)) instances = orm.relationship( "ShareInstance", lazy='immediate', primaryjoin=( 'and_(' 'Share.id == ShareInstance.share_id, ' 'ShareInstance.deleted == "False")' ), viewonly=True, join_depth=2, ) share_type = orm.relationship( "ShareTypes", lazy=True, foreign_keys=share_type_id, primaryjoin='and_(' 'Share.share_type_id == ShareTypes.id, ' 'ShareTypes.deleted == "False")') class ShareInstance(BASE, ManilaBase): __tablename__ = 'share_instances' _extra_keys = ['name', 'export_location', 'availability_zone', 'replica_state'] _proxified_properties = ('user_id', 'project_id', 'size', 'display_name', 'display_description', 'snapshot_id', 'share_proto', 'share_type_id', 'is_public', 'consistency_group_id', 'source_cgsnapshot_member_id') def set_share_data(self, share): for share_property in self._proxified_properties: setattr(self, share_property, share[share_property]) @property def name(self): return CONF.share_name_template % self.id @property def export_location(self): if len(self.export_locations) > 0: return self.export_locations[0]['path'] @property def availability_zone(self): if self._availability_zone: return self._availability_zone['name'] id = Column(String(36), primary_key=True) share_id = Column(String(36), ForeignKey('shares.id')) deleted = Column(String(36), default='False') host = Column(String(255)) status = Column(String(255)) ACCESS_STATUS_PRIORITIES = { constants.STATUS_ACTIVE: 0, constants.STATUS_OUT_OF_SYNC: 1, constants.STATUS_UPDATING: 2, constants.STATUS_UPDATING_MULTIPLE: 3, constants.STATUS_ERROR: 4, } access_rules_status = Column(Enum(constants.STATUS_ACTIVE, constants.STATUS_OUT_OF_SYNC, constants.STATUS_UPDATING, constants.STATUS_UPDATING_MULTIPLE, constants.STATUS_ERROR), default=constants.STATUS_ACTIVE) scheduled_at = Column(DateTime) launched_at = Column(DateTime) terminated_at = Column(DateTime) 
replica_state = Column(String(255), nullable=True) availability_zone_id = Column(String(36), ForeignKey('availability_zones.id'), nullable=True) _availability_zone = orm.relationship( "AvailabilityZone", lazy='immediate', foreign_keys=availability_zone_id, primaryjoin=( 'and_(' 'ShareInstance.availability_zone_id == ' 'AvailabilityZone.id, ' 'AvailabilityZone.deleted == \'False\')' ) ) export_locations = orm.relationship( "ShareInstanceExportLocations", lazy='immediate', primaryjoin=( 'and_(' 'ShareInstance.id == ' 'ShareInstanceExportLocations.share_instance_id, ' 'ShareInstanceExportLocations.deleted == 0)' ) ) share_network_id = Column(String(36), ForeignKey('share_networks.id'), nullable=True) share_server_id = Column(String(36), ForeignKey('share_servers.id'), nullable=True) class ShareInstanceExportLocations(BASE, ManilaBase): """Represents export locations of share instances.""" __tablename__ = 'share_instance_export_locations' _extra_keys = ['el_metadata', ] @property def el_metadata(self): el_metadata = {} for meta in self._el_metadata_bare: # pylint: disable=E1101 el_metadata[meta['key']] = meta['value'] return el_metadata id = Column(Integer, primary_key=True) uuid = Column(String(36), nullable=False, unique=True) share_instance_id = Column( String(36), ForeignKey('share_instances.id'), nullable=False) path = Column(String(2000)) is_admin_only = Column(Boolean, default=False, nullable=False) class ShareInstanceExportLocationsMetadata(BASE, ManilaBase): """Represents export location metadata of share instances.""" __tablename__ = "share_instance_export_locations_metadata" _extra_keys = ['export_location_uuid', ] id = Column(Integer, primary_key=True) export_location_id = Column( Integer, ForeignKey("share_instance_export_locations.id"), nullable=False) key = Column(String(255), nullable=False) value = Column(String(1023), nullable=False) export_location = orm.relationship( ShareInstanceExportLocations, backref="_el_metadata_bare", foreign_keys=export_location_id, lazy='immediate', primaryjoin="and_(" "%(cls_name)s.export_location_id == " "ShareInstanceExportLocations.id," "%(cls_name)s.deleted == 0)" % { "cls_name": "ShareInstanceExportLocationsMetadata"}) @property def export_location_uuid(self): return self.export_location.uuid # pylint: disable=E1101 class ShareTypes(BASE, ManilaBase): """Represent possible share_types of volumes offered.""" __tablename__ = "share_types" id = Column(String(36), primary_key=True) deleted = Column(String(36), default='False') name = Column(String(255)) is_public = Column(Boolean, default=True) class ShareTypeProjects(BASE, ManilaBase): """Represent projects associated share_types.""" __tablename__ = "share_type_projects" __table_args__ = (schema.UniqueConstraint( "share_type_id", "project_id", "deleted", name="uniq_share_type_projects0share_type_id0project_id0deleted"), ) id = Column(Integer, primary_key=True) share_type_id = Column(Integer, ForeignKey('share_types.id'), nullable=False) project_id = Column(String(255)) share_type = orm.relationship( ShareTypes, backref="projects", foreign_keys=share_type_id, primaryjoin='and_(' 'ShareTypeProjects.share_type_id == ShareTypes.id,' 'ShareTypeProjects.deleted == 0)') class ShareTypeExtraSpecs(BASE, ManilaBase): """Represents additional specs as key/value pairs for a share_type.""" __tablename__ = 'share_type_extra_specs' id = Column(Integer, primary_key=True) key = Column("spec_key", String(255)) value = Column("spec_value", String(255)) share_type_id = Column(String(36), 
ForeignKey('share_types.id'), nullable=False) share_type = orm.relationship( ShareTypes, backref="extra_specs", foreign_keys=share_type_id, primaryjoin='and_(' 'ShareTypeExtraSpecs.share_type_id == ShareTypes.id,' 'ShareTypeExtraSpecs.deleted == 0)' ) class ShareMetadata(BASE, ManilaBase): """Represents a metadata key/value pair for a share.""" __tablename__ = 'share_metadata' id = Column(Integer, primary_key=True) key = Column(String(255), nullable=False) value = Column(String(1023), nullable=False) share_id = Column(String(36), ForeignKey('shares.id'), nullable=False) share = orm.relationship(Share, backref="share_metadata", foreign_keys=share_id, primaryjoin='and_(' 'ShareMetadata.share_id == Share.id,' 'ShareMetadata.deleted == 0)') class ShareAccessMapping(BASE, ManilaBase): """Represents access to share.""" __tablename__ = 'share_access_map' id = Column(String(36), primary_key=True) deleted = Column(String(36), default='False') share_id = Column(String(36), ForeignKey('shares.id')) access_type = Column(String(255)) access_to = Column(String(255)) access_level = Column(Enum(*constants.ACCESS_LEVELS), default=constants.ACCESS_LEVEL_RW) instance_mappings = orm.relationship( "ShareInstanceAccessMapping", lazy='immediate', primaryjoin=( 'and_(' 'ShareAccessMapping.id == ' 'ShareInstanceAccessMapping.access_id, ' 'ShareInstanceAccessMapping.deleted == "False")' ) ) @property def state(self): instances = [im.instance for im in self.instance_mappings] access_rules_status = get_access_rules_status(instances) if access_rules_status in ( constants.STATUS_OUT_OF_SYNC, constants.STATUS_UPDATING, constants.STATUS_UPDATING_MULTIPLE): return constants.STATUS_NEW else: return access_rules_status class ShareInstanceAccessMapping(BASE, ManilaBase): """Represents access to individual share instances.""" __tablename__ = 'share_instance_access_map' id = Column(String(36), primary_key=True) deleted = Column(String(36), default='False') share_instance_id = Column(String(36), ForeignKey('share_instances.id')) access_id = Column(String(36), ForeignKey('share_access_map.id')) instance = orm.relationship( "ShareInstance", lazy='immediate', primaryjoin=( 'and_(' 'ShareInstanceAccessMapping.share_instance_id == ' 'ShareInstance.id, ' 'ShareInstanceAccessMapping.deleted == "False")' ) ) class ShareSnapshot(BASE, ManilaBase): """Represents a snapshot of a share.""" __tablename__ = 'share_snapshots' _extra_keys = ['name', 'share_name', 'status', 'progress', 'provider_location', 'aggregate_status'] def __getattr__(self, item): proxified_properties = ('status', 'progress', 'provider_location') if item in proxified_properties: return getattr(self.instance, item, None) raise AttributeError(item) @property def name(self): return CONF.share_snapshot_name_template % self.id @property def share_name(self): return CONF.share_name_template % self.share_id @property def instance(self): result = None if len(self.instances) > 0: def qualified_replica(x): preferred_statuses = (constants.REPLICA_STATE_ACTIVE,) return x['replica_state'] in preferred_statuses replica_snapshots = list(filter( lambda x: qualified_replica(x.share_instance), self.instances)) snapshot_instances = replica_snapshots or self.instances result = snapshot_instances[0] return result @property def aggregate_status(self): """Get the aggregated 'status' of all instances. A snapshot is supposed to be truly 'available' when it is available across all of the share instances of the parent share object. 
In case of replication, we only consider replicas (share instances) that are in 'in_sync' replica_state. """ def qualified_replica(x): preferred_statuses = (constants.REPLICA_STATE_ACTIVE, constants.REPLICA_STATE_IN_SYNC) return x['replica_state'] in preferred_statuses replica_snapshots = list(filter( lambda x: qualified_replica(x['share_instance']), self.instances)) if not replica_snapshots: return self.status order = (constants.STATUS_DELETING, constants.STATUS_CREATING, constants.STATUS_ERROR, constants.STATUS_AVAILABLE) other_statuses = [x['status'] for x in self.instances if x['status'] not in order] order = (order + tuple(other_statuses)) sorted_instances = sorted( replica_snapshots, key=lambda x: order.index(x['status'])) return sorted_instances[0].status id = Column(String(36), primary_key=True) deleted = Column(String(36), default='False') user_id = Column(String(255)) project_id = Column(String(255)) share_id = Column(String(36)) size = Column(Integer) display_name = Column(String(255)) display_description = Column(String(255)) share_size = Column(Integer) share_proto = Column(String(255)) share = orm.relationship(Share, backref="snapshots", foreign_keys=share_id, primaryjoin='and_(' 'ShareSnapshot.share_id == Share.id,' 'ShareSnapshot.deleted == "False")') instances = orm.relationship( "ShareSnapshotInstance", lazy='immediate', primaryjoin=( 'and_(' 'ShareSnapshot.id == ShareSnapshotInstance.snapshot_id, ' 'ShareSnapshotInstance.deleted == "False")' ), viewonly=True, join_depth=2, ) class ShareSnapshotInstance(BASE, ManilaBase): """Represents a snapshot of a share.""" __tablename__ = 'share_snapshot_instances' _extra_keys = ['name', 'share_id', 'share_name'] @property def name(self): return CONF.share_snapshot_name_template % self.id @property def share_name(self): return CONF.share_name_template % self.share_instance_id @property def share_id(self): # NOTE(u_glide): This property required for compatibility # with share drivers return self.share_instance_id id = Column(String(36), primary_key=True) deleted = Column(String(36), default='False') snapshot_id = Column( String(36), ForeignKey('share_snapshots.id'), nullable=False) share_instance_id = Column( String(36), ForeignKey('share_instances.id'), nullable=False) status = Column(String(255)) progress = Column(String(255)) provider_location = Column(String(255)) share_instance = orm.relationship( ShareInstance, backref="snapshot_instances", lazy='immediate', primaryjoin=( 'and_(' 'ShareSnapshotInstance.share_instance_id == ShareInstance.id,' 'ShareSnapshotInstance.deleted == "False")') ) class SecurityService(BASE, ManilaBase): """Security service information for manila shares.""" __tablename__ = 'security_services' id = Column(String(36), primary_key=True) deleted = Column(String(36), default='False') project_id = Column(String(36), nullable=False) type = Column(String(32), nullable=False) dns_ip = Column(String(64), nullable=True) server = Column(String(255), nullable=True) domain = Column(String(255), nullable=True) user = Column(String(255), nullable=True) password = Column(String(255), nullable=True) name = Column(String(255), nullable=True) description = Column(String(255), nullable=True) class ShareNetwork(BASE, ManilaBase): """Represents network data used by share.""" __tablename__ = 'share_networks' id = Column(String(36), primary_key=True, nullable=False) deleted = Column(String(36), default='False') project_id = Column(String(36), nullable=False) user_id = Column(String(36), nullable=False) nova_net_id = 
Column(String(36), nullable=True) neutron_net_id = Column(String(36), nullable=True) neutron_subnet_id = Column(String(36), nullable=True) network_type = Column(String(32), nullable=True) segmentation_id = Column(Integer, nullable=True) cidr = Column(String(64), nullable=True) ip_version = Column(Integer, nullable=True) name = Column(String(255), nullable=True) description = Column(String(255), nullable=True) security_services = orm.relationship( "SecurityService", secondary="share_network_security_service_association", backref="share_networks", primaryjoin='and_(' 'ShareNetwork.id == ' 'ShareNetworkSecurityServiceAssociation.share_network_id,' 'ShareNetworkSecurityServiceAssociation.deleted == 0,' 'ShareNetwork.deleted == "False")', secondaryjoin='and_(' 'SecurityService.id == ' 'ShareNetworkSecurityServiceAssociation.security_service_id,' 'SecurityService.deleted == "False")') share_instances = orm.relationship( "ShareInstance", backref='share_network', primaryjoin='and_(' 'ShareNetwork.id == ShareInstance.share_network_id,' 'ShareInstance.deleted == "False")') share_servers = orm.relationship( "ShareServer", backref='share_network', primaryjoin='and_(ShareNetwork.id == ShareServer.share_network_id,' 'ShareServer.deleted == "False")') class ShareServer(BASE, ManilaBase): """Represents share server used by share.""" __tablename__ = 'share_servers' id = Column(String(36), primary_key=True, nullable=False) deleted = Column(String(36), default='False') share_network_id = Column(String(36), ForeignKey('share_networks.id'), nullable=True) host = Column(String(255), nullable=False) status = Column(Enum(constants.STATUS_INACTIVE, constants.STATUS_ACTIVE, constants.STATUS_ERROR, constants.STATUS_DELETING, constants.STATUS_CREATING, constants.STATUS_DELETED), default=constants.STATUS_INACTIVE) network_allocations = orm.relationship( "NetworkAllocation", primaryjoin='and_(' 'ShareServer.id == NetworkAllocation.share_server_id,' 'NetworkAllocation.deleted == "False")') share_instances = orm.relationship( "ShareInstance", backref='share_server', primaryjoin='and_(' 'ShareServer.id == ShareInstance.share_server_id,' 'ShareInstance.deleted == "False")') consistency_groups = orm.relationship( "ConsistencyGroup", backref='share_server', primaryjoin='and_(' 'ShareServer.id == ConsistencyGroup.share_server_id,' 'ConsistencyGroup.deleted == "False")') _backend_details = orm.relationship( "ShareServerBackendDetails", lazy='immediate', viewonly=True, primaryjoin='and_(' 'ShareServer.id == ' 'ShareServerBackendDetails.share_server_id, ' 'ShareServerBackendDetails.deleted == "False")') @property def backend_details(self): return {model['key']: model['value'] for model in self._backend_details} _extra_keys = ['backend_details'] class ShareServerBackendDetails(BASE, ManilaBase): """Represents a metadata key/value pair for a share server.""" __tablename__ = 'share_server_backend_details' deleted = Column(String(36), default='False') id = Column(Integer, primary_key=True) key = Column(String(255), nullable=False) value = Column(String(1023), nullable=False) share_server_id = Column(String(36), ForeignKey('share_servers.id'), nullable=False) class ShareNetworkSecurityServiceAssociation(BASE, ManilaBase): """Association table between compute_zones and compute_nodes tables.""" __tablename__ = 'share_network_security_service_association' id = Column(Integer, primary_key=True) share_network_id = Column(String(36), ForeignKey('share_networks.id'), nullable=False) security_service_id = Column(String(36), 
ForeignKey('security_services.id'), nullable=False) class NetworkAllocation(BASE, ManilaBase): """Represents network allocation data.""" __tablename__ = 'network_allocations' id = Column(String(36), primary_key=True, nullable=False) deleted = Column(String(36), default='False') label = Column(String(255), nullable=True) ip_address = Column(String(64), nullable=True) ip_version = Column(Integer, nullable=True) cidr = Column(String(64), nullable=True) network_type = Column(String(32), nullable=True) segmentation_id = Column(Integer, nullable=True) mac_address = Column(String(32), nullable=True) share_server_id = Column(String(36), ForeignKey('share_servers.id'), nullable=False) class DriverPrivateData(BASE, ManilaBase): """Represents a private data as key-value pairs for a driver.""" __tablename__ = 'drivers_private_data' host = Column(String(255), nullable=False, primary_key=True) entity_uuid = Column(String(36), nullable=False, primary_key=True) key = Column(String(255), nullable=False, primary_key=True) value = Column(String(1023), nullable=False) class AvailabilityZone(BASE, ManilaBase): """Represents a private data as key-value pairs for a driver.""" __tablename__ = 'availability_zones' id = Column(String(36), primary_key=True, nullable=False) deleted = Column(String(36), default='False') name = Column(String(255), nullable=False) class ConsistencyGroup(BASE, ManilaBase): """Represents a consistency group.""" __tablename__ = 'consistency_groups' id = Column(String(36), primary_key=True) user_id = Column(String(255), nullable=False) project_id = Column(String(255), nullable=False) deleted = Column(String(36), default='False') host = Column(String(255)) name = Column(String(255)) description = Column(String(255)) status = Column(String(255)) source_cgsnapshot_id = Column(String(36)) share_network_id = Column(String(36), ForeignKey('share_networks.id'), nullable=True) share_server_id = Column(String(36), ForeignKey('share_servers.id'), nullable=True) class CGSnapshot(BASE, ManilaBase): """Represents a cgsnapshot.""" __tablename__ = 'cgsnapshots' id = Column(String(36), primary_key=True) consistency_group_id = Column(String(36), ForeignKey('consistency_groups.id')) user_id = Column(String(255), nullable=False) project_id = Column(String(255), nullable=False) deleted = Column(String(36), default='False') name = Column(String(255)) description = Column(String(255)) status = Column(String(255)) consistency_group = orm.relationship( ConsistencyGroup, backref="cgsnapshots", foreign_keys=consistency_group_id, primaryjoin=('and_(' 'CGSnapshot.consistency_group_id == ConsistencyGroup.id,' 'CGSnapshot.deleted == "False")') ) class ConsistencyGroupShareTypeMapping(BASE, ManilaBase): """Represents the share types in a consistency group.""" __tablename__ = 'consistency_group_share_type_mappings' id = Column(String(36), primary_key=True) deleted = Column(String(36), default='False') consistency_group_id = Column(String(36), ForeignKey('consistency_groups.id'), nullable=False) share_type_id = Column(String(36), ForeignKey('share_types.id'), nullable=False) consistency_group = orm.relationship( ConsistencyGroup, backref="share_types", foreign_keys=consistency_group_id, primaryjoin=('and_(' 'ConsistencyGroupShareTypeMapping.consistency_group_id ' '== ConsistencyGroup.id,' 'ConsistencyGroupShareTypeMapping.deleted == "False")') ) class CGSnapshotMember(BASE, ManilaBase): """Represents the share snapshots in a consistency group snapshot.""" __tablename__ = 'cgsnapshot_members' id = Column(String(36), 
primary_key=True) cgsnapshot_id = Column(String(36), ForeignKey('cgsnapshots.id')) share_id = Column(String(36), ForeignKey('shares.id')) share_instance_id = Column(String(36), ForeignKey('share_instances.id')) size = Column(Integer) status = Column(String(255)) share_proto = Column(String(255)) share_type_id = Column(String(36), ForeignKey('share_types.id'), nullable=True) user_id = Column(String(255)) project_id = Column(String(255)) deleted = Column(String(36), default='False') cgsnapshot = orm.relationship( CGSnapshot, backref="cgsnapshot_members", foreign_keys=cgsnapshot_id, primaryjoin='CGSnapshot.id == CGSnapshotMember.cgsnapshot_id') def register_models(): """Register Models and create metadata. Called from manila.db.sqlalchemy.__init__ as part of loading the driver, it will never need to be called explicitly elsewhere unless the connection is lost and needs to be reestablished. """ from sqlalchemy import create_engine models = (Service, Share, ShareAccessMapping, ShareSnapshot ) engine = create_engine(CONF.database.connection, echo=False) for model in models: model.metadata.create_all(engine) def get_access_rules_status(instances): share_access_status = constants.STATUS_ACTIVE if len(instances) == 0: return share_access_status priorities = ShareInstance.ACCESS_STATUS_PRIORITIES for instance in instances: if instance['status'] != constants.STATUS_AVAILABLE: continue instance_access_status = instance['access_rules_status'] if priorities.get(instance_access_status) > priorities.get( share_access_status): share_access_status = instance_access_status if share_access_status == constants.STATUS_ERROR: break return share_access_status manila-2.0.0/manila/db/sqlalchemy/api.py0000664000567000056710000037005712701407112021235 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright (c) 2014 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Implementation of SQLAlchemy backend.""" import copy import datetime import sys import uuid import warnings # NOTE(uglide): Required to override default oslo_db Query class import manila.db.sqlalchemy.query # noqa from oslo_config import cfg from oslo_db import api as oslo_db_api from oslo_db import exception as db_exception from oslo_db import options as db_options from oslo_db.sqlalchemy import session from oslo_db.sqlalchemy import utils as db_utils from oslo_log import log from oslo_utils import timeutils from oslo_utils import uuidutils import six from sqlalchemy import or_ from sqlalchemy.orm import joinedload from sqlalchemy.sql.expression import true from sqlalchemy.sql import func from manila.common import constants from manila.db.sqlalchemy import models from manila import exception from manila.i18n import _ from manila.i18n import _LE from manila.i18n import _LW CONF = cfg.CONF LOG = log.getLogger(__name__) _DEFAULT_QUOTA_NAME = 'default' PER_PROJECT_QUOTAS = [] _FACADE = None _DEFAULT_SQL_CONNECTION = 'sqlite://' db_options.set_defaults(cfg.CONF, connection=_DEFAULT_SQL_CONNECTION) def _create_facade_lazily(): global _FACADE if _FACADE is None: _FACADE = session.EngineFacade.from_config(cfg.CONF) return _FACADE def get_engine(): facade = _create_facade_lazily() return facade.get_engine() def get_session(**kwargs): facade = _create_facade_lazily() return facade.get_session(**kwargs) def get_backend(): """The backend is this module itself.""" return sys.modules[__name__] def is_admin_context(context): """Indicates if the request context is an administrator.""" if not context: warnings.warn(_('Use of empty request context is deprecated'), DeprecationWarning) raise Exception('die') return context.is_admin def is_user_context(context): """Indicates if the request context is a normal user.""" if not context: return False if context.is_admin: return False if not context.user_id or not context.project_id: return False return True def authorize_project_context(context, project_id): """Ensures a request has permission to access the given project.""" if is_user_context(context): if not context.project_id: raise exception.NotAuthorized() elif context.project_id != project_id: raise exception.NotAuthorized() def authorize_user_context(context, user_id): """Ensures a request has permission to access the given user.""" if is_user_context(context): if not context.user_id: raise exception.NotAuthorized() elif context.user_id != user_id: raise exception.NotAuthorized() def authorize_quota_class_context(context, class_name): """Ensures a request has permission to access the given quota class.""" if is_user_context(context): if not context.quota_class: raise exception.NotAuthorized() elif context.quota_class != class_name: raise exception.NotAuthorized() def require_admin_context(f): """Decorator to require admin request context. The first argument to the wrapped function must be the context. """ def wrapper(*args, **kwargs): if not is_admin_context(args[0]): raise exception.AdminRequired() return f(*args, **kwargs) return wrapper def require_context(f): """Decorator to require *any* user or admin context. This does no authorization for user or project access matching, see :py:func:`authorize_project_context` and :py:func:`authorize_user_context`. The first argument to the wrapped function must be the context. 
""" def wrapper(*args, **kwargs): if not is_admin_context(args[0]) and not is_user_context(args[0]): raise exception.NotAuthorized() return f(*args, **kwargs) return wrapper def require_share_exists(f): """Decorator to require the specified share to exist. Requires the wrapped function to use context and share_id as their first two arguments. """ def wrapper(context, share_id, *args, **kwargs): share_get(context, share_id) return f(context, share_id, *args, **kwargs) wrapper.__name__ = f.__name__ return wrapper def require_share_instance_exists(f): """Decorator to require the specified share instance to exist. Requires the wrapped function to use context and share_instance_id as their first two arguments. """ def wrapper(context, share_instance_id, *args, **kwargs): share_instance_get(context, share_instance_id) return f(context, share_instance_id, *args, **kwargs) wrapper.__name__ = f.__name__ return wrapper def model_query(context, model, *args, **kwargs): """Query helper that accounts for context's `read_deleted` field. :param context: context to query under :param model: model to query. Must be a subclass of ModelBase. :param session: if present, the session to use :param read_deleted: if present, overrides context's read_deleted field. :param project_only: if present and context is user-type, then restrict query to match the context's project_id. """ session = kwargs.get('session') or get_session() read_deleted = kwargs.get('read_deleted') or context.read_deleted project_only = kwargs.get('project_only') kwargs = dict() if project_only and not context.is_admin: kwargs['project_id'] = context.project_id if read_deleted in ('no', 'n', False): kwargs['deleted'] = False elif read_deleted in ('yes', 'y', True): kwargs['deleted'] = True return db_utils.model_query( model=model, session=session, args=args, **kwargs) def exact_filter(query, model, filters, legal_keys): """Applies exact match filtering to a query. Returns the updated query. Modifies filters argument to remove filters consumed. :param query: query to apply filters to :param model: model object the query applies to, for IN-style filtering :param filters: dictionary of filters; values that are lists, tuples, sets, or frozensets cause an 'IN' test to be performed, while exact matching ('==' operator) is used for other values :param legal_keys: list of keys to apply exact filtering to """ filter_dict = {} # Walk through all the keys for key in legal_keys: # Skip ones we're not filtering on if key not in filters: continue # OK, filtering on this key; what value do we search for? 
value = filters.pop(key) if isinstance(value, (list, tuple, set, frozenset)): # Looking for values in a list; apply to query directly column_attr = getattr(model, key) query = query.filter(column_attr.in_(value)) else: # OK, simple exact match; save for later filter_dict[key] = value # Apply simple exact matches if filter_dict: query = query.filter_by(**filter_dict) return query def ensure_model_dict_has_id(model_dict): if not model_dict.get('id'): model_dict['id'] = uuidutils.generate_uuid() return model_dict def _sync_shares(context, project_id, user_id, session): (shares, gigs) = share_data_get_for_project(context, project_id, user_id, session=session) return {'shares': shares} def _sync_snapshots(context, project_id, user_id, session): (snapshots, gigs) = snapshot_data_get_for_project(context, project_id, user_id, session=session) return {'snapshots': snapshots} def _sync_gigabytes(context, project_id, user_id, session): _junk, share_gigs = share_data_get_for_project( context, project_id, user_id, session=session) return dict(gigabytes=share_gigs) def _sync_snapshot_gigabytes(context, project_id, user_id, session): _junk, snapshot_gigs = snapshot_data_get_for_project( context, project_id, user_id, session=session) return dict(snapshot_gigabytes=snapshot_gigs) def _sync_share_networks(context, project_id, user_id, session): share_networks = share_network_get_all_by_project(context, project_id, user_id, session=session) return {'share_networks': len(share_networks)} QUOTA_SYNC_FUNCTIONS = { '_sync_shares': _sync_shares, '_sync_snapshots': _sync_snapshots, '_sync_gigabytes': _sync_gigabytes, '_sync_snapshot_gigabytes': _sync_snapshot_gigabytes, '_sync_share_networks': _sync_share_networks, } ################### @require_admin_context def service_destroy(context, service_id): session = get_session() with session.begin(): service_ref = service_get(context, service_id, session=session) service_ref.soft_delete(session) @require_admin_context def service_get(context, service_id, session=None): result = model_query( context, models.Service, session=session).\ filter_by(id=service_id).\ first() if not result: raise exception.ServiceNotFound(service_id=service_id) return result @require_admin_context def service_get_all(context, disabled=None): query = model_query(context, models.Service) if disabled is not None: query = query.filter_by(disabled=disabled) return query.all() @require_admin_context def service_get_all_by_topic(context, topic): return model_query( context, models.Service, read_deleted="no").\ filter_by(disabled=False).\ filter_by(topic=topic).\ all() @require_admin_context def service_get_by_host_and_topic(context, host, topic): result = model_query( context, models.Service, read_deleted="no").\ filter_by(disabled=False).\ filter_by(host=host).\ filter_by(topic=topic).\ first() if not result: raise exception.ServiceNotFound(service_id=host) return result @require_admin_context def service_get_all_by_host(context, host): return model_query( context, models.Service, read_deleted="no").\ filter_by(host=host).\ all() @require_admin_context def _service_get_all_topic_subquery(context, session, topic, subq, label): sort_value = getattr(subq.c, label) return model_query(context, models.Service, func.coalesce(sort_value, 0), session=session, read_deleted="no").\ filter_by(topic=topic).\ filter_by(disabled=False).\ outerjoin((subq, models.Service.host == subq.c.host)).\ order_by(sort_value).\ all() @require_admin_context def service_get_all_share_sorted(context): session = get_session() with 
session.begin(): topic = CONF.share_topic label = 'share_gigabytes' subq = model_query(context, models.Share, func.sum(models.Share.size).label(label), session=session, read_deleted="no").\ join(models.ShareInstance, models.ShareInstance.share_id == models.Share.id).\ group_by(models.ShareInstance.host).\ subquery() return _service_get_all_topic_subquery(context, session, topic, subq, label) @require_admin_context def service_get_by_args(context, host, binary): result = model_query(context, models.Service).\ filter_by(host=host).\ filter_by(binary=binary).\ first() if not result: raise exception.HostBinaryNotFound(host=host, binary=binary) return result @require_admin_context def service_create(context, values): session = get_session() ensure_availability_zone_exists(context, values, session) service_ref = models.Service() service_ref.update(values) if not CONF.enable_new_services: service_ref.disabled = True with session.begin(): service_ref.save(session) return service_ref @require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def service_update(context, service_id, values): session = get_session() ensure_availability_zone_exists(context, values, session, strict=False) with session.begin(): service_ref = service_get(context, service_id, session=session) service_ref.update(values) service_ref.save(session=session) ################### @require_context def quota_get(context, project_id, resource, session=None): result = model_query(context, models.Quota, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(resource=resource).\ first() if not result: raise exception.ProjectQuotaNotFound(project_id=project_id) return result @require_context def quota_get_all_by_project_and_user(context, project_id, user_id): authorize_project_context(context, project_id) user_quotas = model_query(context, models.ProjectUserQuota, models.ProjectUserQuota.resource, models.ProjectUserQuota.hard_limit).\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).\ all() result = {'project_id': project_id, 'user_id': user_id} for quota in user_quotas: result[quota.resource] = quota.hard_limit return result @require_context def quota_get_all_by_project(context, project_id): authorize_project_context(context, project_id) rows = model_query(context, models.Quota, read_deleted="no").\ filter_by(project_id=project_id).\ all() result = {'project_id': project_id} for row in rows: result[row.resource] = row.hard_limit return result @require_context def quota_get_all(context, project_id): authorize_project_context(context, project_id) result = model_query(context, models.ProjectUserQuota).\ filter_by(project_id=project_id).\ all() return result @require_admin_context def quota_create(context, project_id, resource, limit, user_id=None): per_user = user_id and resource not in PER_PROJECT_QUOTAS if per_user: check = model_query(context, models.ProjectUserQuota).\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).\ filter_by(resource=resource).\ all() else: check = model_query(context, models.Quota).\ filter_by(project_id=project_id).\ filter_by(resource=resource).\ all() if check: raise exception.QuotaExists(project_id=project_id, resource=resource) quota_ref = models.ProjectUserQuota() if per_user else models.Quota() if per_user: quota_ref.user_id = user_id quota_ref.project_id = project_id quota_ref.resource = resource quota_ref.hard_limit = limit session = get_session() with session.begin(): quota_ref.save(session) return quota_ref 
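# Illustrative call patterns for quota_create() above and quota_update()
# below (project/user ids and limits are made-up values):
#
#     quota_create(ctxt, 'p1', 'shares', 100)               # project-wide row
#     quota_create(ctxt, 'p1', 'shares', 10, user_id='u1')  # per-user override
#     quota_update(ctxt, 'p1', 'shares', 200)               # raise project limit
#
# Resources listed in PER_PROJECT_QUOTAS never get per-user rows, even when
# a user_id is passed.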
@require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def quota_update(context, project_id, resource, limit, user_id=None): per_user = user_id and resource not in PER_PROJECT_QUOTAS model = models.ProjectUserQuota if per_user else models.Quota query = model_query(context, model).\ filter_by(project_id=project_id).\ filter_by(resource=resource) if per_user: query = query.filter_by(user_id=user_id) result = query.update({'hard_limit': limit}) if not result: if per_user: raise exception.ProjectUserQuotaNotFound(project_id=project_id, user_id=user_id) else: raise exception.ProjectQuotaNotFound(project_id=project_id) ################### @require_context def quota_class_get(context, class_name, resource, session=None): result = model_query(context, models.QuotaClass, session=session, read_deleted="no").\ filter_by(class_name=class_name).\ filter_by(resource=resource).\ first() if not result: raise exception.QuotaClassNotFound(class_name=class_name) return result def quota_class_get_default(context): rows = model_query(context, models.QuotaClass, read_deleted="no").\ filter_by(class_name=_DEFAULT_QUOTA_NAME).\ all() result = {'class_name': _DEFAULT_QUOTA_NAME} for row in rows: result[row.resource] = row.hard_limit return result @require_context def quota_class_get_all_by_name(context, class_name): authorize_quota_class_context(context, class_name) rows = model_query(context, models.QuotaClass, read_deleted="no").\ filter_by(class_name=class_name).\ all() result = {'class_name': class_name} for row in rows: result[row.resource] = row.hard_limit return result @require_admin_context def quota_class_create(context, class_name, resource, limit): quota_class_ref = models.QuotaClass() quota_class_ref.class_name = class_name quota_class_ref.resource = resource quota_class_ref.hard_limit = limit session = get_session() with session.begin(): quota_class_ref.save(session) return quota_class_ref @require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def quota_class_update(context, class_name, resource, limit): result = model_query(context, models.QuotaClass, read_deleted="no").\ filter_by(class_name=class_name).\ filter_by(resource=resource).\ update({'hard_limit': limit}) if not result: raise exception.QuotaClassNotFound(class_name=class_name) ################### @require_context def quota_usage_get(context, project_id, resource, user_id=None): query = model_query(context, models.QuotaUsage, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(resource=resource) if user_id: if resource not in PER_PROJECT_QUOTAS: result = query.filter_by(user_id=user_id).first() else: result = query.filter_by(user_id=None).first() else: result = query.first() if not result: raise exception.QuotaUsageNotFound(project_id=project_id) return result def _quota_usage_get_all(context, project_id, user_id=None): authorize_project_context(context, project_id) query = model_query(context, models.QuotaUsage, read_deleted="no").\ filter_by(project_id=project_id) result = {'project_id': project_id} if user_id: query = query.filter(or_(models.QuotaUsage.user_id == user_id, models.QuotaUsage.user_id is None)) result['user_id'] = user_id rows = query.all() for row in rows: if row.resource in result: result[row.resource]['in_use'] += row.in_use result[row.resource]['reserved'] += row.reserved else: result[row.resource] = dict(in_use=row.in_use, reserved=row.reserved) return result @require_context def quota_usage_get_all_by_project(context, project_id): return 
_quota_usage_get_all(context, project_id) @require_context def quota_usage_get_all_by_project_and_user(context, project_id, user_id): return _quota_usage_get_all(context, project_id, user_id=user_id) def _quota_usage_create(context, project_id, user_id, resource, in_use, reserved, until_refresh, session=None): quota_usage_ref = models.QuotaUsage() quota_usage_ref.project_id = project_id quota_usage_ref.user_id = user_id quota_usage_ref.resource = resource quota_usage_ref.in_use = in_use quota_usage_ref.reserved = reserved quota_usage_ref.until_refresh = until_refresh # updated_at is needed for judgement of max_age quota_usage_ref.updated_at = timeutils.utcnow() quota_usage_ref.save(session=session) return quota_usage_ref @require_admin_context def quota_usage_create(context, project_id, user_id, resource, in_use, reserved, until_refresh): session = get_session() return _quota_usage_create(context, project_id, user_id, resource, in_use, reserved, until_refresh, session) @require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def quota_usage_update(context, project_id, user_id, resource, **kwargs): updates = {} for key in ['in_use', 'reserved', 'until_refresh']: if key in kwargs: updates[key] = kwargs[key] result = model_query(context, models.QuotaUsage, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(resource=resource).\ filter(or_(models.QuotaUsage.user_id == user_id, models.QuotaUsage.user_id is None)).\ update(updates) if not result: raise exception.QuotaUsageNotFound(project_id=project_id) ################### @require_context def reservation_get(context, uuid, session=None): result = model_query(context, models.Reservation, session=session, read_deleted="no").\ filter_by(uuid=uuid).first() if not result: raise exception.ReservationNotFound(uuid=uuid) return result @require_admin_context def reservation_create(context, uuid, usage, project_id, user_id, resource, delta, expire): return _reservation_create(context, uuid, usage, project_id, user_id, resource, delta, expire) def _reservation_create(context, uuid, usage, project_id, user_id, resource, delta, expire, session=None): reservation_ref = models.Reservation() reservation_ref.uuid = uuid reservation_ref.usage_id = usage['id'] reservation_ref.project_id = project_id reservation_ref.user_id = user_id reservation_ref.resource = resource reservation_ref.delta = delta reservation_ref.expire = expire reservation_ref.save(session=session) return reservation_ref ################### # NOTE(johannes): The quota code uses SQL locking to ensure races don't # cause under or over counting of resources. To avoid deadlocks, this # code always acquires the lock on quota_usages before acquiring the lock # on reservations. 
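# A minimal sketch of that ordering as it is exercised by quota_reserve()
# below (abridged, not an additional code path):
#
#     with session.begin():
#         user_usages = _get_user_quota_usages(...)   # locks quota_usages rows
#                                                     # (SELECT ... FOR UPDATE)
#         ...
#         _reservation_create(..., session=session)   # writes reservations only
#                                                     # after usages are locked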
def _get_user_quota_usages(context, session, project_id, user_id): # Broken out for testability rows = model_query(context, models.QuotaUsage, read_deleted="no", session=session).\ filter_by(project_id=project_id).\ filter(or_(models.QuotaUsage.user_id == user_id, models.QuotaUsage.user_id is None)).\ with_lockmode('update').\ all() return {row.resource: row for row in rows} def _get_project_quota_usages(context, session, project_id): rows = model_query(context, models.QuotaUsage, read_deleted="no", session=session).\ filter_by(project_id=project_id).\ with_lockmode('update').\ all() result = dict() # Get the total count of in_use,reserved for row in rows: if row.resource in result: result[row.resource]['in_use'] += row.in_use result[row.resource]['reserved'] += row.reserved result[row.resource]['total'] += (row.in_use + row.reserved) else: result[row.resource] = dict(in_use=row.in_use, reserved=row.reserved, total=row.in_use + row.reserved) return result @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def quota_reserve(context, resources, project_quotas, user_quotas, deltas, expire, until_refresh, max_age, project_id=None, user_id=None): elevated = context.elevated() session = get_session() with session.begin(): if project_id is None: project_id = context.project_id if user_id is None: user_id = context.user_id # Get the current usages user_usages = _get_user_quota_usages(context, session, project_id, user_id) project_usages = _get_project_quota_usages(context, session, project_id) # Handle usage refresh work = set(deltas.keys()) while work: resource = work.pop() # Do we need to refresh the usage? refresh = False if ((resource not in PER_PROJECT_QUOTAS) and (resource not in user_usages)): user_usages[resource] = _quota_usage_create( elevated, project_id, user_id, resource, 0, 0, until_refresh or None, session=session) refresh = True elif ((resource in PER_PROJECT_QUOTAS) and (resource not in user_usages)): user_usages[resource] = _quota_usage_create( elevated, project_id, None, resource, 0, 0, until_refresh or None, session=session) refresh = True elif user_usages[resource].in_use < 0: # Negative in_use count indicates a desync, so try to # heal from that... refresh = True elif user_usages[resource].until_refresh is not None: user_usages[resource].until_refresh -= 1 if user_usages[resource].until_refresh <= 0: refresh = True elif max_age and (user_usages[resource].updated_at - timeutils.utcnow()).seconds >= max_age: refresh = True # OK, refresh the usage if refresh: # Grab the sync routine sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync] updates = sync(elevated, project_id, user_id, session) for res, in_use in updates.items(): # Make sure we have a destination for the usage! if ((res not in PER_PROJECT_QUOTAS) and (res not in user_usages)): user_usages[res] = _quota_usage_create( elevated, project_id, user_id, res, 0, 0, until_refresh or None, session=session) if ((res in PER_PROJECT_QUOTAS) and (res not in user_usages)): user_usages[res] = _quota_usage_create( elevated, project_id, None, res, 0, 0, until_refresh or None, session=session) if user_usages[res].in_use != in_use: LOG.debug('quota_usages out of sync, updating. 
' 'project_id: %(project_id)s, ' 'user_id: %(user_id)s, ' 'resource: %(res)s, ' 'tracked usage: %(tracked_use)s, ' 'actual usage: %(in_use)s', {'project_id': project_id, 'user_id': user_id, 'res': res, 'tracked_use': user_usages[res].in_use, 'in_use': in_use}) # Update the usage user_usages[res].in_use = in_use user_usages[res].until_refresh = until_refresh or None # Because more than one resource may be refreshed # by the call to the sync routine, and we don't # want to double-sync, we make sure all refreshed # resources are dropped from the work set. work.discard(res) # NOTE(Vek): We make the assumption that the sync # routine actually refreshes the # resources that it is the sync routine # for. We don't check, because this is # a best-effort mechanism. # Check for deltas that would go negative unders = [res for res, delta in deltas.items() if delta < 0 and delta + user_usages[res].in_use < 0] # Now, let's check the quotas # NOTE(Vek): We're only concerned about positive increments. # If a project has gone over quota, we want them to # be able to reduce their usage without any # problems. for key, value in user_usages.items(): if key not in project_usages: project_usages[key] = value overs = [res for res, delta in deltas.items() if user_quotas[res] >= 0 and delta >= 0 and (project_quotas[res] < delta + project_usages[res]['total'] or user_quotas[res] < delta + user_usages[res].total)] # NOTE(Vek): The quota check needs to be in the transaction, # but the transaction doesn't fail just because # we're over quota, so the OverQuota raise is # outside the transaction. If we did the raise # here, our usage updates would be discarded, but # they're not invalidated by being over-quota. # Create the reservations if not overs: reservations = [] for res, delta in deltas.items(): reservation = _reservation_create(elevated, uuidutils.generate_uuid(), user_usages[res], project_id, user_id, res, delta, expire, session=session) reservations.append(reservation.uuid) # Also update the reserved quantity # NOTE(Vek): Again, we are only concerned here about # positive increments. Here, though, we're # worried about the following scenario: # # 1) User initiates resize down. # 2) User allocates a new instance. # 3) Resize down fails or is reverted. # 4) User is now over quota. # # To prevent this, we only update the # reserved value if the delta is positive. 
if delta > 0: user_usages[res].reserved += delta # Apply updates to the usages table for usage_ref in user_usages.values(): session.add(usage_ref) if unders: LOG.warning(_LW("Change will make usage less than 0 for the following " "resources: %s"), unders) if overs: if project_quotas == user_quotas: usages = project_usages else: usages = user_usages usages = {k: dict(in_use=v['in_use'], reserved=v['reserved']) for k, v in usages.items()} raise exception.OverQuota(overs=sorted(overs), quotas=user_quotas, usages=usages) return reservations def _quota_reservations_query(session, context, reservations): """Return the relevant reservations.""" # Get the listed reservations return model_query(context, models.Reservation, read_deleted="no", session=session).\ filter(models.Reservation.uuid.in_(reservations)).\ with_lockmode('update') @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def reservation_commit(context, reservations, project_id=None, user_id=None): session = get_session() with session.begin(): usages = _get_user_quota_usages(context, session, project_id, user_id) reservation_query = _quota_reservations_query(session, context, reservations) for reservation in reservation_query.all(): usage = usages[reservation.resource] if reservation.delta >= 0: usage.reserved -= reservation.delta usage.in_use += reservation.delta reservation_query.soft_delete(synchronize_session=False) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def reservation_rollback(context, reservations, project_id=None, user_id=None): session = get_session() with session.begin(): usages = _get_user_quota_usages(context, session, project_id, user_id) reservation_query = _quota_reservations_query(session, context, reservations) for reservation in reservation_query.all(): usage = usages[reservation.resource] if reservation.delta >= 0: usage.reserved -= reservation.delta reservation_query.soft_delete(synchronize_session=False) @require_admin_context def quota_destroy_all_by_project_and_user(context, project_id, user_id): session = get_session() with session.begin(): model_query(context, models.ProjectUserQuota, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).soft_delete(synchronize_session=False) model_query(context, models.QuotaUsage, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).soft_delete(synchronize_session=False) model_query(context, models.Reservation, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).soft_delete(synchronize_session=False) @require_admin_context def quota_destroy_all_by_project(context, project_id): session = get_session() with session.begin(): model_query(context, models.Quota, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ soft_delete(synchronize_session=False) model_query(context, models.ProjectUserQuota, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ soft_delete(synchronize_session=False) model_query(context, models.QuotaUsage, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ soft_delete(synchronize_session=False) model_query(context, models.Reservation, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ soft_delete(synchronize_session=False) @require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def reservation_expire(context): session = get_session() 
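    # Sweep every reservation whose 'expire' timestamp has passed: return any
    # positive delta to the usage's 'reserved' counter, then soft-delete the
    # expired reservation rows.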
with session.begin(): current_time = timeutils.utcnow() reservation_query = model_query(context, models.Reservation, session=session, read_deleted="no").\ filter(models.Reservation.expire < current_time) for reservation in reservation_query.join(models.QuotaUsage).all(): if reservation.delta >= 0: reservation.usage.reserved -= reservation.delta session.add(reservation.usage) reservation_query.soft_delete(synchronize_session=False) ################ def extract_instance_values(values, fields): instance_values = {} for field in fields: field_value = values.pop(field, None) if field_value: instance_values.update({field: field_value}) return instance_values def extract_share_instance_values(values): share_instance_model_fields = [ 'status', 'host', 'scheduled_at', 'launched_at', 'terminated_at', 'share_server_id', 'share_network_id', 'availability_zone' ] return extract_instance_values(values, share_instance_model_fields) def extract_snapshot_instance_values(values): fields = ['status', 'progress', 'provider_location'] return extract_instance_values(values, fields) ################ @require_context def share_instance_create(context, share_id, values): session = get_session() with session.begin(): return _share_instance_create(context, share_id, values, session) def _share_instance_create(context, share_id, values, session): if not values.get('id'): values['id'] = uuidutils.generate_uuid() values.update({'share_id': share_id}) share_instance_ref = models.ShareInstance() share_instance_ref.update(values) share_instance_ref.save(session=session) return share_instance_get(context, share_instance_ref['id'], session=session) @require_context def share_instance_update(context, share_instance_id, values, with_share_data=False): session = get_session() ensure_availability_zone_exists(context, values, session, strict=False) with session.begin(): instance_ref = _share_instance_update( context, share_instance_id, values, session ) if with_share_data: parent_share = share_get(context, instance_ref['share_id'], session=session) instance_ref.set_share_data(parent_share) return instance_ref @require_context def _share_instance_update(context, share_instance_id, values, session): share_instance_ref = share_instance_get(context, share_instance_id, session=session) share_instance_ref.update(values) share_instance_ref.save(session=session) return share_instance_ref @require_context def share_instance_get(context, share_instance_id, session=None, with_share_data=False): if session is None: session = get_session() result = model_query( context, models.ShareInstance, session=session, ).filter_by( id=share_instance_id, ).options( joinedload('export_locations'), ).first() if result is None: raise exception.NotFound() if with_share_data: parent_share = share_get(context, result['share_id'], session=session) result.set_share_data(parent_share) return result @require_admin_context def share_instances_get_all(context): session = get_session() return model_query( context, models.ShareInstance, session=session, read_deleted="no", ).options( joinedload('export_locations'), ).all() @require_context def share_instance_delete(context, instance_id, session=None): if session is None: session = get_session() with session.begin(): share_export_locations_update(context, instance_id, [], delete=True) instance_ref = share_instance_get(context, instance_id, session=session) instance_ref.soft_delete(session=session, update_status=True) share = share_get(context, instance_ref['share_id'], session=session) if len(share.instances) == 0: 
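            # The last instance of this share is gone, so drop the parent
            # share record and all of its access rules as well.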
share.soft_delete(session=session) share_access_delete_all_by_share(context, share['id']) @require_admin_context def share_instances_get_all_by_host(context, host): """Retrieves all share instances hosted on a host.""" result = ( model_query(context, models.ShareInstance).filter( or_( models.ShareInstance.host == host, models.ShareInstance.host.like("{0}#%".format(host)) ) ).all() ) return result @require_context def share_instances_get_all_by_share_network(context, share_network_id): """Returns list of share instances that belong to given share network.""" result = ( model_query(context, models.ShareInstance).filter( models.ShareInstance.share_network_id == share_network_id, ).all() ) return result @require_context def share_instances_get_all_by_share_server(context, share_server_id): """Returns list of share instance with given share server.""" result = ( model_query(context, models.ShareInstance).filter( models.ShareInstance.share_server_id == share_server_id, ).all() ) return result @require_context def share_instances_get_all_by_share(context, share_id): """Returns list of share instances that belong to given share.""" result = ( model_query(context, models.ShareInstance).filter( models.ShareInstance.share_id == share_id, ).all() ) return result @require_context def share_instances_get_all_by_consistency_group_id(context, cg_id): """Returns list of share instances that belong to given cg.""" result = ( model_query(context, models.Share).filter( models.Share.consistency_group_id == cg_id, ).all() ) instances = [] for share in result: instance = share.instance instance.set_share_data(share) instances.append(instance) return instances ################ def _share_replica_get_with_filters(context, share_id=None, replica_id=None, replica_state=None, status=None, with_share_server=True, session=None): query = model_query(context, models.ShareInstance, session=session, read_deleted="no") if share_id is not None: query = query.filter(models.ShareInstance.share_id == share_id) if replica_id is not None: query = query.filter(models.ShareInstance.id == replica_id) if replica_state is not None: query = query.filter( models.ShareInstance.replica_state == replica_state) else: query = query.filter(models.ShareInstance.replica_state.isnot(None)) if status is not None: query = query.filter(models.ShareInstance.status == status) if with_share_server: query = query.options(joinedload('share_server')) return query def _set_replica_share_data(context, replicas, session): if replicas and not isinstance(replicas, list): replicas = [replicas] for replica in replicas: parent_share = share_get(context, replica['share_id'], session=session) replica.set_share_data(parent_share) return replicas @require_context def share_replicas_get_all(context, with_share_data=False, with_share_server=True, session=None): """Returns replica instances for all available replicated shares.""" session = session or get_session() result = _share_replica_get_with_filters( context, with_share_server=with_share_server, session=session).all() if with_share_data: result = _set_replica_share_data(context, result, session) return result @require_context def share_replicas_get_all_by_share(context, share_id, with_share_data=False, with_share_server=False, session=None): """Returns replica instances for a given share.""" session = session or get_session() result = _share_replica_get_with_filters( context, with_share_server=with_share_server, share_id=share_id, session=session).all() if with_share_data: result = _set_replica_share_data(context, 
result, session) return result @require_context def share_replicas_get_available_active_replica(context, share_id, with_share_data=False, with_share_server=False, session=None): """Returns an 'active' replica instance that is 'available'.""" session = session or get_session() result = _share_replica_get_with_filters( context, with_share_server=with_share_server, share_id=share_id, replica_state=constants.REPLICA_STATE_ACTIVE, status=constants.STATUS_AVAILABLE, session=session).first() if result and with_share_data: result = _set_replica_share_data(context, result, session)[0] return result @require_context def share_replicas_get_active_replicas_by_share(context, share_id, with_share_data=False, with_share_server=False, session=None): """Returns all active replicas for a given share.""" session = session or get_session() result = _share_replica_get_with_filters( context, with_share_server=with_share_server, share_id=share_id, replica_state=constants.REPLICA_STATE_ACTIVE, session=session).all() if with_share_data: result = _set_replica_share_data(context, result, session) return result @require_context def share_replica_get(context, replica_id, with_share_data=False, with_share_server=False, session=None): """Returns summary of requested replica if available.""" session = session or get_session() result = _share_replica_get_with_filters( context, with_share_server=with_share_server, replica_id=replica_id, session=session).first() if result is None: raise exception.ShareReplicaNotFound(replica_id=replica_id) if with_share_data: result = _set_replica_share_data(context, result, session)[0] return result @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def share_replica_update(context, share_replica_id, values, with_share_data=False, session=None): """Updates a share replica with specified values.""" session = session or get_session() with session.begin(): ensure_availability_zone_exists(context, values, session, strict=False) updated_share_replica = _share_instance_update( context, share_replica_id, values, session=session) if with_share_data: updated_share_replica = _set_replica_share_data( context, updated_share_replica, session)[0] return updated_share_replica @require_context def share_replica_delete(context, share_replica_id, session=None): """Deletes a share replica.""" session = session or get_session() share_instance_delete(context, share_replica_id, session=session) ################ def _share_get_query(context, session=None): if session is None: session = get_session() return model_query(context, models.Share, session=session).\ options(joinedload('share_metadata')).\ options(joinedload('share_type')) def _metadata_refs(metadata_dict, meta_class): metadata_refs = [] if metadata_dict: for k, v in metadata_dict.items(): value = six.text_type(v) if isinstance(v, bool) else v metadata_ref = meta_class() metadata_ref['key'] = k metadata_ref['value'] = value metadata_refs.append(metadata_ref) return metadata_refs @require_context def share_create(context, values, create_share_instance=True): values = ensure_model_dict_has_id(values) values['share_metadata'] = _metadata_refs(values.get('metadata'), models.ShareMetadata) session = get_session() share_ref = models.Share() share_instance_values = extract_share_instance_values(values) ensure_availability_zone_exists(context, share_instance_values, session, strict=False) share_ref.update(values) with session.begin(): share_ref.save(session=session) if create_share_instance: _share_instance_create(context, 
share_ref['id'], share_instance_values, session=session) # NOTE(u_glide): Do so to prevent errors with relationships return share_get(context, share_ref['id'], session=session) @require_admin_context def share_data_get_for_project(context, project_id, user_id, session=None): query = model_query(context, models.Share, func.count(models.Share.id), func.sum(models.Share.size), read_deleted="no", session=session).\ filter_by(project_id=project_id) if user_id: result = query.filter_by(user_id=user_id).first() else: result = query.first() return (result[0] or 0, result[1] or 0) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def share_update(context, share_id, values): session = get_session() share_instance_values = extract_share_instance_values(values) ensure_availability_zone_exists(context, share_instance_values, session, strict=False) with session.begin(): share_ref = share_get(context, share_id, session=session) _share_instance_update(context, share_ref.instance['id'], share_instance_values, session=session) share_ref.update(values) share_ref.save(session=session) return share_ref @require_context def share_get(context, share_id, session=None): result = _share_get_query(context, session).filter_by(id=share_id).first() if result is None: raise exception.NotFound() return result @require_context def _share_get_all_with_filters(context, project_id=None, share_server_id=None, consistency_group_id=None, filters=None, is_public=False, sort_key=None, sort_dir=None): """Returns sorted list of shares that satisfies filters. :param context: context to query under :param project_id: project id that owns shares :param share_server_id: share server that hosts shares :param filters: dict of filters to specify share selection :param is_public: public shares from other projects will be added to result if True :param sort_key: key of models.Share to be used for sorting :param sort_dir: desired direction of sorting, can be 'asc' and 'desc' :returns: list -- models.Share :raises: exception.InvalidInput """ if not sort_key: sort_key = 'created_at' if not sort_dir: sort_dir = 'desc' query = ( _share_get_query(context).join( models.ShareInstance, models.ShareInstance.share_id == models.Share.id ) ) if project_id: if is_public: query = query.filter(or_(models.Share.project_id == project_id, models.Share.is_public)) else: query = query.filter(models.Share.project_id == project_id) if share_server_id: query = query.filter( models.ShareInstance.share_server_id == share_server_id) if consistency_group_id: query = query.filter( models.Share.consistency_group_id == consistency_group_id) # Apply filters if not filters: filters = {} if 'metadata' in filters: for k, v in filters['metadata'].items(): query = query.filter( or_(models.Share.share_metadata.any( # pylint: disable=E1101 key=k, value=v))) if 'extra_specs' in filters: query = query.join( models.ShareTypeExtraSpecs, models.ShareTypeExtraSpecs.share_type_id == models.Share.share_type_id) for k, v in filters['extra_specs'].items(): query = query.filter(or_(models.ShareTypeExtraSpecs.key == k, models.ShareTypeExtraSpecs.value == v)) # Apply sorting if sort_dir.lower() not in ('desc', 'asc'): msg = _("Wrong sorting data provided: sort key is '%(sort_key)s' " "and sort direction is '%(sort_dir)s'.") % { "sort_key": sort_key, "sort_dir": sort_dir} raise exception.InvalidInput(reason=msg) def apply_sorting(model, query): sort_attr = getattr(model, sort_key) sort_method = getattr(sort_attr, sort_dir.lower()) return 
query.order_by(sort_method()) try: query = apply_sorting(models.Share, query) except AttributeError: try: query = apply_sorting(models.ShareInstance, query) except AttributeError: msg = _("Wrong sorting key provided - '%s'.") % sort_key raise exception.InvalidInput(reason=msg) # Returns list of shares that satisfy filters. query = query.all() return query @require_admin_context def share_get_all(context, filters=None, sort_key=None, sort_dir=None): query = _share_get_all_with_filters( context, filters=filters, sort_key=sort_key, sort_dir=sort_dir) return query @require_context def share_get_all_by_project(context, project_id, filters=None, is_public=False, sort_key=None, sort_dir=None): """Returns list of shares with given project ID.""" query = _share_get_all_with_filters( context, project_id=project_id, filters=filters, is_public=is_public, sort_key=sort_key, sort_dir=sort_dir, ) return query @require_context def share_get_all_by_consistency_group_id(context, cg_id, filters=None, sort_key=None, sort_dir=None): """Returns list of shares with given CG ID.""" query = _share_get_all_with_filters( context, consistency_group_id=cg_id, filters=filters, sort_key=sort_key, sort_dir=sort_dir, ) return query @require_context def share_get_all_by_share_server(context, share_server_id, filters=None, sort_key=None, sort_dir=None): """Returns list of shares with given share server.""" query = _share_get_all_with_filters( context, share_server_id=share_server_id, filters=filters, sort_key=sort_key, sort_dir=sort_dir, ) return query @require_context def share_delete(context, share_id): session = get_session() with session.begin(): share_ref = share_get(context, share_id, session) if len(share_ref.instances) > 0: msg = _("Share %(id)s has %(count)s share instances.") % { 'id': share_id, 'count': len(share_ref.instances)} raise exception.InvalidShare(msg) share_ref.soft_delete(session=session) session.query(models.ShareMetadata).\ filter_by(share_id=share_id).soft_delete() ################### def _share_access_get_query(context, session, values, read_deleted='no'): """Get access record.""" query = model_query(context, models.ShareAccessMapping, session=session, read_deleted=read_deleted) return query.filter_by(**values) def _share_instance_access_query(context, session, access_id=None, instance_id=None): filters = {} if access_id is not None: filters.update({'access_id': access_id}) if instance_id is not None: filters.update({'share_instance_id': instance_id}) return model_query(context, models.ShareInstanceAccessMapping, session=session).filter_by(**filters) @require_context def share_access_create(context, values): values = ensure_model_dict_has_id(values) session = get_session() with session.begin(): access_ref = models.ShareAccessMapping() access_ref.update(values) access_ref.save(session=session) parent_share = share_get(context, values['share_id'], session=session) for instance in parent_share.instances: vals = { 'share_instance_id': instance['id'], 'access_id': access_ref['id'], } _share_instance_access_create(vals, session) return share_access_get(context, access_ref['id']) def share_instance_access_copy(context, share_id, instance_id, session=None): """Copy access rules from share to share instance.""" session = session or get_session() share_access_rules = share_access_get_all_for_share( context, share_id, session=session) for access_rule in share_access_rules: values = { 'share_instance_id': instance_id, 'access_id': access_rule['id'], } _share_instance_access_create(values, session) return 
share_access_rules


def _share_instance_access_create(values, session):
    access_ref = models.ShareInstanceAccessMapping()
    access_ref.update(ensure_model_dict_has_id(values))
    access_ref.save(session=session)
    return access_ref


@require_context
def share_access_get(context, access_id):
    """Get access record."""
    session = get_session()
    access = _share_access_get_query(
        context, session, {'id': access_id}).first()
    if access:
        return access
    else:
        raise exception.NotFound()


@require_context
def share_instance_access_get(context, access_id, instance_id):
    """Get access record."""
    session = get_session()
    access = _share_instance_access_query(context, session, access_id,
                                          instance_id).first()
    if access:
        return access
    else:
        raise exception.NotFound()


@require_context
def share_access_get_all_for_share(context, share_id, session=None):
    session = session or get_session()
    return _share_access_get_query(context, session,
                                   {'share_id': share_id}).all()


@require_context
def share_access_get_all_for_instance(context, instance_id, session=None):
    """Get all access rules related to a certain share instance."""
    session = session or get_session()
    return _share_access_get_query(context, session, {}).join(
        models.ShareInstanceAccessMapping,
        models.ShareInstanceAccessMapping.access_id ==
        models.ShareAccessMapping.id).filter(
        models.ShareInstanceAccessMapping.share_instance_id ==
        instance_id).all()


@require_context
def share_instance_access_get_all(context, access_id, session=None):
    if not session:
        session = get_session()
    return _share_instance_access_query(context, session, access_id).all()


@require_context
def share_access_get_all_by_type_and_access(context, share_id, access_type,
                                            access):
    session = get_session()
    return _share_access_get_query(context, session,
                                   {'share_id': share_id,
                                    'access_type': access_type,
                                    'access_to': access}).all()


@require_context
def share_access_delete(context, access_id):
    session = get_session()
    with session.begin():
        mappings = share_instance_access_get_all(context, access_id, session)

        if len(mappings) > 0:
            msg = (_("Access rule %s has mappings"
                     " to share instances.") % access_id)
            raise exception.InvalidShareAccess(msg)

        session.query(models.ShareAccessMapping).\
            filter_by(id=access_id).soft_delete()


@require_context
def share_access_delete_all_by_share(context, share_id):
    session = get_session()
    with session.begin():
        session.query(models.ShareAccessMapping). \
            filter_by(share_id=share_id).soft_delete()


@require_context
def share_instance_access_delete(context, mapping_id):
    session = get_session()
    with session.begin():
        mapping = session.query(models.ShareInstanceAccessMapping).\
            filter_by(id=mapping_id).first()

        if not mapping:
            raise exception.NotFound()

        mapping.soft_delete(session)

        other_mappings = share_instance_access_get_all(
            context, mapping['access_id'], session)

        # NOTE(u_glide): Remove access rule if all mappings were removed.
if len(other_mappings) == 0: ( session.query(models.ShareAccessMapping) .filter_by(id=mapping['access_id']) .soft_delete() ) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def share_instance_update_access_status(context, share_instance_id, status): session = get_session() with session.begin(): mapping = session.query(models.ShareInstance).\ filter_by(id=share_instance_id).first() mapping.update({'access_rules_status': status}) mapping.save(session=session) return mapping ################### @require_context def share_snapshot_instance_create(context, snapshot_id, values, session=None): session = session or get_session() values = copy.deepcopy(values) if not values.get('id'): values['id'] = uuidutils.generate_uuid() values.update({'snapshot_id': snapshot_id}) instance_ref = models.ShareSnapshotInstance() instance_ref.update(values) instance_ref.save(session=session) return share_snapshot_instance_get(context, instance_ref['id'], session=session) @require_context def share_snapshot_instance_update(context, instance_id, values): session = get_session() instance_ref = share_snapshot_instance_get(context, instance_id, session=session) # NOTE(u_glide): Ignore updates to custom properties for extra_key in models.ShareSnapshotInstance._extra_keys: if extra_key in values: values.pop(extra_key) instance_ref.update(values) instance_ref.save(session=session) return instance_ref @require_context def share_snapshot_instance_delete(context, snapshot_instance_id, session=None): session = session or get_session() with session.begin(): snapshot_instance_ref = share_snapshot_instance_get( context, snapshot_instance_id, session=session) snapshot_instance_ref.soft_delete( session=session, update_status=True) snapshot = share_snapshot_get( context, snapshot_instance_ref['snapshot_id'], session=session) if len(snapshot.instances) == 0: snapshot.soft_delete(session=session) @require_context def share_snapshot_instance_get(context, snapshot_instance_id, session=None, with_share_data=False): session = session or get_session() result = _share_snapshot_instance_get_with_filters( context, instance_ids=[snapshot_instance_id], session=session).first() if result is None: raise exception.ShareSnapshotInstanceNotFound( instance_id=snapshot_instance_id) if with_share_data: result = _set_share_snapshot_instance_data(context, result, session)[0] return result @require_context def share_snapshot_instance_get_all_with_filters(context, search_filters, with_share_data=False, session=None): """Get snapshot instances filtered by known attrs, ignore unknown attrs. All filters accept list/tuples to filter on, along with simple values. 
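
    Example of the 'search_filters' argument (illustrative values only):

        {'snapshot_ids': 'fake-snapshot-id',
         'statuses': ['available', 'error']}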
""" def listify(values): if values: if not isinstance(values, (list, tuple, set)): return values, else: return values session = session or get_session() _known_filters = ('instance_ids', 'snapshot_ids', 'share_instance_ids', 'statuses') filters = {k: listify(search_filters.get(k)) for k in _known_filters} result = _share_snapshot_instance_get_with_filters( context, session=session, **filters).all() if with_share_data: result = _set_share_snapshot_instance_data(context, result, session) return result def _share_snapshot_instance_get_with_filters(context, instance_ids=None, snapshot_ids=None, statuses=None, share_instance_ids=None, session=None): query = model_query(context, models.ShareSnapshotInstance, session=session, read_deleted="no") if instance_ids is not None: query = query.filter( models.ShareSnapshotInstance.id.in_(instance_ids)) if snapshot_ids is not None: query = query.filter( models.ShareSnapshotInstance.snapshot_id.in_(snapshot_ids)) if share_instance_ids is not None: query = query.filter(models.ShareSnapshotInstance.share_instance_id .in_(share_instance_ids)) if statuses is not None: query = query.filter(models.ShareSnapshotInstance.status.in_(statuses)) return query def _set_share_snapshot_instance_data(context, snapshot_instances, session): if snapshot_instances and not isinstance(snapshot_instances, list): snapshot_instances = [snapshot_instances] for snapshot_instance in snapshot_instances: share_instance = share_instance_get( context, snapshot_instance['share_instance_id'], session=session, with_share_data=True) snapshot_instance['share'] = share_instance return snapshot_instances ################### @require_context def share_snapshot_create(context, values, create_snapshot_instance=True): values = ensure_model_dict_has_id(values) snapshot_ref = models.ShareSnapshot() snapshot_instance_values = extract_snapshot_instance_values(values) share_ref = share_get(context, values.get('share_id')) snapshot_instance_values.update( {'share_instance_id': share_ref.instance.id} ) snapshot_ref.update(values) session = get_session() with session.begin(): snapshot_ref.save(session=session) if create_snapshot_instance: share_snapshot_instance_create( context, snapshot_ref['id'], snapshot_instance_values, session=session ) return share_snapshot_get(context, values['id'], session=session) @require_admin_context def snapshot_data_get_for_project(context, project_id, user_id, session=None): query = model_query(context, models.ShareSnapshot, func.count(models.ShareSnapshot.id), func.sum(models.ShareSnapshot.size), read_deleted="no", session=session).\ filter_by(project_id=project_id) if user_id: result = query.filter_by(user_id=user_id).first() else: result = query.first() return (result[0] or 0, result[1] or 0) @require_context def share_snapshot_destroy(context, snapshot_id): session = get_session() with session.begin(): snapshot_ref = share_snapshot_get(context, snapshot_id, session=session) if len(snapshot_ref.instances) > 0: msg = _("Snapshot %(id)s has %(count)s snapshot instances.") % { 'id': snapshot_id, 'count': len(snapshot_ref.instances)} raise exception.InvalidShareSnapshot(msg) snapshot_ref.soft_delete(session=session) @require_context def share_snapshot_get(context, snapshot_id, session=None): result = model_query(context, models.ShareSnapshot, session=session, project_only=True).\ filter_by(id=snapshot_id).\ options(joinedload('share')).\ first() if not result: raise exception.ShareSnapshotNotFound(snapshot_id=snapshot_id) return result def 
_share_snapshot_get_all_with_filters(context, project_id=None, share_id=None, filters=None, sort_key=None, sort_dir=None): # Init data sort_key = sort_key or 'share_id' sort_dir = sort_dir or 'desc' filters = filters or {} query = model_query(context, models.ShareSnapshot) if project_id: query = query.filter_by(project_id=project_id) if share_id: query = query.filter_by(share_id=share_id) query = query.options(joinedload('share')) # Apply filters if 'usage' in filters: usage_filter_keys = ['any', 'used', 'unused'] if filters['usage'] == 'any': pass elif filters['usage'] == 'used': query = query.filter(or_(models.Share.snapshot_id == ( models.ShareSnapshot.id))) elif filters['usage'] == 'unused': query = query.filter(or_(models.Share.snapshot_id != ( models.ShareSnapshot.id))) else: msg = _("Wrong 'usage' key provided - '%(key)s'. " "Expected keys are '%(ek)s'.") % { 'key': filters['usage'], 'ek': six.text_type(usage_filter_keys)} raise exception.InvalidInput(reason=msg) # Apply sorting try: attr = getattr(models.ShareSnapshot, sort_key) except AttributeError: msg = _("Wrong sorting key provided - '%s'.") % sort_key raise exception.InvalidInput(reason=msg) if sort_dir.lower() == 'desc': query = query.order_by(attr.desc()) elif sort_dir.lower() == 'asc': query = query.order_by(attr.asc()) else: msg = _("Wrong sorting data provided: sort key is '%(sort_key)s' " "and sort direction is '%(sort_dir)s'.") % { "sort_key": sort_key, "sort_dir": sort_dir} raise exception.InvalidInput(reason=msg) # Returns list of shares that satisfy filters return query.all() @require_admin_context def share_snapshot_get_all(context, filters=None, sort_key=None, sort_dir=None): return _share_snapshot_get_all_with_filters( context, filters=filters, sort_key=sort_key, sort_dir=sort_dir, ) @require_context def share_snapshot_get_all_by_project(context, project_id, filters=None, sort_key=None, sort_dir=None): authorize_project_context(context, project_id) return _share_snapshot_get_all_with_filters( context, project_id=project_id, filters=filters, sort_key=sort_key, sort_dir=sort_dir, ) @require_context def share_snapshot_get_all_for_share(context, share_id, filters=None, sort_key=None, sort_dir=None): return _share_snapshot_get_all_with_filters( context, share_id=share_id, filters=filters, sort_key=sort_key, sort_dir=sort_dir, ) @require_context def share_snapshot_data_get_for_project(context, project_id, session=None): authorize_project_context(context, project_id) result = model_query(context, models.ShareSnapshot, func.count(models.ShareSnapshot.id), func.sum(models.ShareSnapshot.share_size), read_deleted="no", session=session).\ filter_by(project_id=project_id).\ first() # NOTE(vish): convert None to 0 return (result[0] or 0, result[1] or 0) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def share_snapshot_update(context, snapshot_id, values): session = get_session() with session.begin(): snapshot_ref = share_snapshot_get(context, snapshot_id, session=session) instance_values = extract_snapshot_instance_values(values) if values: snapshot_ref.update(values) snapshot_ref.save(session=session) if instance_values: snapshot_ref.instance.update(instance_values) snapshot_ref.instance.save(session=session) return snapshot_ref ################################# @require_context @require_share_exists def share_metadata_get(context, share_id): return _share_metadata_get(context, share_id) @require_context @require_share_exists def share_metadata_delete(context, share_id, key): 
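    """Soft-delete the metadata item with the given key for a share."""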
_share_metadata_get_query(context, share_id).\ filter_by(key=key).soft_delete() @require_context @require_share_exists def share_metadata_update(context, share_id, metadata, delete): return _share_metadata_update(context, share_id, metadata, delete) def _share_metadata_get_query(context, share_id, session=None): return model_query(context, models.ShareMetadata, session=session, read_deleted="no").\ filter_by(share_id=share_id).\ options(joinedload('share')) @require_context @require_share_exists def _share_metadata_get(context, share_id, session=None): rows = _share_metadata_get_query(context, share_id, session=session).all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context @require_share_exists @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def _share_metadata_update(context, share_id, metadata, delete, session=None): if not session: session = get_session() with session.begin(): # Set existing metadata to deleted if delete argument is True if delete: original_metadata = _share_metadata_get(context, share_id, session=session) for meta_key, meta_value in original_metadata.items(): if meta_key not in metadata: meta_ref = _share_metadata_get_item(context, share_id, meta_key, session=session) meta_ref.soft_delete(session=session) meta_ref = None # Now update all existing items with new values, or create new meta # objects for meta_key, meta_value in metadata.items(): # update the value whether it exists or not item = {"value": meta_value} try: meta_ref = _share_metadata_get_item(context, share_id, meta_key, session=session) except exception.ShareMetadataNotFound: meta_ref = models.ShareMetadata() item.update({"key": meta_key, "share_id": share_id}) meta_ref.update(item) meta_ref.save(session=session) return metadata def _share_metadata_get_item(context, share_id, key, session=None): result = _share_metadata_get_query(context, share_id, session=session).\ filter_by(key=key).\ first() if not result: raise exception.ShareMetadataNotFound(metadata_key=key, share_id=share_id) return result ############################ # Export locations functions ############################ def _share_export_locations_get(context, share_instance_ids, include_admin_only=True, session=None): session = session or get_session() if not isinstance(share_instance_ids, (set, list, tuple)): share_instance_ids = (share_instance_ids, ) query = model_query( context, models.ShareInstanceExportLocations, session=session, read_deleted="no", ).filter( models.ShareInstanceExportLocations.share_instance_id.in_( share_instance_ids), ).order_by( "updated_at", ).options( joinedload("_el_metadata_bare"), ) if not include_admin_only: query = query.filter_by(is_admin_only=False) return query.all() @require_context @require_share_exists def share_export_locations_get_by_share_id(context, share_id, include_admin_only=True): share = share_get(context, share_id) ids = [instance.id for instance in share.instances] rows = _share_export_locations_get( context, ids, include_admin_only=include_admin_only) return rows @require_context @require_share_instance_exists def share_export_locations_get_by_share_instance_id(context, share_instance_id): rows = _share_export_locations_get( context, [share_instance_id], include_admin_only=True) return rows @require_context @require_share_exists def share_export_locations_get(context, share_id): # NOTE(vponomaryov): this method is kept for compatibility with # old approach. New one uses 'share_export_locations_get_by_share_id'. 
# Which returns list of dicts instead of list of strings, as this one does. share = share_get(context, share_id) rows = _share_export_locations_get( context, share.instance.id, context.is_admin) return [location['path'] for location in rows] @require_context def share_export_location_get_by_uuid(context, export_location_uuid, session=None): session = session or get_session() query = model_query( context, models.ShareInstanceExportLocations, session=session, read_deleted="no", ).filter_by( uuid=export_location_uuid, ).options( joinedload("_el_metadata_bare"), ) result = query.first() if not result: raise exception.ExportLocationNotFound(uuid=export_location_uuid) return result @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def share_export_locations_update(context, share_instance_id, export_locations, delete): # NOTE(u_glide): # Backward compatibility code for drivers, # which return single export_location as string if not isinstance(export_locations, (list, tuple, set)): export_locations = (export_locations, ) export_locations_as_dicts = [] for el in export_locations: # NOTE(vponomaryov): transform old export locations view to new one export_location = el if isinstance(el, six.string_types): export_location = { "path": el, "is_admin_only": False, "metadata": {}, } elif isinstance(export_location, dict): if 'metadata' not in export_location: export_location['metadata'] = {} else: raise exception.ManilaException( _("Wrong export location type '%s'.") % type(export_location)) export_locations_as_dicts.append(export_location) export_locations = export_locations_as_dicts export_locations_paths = [el['path'] for el in export_locations] session = get_session() current_el_rows = _share_export_locations_get( context, share_instance_id, session=session) def get_path_list_from_rows(rows): return set([l['path'] for l in rows]) current_el_paths = get_path_list_from_rows(current_el_rows) def create_indexed_time_dict(key_list): base = timeutils.utcnow() return { # NOTE(u_glide): Incrementing timestamp by microseconds to make # timestamp order match index order. 
key: base + datetime.timedelta(microseconds=index) for index, key in enumerate(key_list) } indexed_update_time = create_indexed_time_dict(export_locations_paths) for el in current_el_rows: if delete and el['path'] not in export_locations_paths: export_location_metadata_delete(context, el['uuid']) el.soft_delete(session) else: updated_at = indexed_update_time[el['path']] el.update({ 'updated_at': updated_at, 'deleted': 0, }) el.save(session=session) if el['el_metadata']: export_location_metadata_update( context, el['uuid'], el['el_metadata'], session=session) # Now add new export locations for el in export_locations: if el['path'] in current_el_paths: # Already updated continue location_ref = models.ShareInstanceExportLocations() location_ref.update({ 'uuid': uuidutils.generate_uuid(), 'path': el['path'], 'share_instance_id': share_instance_id, 'updated_at': indexed_update_time[el['path']], 'deleted': 0, 'is_admin_only': el.get('is_admin_only', False), }) location_ref.save(session=session) if not el.get('metadata'): continue export_location_metadata_update( context, location_ref['uuid'], el.get('metadata'), session=session) return get_path_list_from_rows(_share_export_locations_get( context, share_instance_id, session=session)) ##################################### # Export locations metadata functions ##################################### def _export_location_metadata_get_query(context, export_location_uuid, session=None): session = session or get_session() export_location_id = share_export_location_get_by_uuid( context, export_location_uuid).id return model_query( context, models.ShareInstanceExportLocationsMetadata, session=session, read_deleted="no", ).filter_by( export_location_id=export_location_id, ) @require_context def export_location_metadata_get(context, export_location_uuid, session=None): rows = _export_location_metadata_get_query( context, export_location_uuid, session=session).all() result = {} for row in rows: result[row["key"]] = row["value"] return result @require_context def export_location_metadata_delete(context, export_location_uuid, keys=None): session = get_session() metadata = _export_location_metadata_get_query( context, export_location_uuid, session=session, ) # NOTE(vponomaryov): if keys is None then we delete all metadata. if keys is not None: keys = keys if isinstance(keys, (list, set, tuple)) else (keys, ) metadata = metadata.filter( models.ShareInstanceExportLocationsMetadata.key.in_(keys)) metadata = metadata.all() for meta_ref in metadata: meta_ref.soft_delete(session=session) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def export_location_metadata_update(context, export_location_uuid, metadata, delete=False, session=None): session = session or get_session() if delete: original_metadata = export_location_metadata_get( context, export_location_uuid, session=session) keys_for_deletion = set(original_metadata).difference(metadata) if keys_for_deletion: export_location_metadata_delete( context, export_location_uuid, keys=keys_for_deletion) el = share_export_location_get_by_uuid(context, export_location_uuid) for meta_key, meta_value in metadata.items(): # NOTE(vponomaryov): we should use separate session # for each meta_ref because of autoincrement of integer primary key # that will not take effect using one session and we will rewrite, # in that case, single record - first one added with this call. 
session = get_session() item = {"value": meta_value, "updated_at": timeutils.utcnow()} meta_ref = _export_location_metadata_get_query( context, export_location_uuid, session=session, ).filter_by( key=meta_key, ).first() if not meta_ref: meta_ref = models.ShareInstanceExportLocationsMetadata() item.update({ "key": meta_key, "export_location_id": el.id, }) meta_ref.update(item) meta_ref.save(session=session) return metadata ################################### @require_context def security_service_create(context, values): values = ensure_model_dict_has_id(values) security_service_ref = models.SecurityService() security_service_ref.update(values) session = get_session() with session.begin(): security_service_ref.save(session=session) return security_service_ref @require_context def security_service_delete(context, id): session = get_session() with session.begin(): security_service_ref = security_service_get(context, id, session=session) security_service_ref.soft_delete(session) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def security_service_update(context, id, values): session = get_session() with session.begin(): security_service_ref = security_service_get(context, id, session=session) security_service_ref.update(values) security_service_ref.save(session=session) return security_service_ref @require_context def security_service_get(context, id, session=None): result = _security_service_get_query(context, session=session).\ filter_by(id=id).first() if result is None: raise exception.SecurityServiceNotFound(security_service_id=id) return result @require_context def security_service_get_all(context): return _security_service_get_query(context).all() @require_context def security_service_get_all_by_project(context, project_id): return _security_service_get_query(context).\ filter_by(project_id=project_id).all() def _security_service_get_query(context, session=None): if session is None: session = get_session() return model_query(context, models.SecurityService, session=session) ################### def _network_get_query(context, session=None): if session is None: session = get_session() return model_query(context, models.ShareNetwork, session=session).\ options(joinedload('share_instances'), joinedload('security_services'), joinedload('share_servers')) @require_context def share_network_create(context, values): values = ensure_model_dict_has_id(values) network_ref = models.ShareNetwork() network_ref.update(values) session = get_session() with session.begin(): network_ref.save(session=session) return share_network_get(context, values['id'], session) @require_context def share_network_delete(context, id): session = get_session() with session.begin(): network_ref = share_network_get(context, id, session=session) network_ref.soft_delete(session) @require_context def share_network_update(context, id, values): session = get_session() with session.begin(): network_ref = share_network_get(context, id, session=session) network_ref.update(values) network_ref.save(session=session) return network_ref @require_context def share_network_get(context, id, session=None): result = _network_get_query(context, session).filter_by(id=id).first() if result is None: raise exception.ShareNetworkNotFound(share_network_id=id) return result @require_context def share_network_get_all(context): return _network_get_query(context).all() @require_context def share_network_get_all_by_project(context, project_id, user_id=None, session=None): query = _network_get_query(context, session) query 
= query.filter_by(project_id=project_id) if user_id is not None: query = query.filter_by(user_id=user_id) return query.all() @require_context def share_network_get_all_by_security_service(context, security_service_id): session = get_session() return model_query(context, models.ShareNetwork, session=session).\ join(models.ShareNetworkSecurityServiceAssociation, models.ShareNetwork.id == models.ShareNetworkSecurityServiceAssociation.share_network_id).\ filter_by(security_service_id=security_service_id, deleted=0).\ options(joinedload('share_servers')).all() @require_context def share_network_add_security_service(context, id, security_service_id): session = get_session() with session.begin(): assoc_ref = model_query( context, models.ShareNetworkSecurityServiceAssociation, session=session).\ filter_by(share_network_id=id).\ filter_by(security_service_id=security_service_id).first() if assoc_ref: msg = "Already associated" raise exception.ShareNetworkSecurityServiceAssociationError( share_network_id=id, security_service_id=security_service_id, reason=msg) share_nw_ref = share_network_get(context, id, session=session) security_service_ref = security_service_get(context, security_service_id, session=session) share_nw_ref.security_services += [security_service_ref] share_nw_ref.save(session=session) return share_nw_ref @require_context def share_network_remove_security_service(context, id, security_service_id): session = get_session() with session.begin(): share_nw_ref = share_network_get(context, id, session=session) security_service_get(context, security_service_id, session=session) assoc_ref = model_query( context, models.ShareNetworkSecurityServiceAssociation, session=session).\ filter_by(share_network_id=id).\ filter_by(security_service_id=security_service_id).first() if assoc_ref: assoc_ref.soft_delete(session) else: msg = "No association defined" raise exception.ShareNetworkSecurityServiceDissociationError( share_network_id=id, security_service_id=security_service_id, reason=msg) return share_nw_ref ################### def _server_get_query(context, session=None): if session is None: session = get_session() return model_query(context, models.ShareServer, session=session).\ options(joinedload('share_instances'), joinedload('network_allocations'), joinedload('share_network')) @require_context def share_server_create(context, values): values = ensure_model_dict_has_id(values) server_ref = models.ShareServer() server_ref.update(values) session = get_session() with session.begin(): server_ref.save(session=session) # NOTE(u_glide): Do so to prevent errors with relationships return share_server_get(context, server_ref['id'], session=session) @require_context def share_server_delete(context, id): session = get_session() with session.begin(): server_ref = share_server_get(context, id, session=session) share_server_backend_details_delete(context, id, session=session) server_ref.soft_delete(session=session, update_status=True) @require_context def share_server_update(context, id, values): session = get_session() with session.begin(): server_ref = share_server_get(context, id, session=session) server_ref.update(values) server_ref.save(session=session) return server_ref @require_context def share_server_get(context, server_id, session=None): result = _server_get_query(context, session).filter_by(id=server_id)\ .first() if result is None: raise exception.ShareServerNotFound(share_server_id=server_id) return result @require_context def share_server_get_all_by_host_and_share_net_valid(context, host, 
share_net_id, session=None): result = _server_get_query(context, session).filter_by(host=host)\ .filter_by(share_network_id=share_net_id)\ .filter(models.ShareServer.status.in_( (constants.STATUS_CREATING, constants.STATUS_ACTIVE))).all() if not result: filters_description = ('share_network_id is "%(share_net_id)s",' ' host is "%(host)s" and status in' ' "%(status_cr)s" or "%(status_act)s"') % { 'share_net_id': share_net_id, 'host': host, 'status_cr': constants.STATUS_CREATING, 'status_act': constants.STATUS_ACTIVE, } raise exception.ShareServerNotFoundByFilters( filters_description=filters_description) return result @require_context def share_server_get_all(context): return _server_get_query(context).all() @require_context def share_server_get_all_by_host(context, host): return _server_get_query(context).filter_by(host=host).all() @require_context def share_server_get_all_unused_deletable(context, host, updated_before): valid_server_status = ( constants.STATUS_INACTIVE, constants.STATUS_ACTIVE, constants.STATUS_ERROR, ) result = _server_get_query(context)\ .filter_by(host=host)\ .filter(~models.ShareServer.consistency_groups.any())\ .filter(~models.ShareServer.share_instances.any())\ .filter(models.ShareServer.status.in_(valid_server_status))\ .filter(models.ShareServer.updated_at < updated_before).all() return result @require_context def share_server_backend_details_set(context, share_server_id, server_details): share_server_get(context, share_server_id) for meta_key, meta_value in server_details.items(): meta_ref = models.ShareServerBackendDetails() meta_ref.update({ 'key': meta_key, 'value': meta_value, 'share_server_id': share_server_id }) session = get_session() with session.begin(): meta_ref.save(session) return server_details @require_context def share_server_backend_details_delete(context, share_server_id, session=None): if not session: session = get_session() share_server_details = model_query(context, models.ShareServerBackendDetails, session=session)\ .filter_by(share_server_id=share_server_id).all() for item in share_server_details: item.soft_delete(session) ################### def _driver_private_data_query(session, context, host, entity_id, key=None, read_deleted=False): query = model_query( context, models.DriverPrivateData, session=session, read_deleted=read_deleted, ).filter_by( entity_uuid=entity_id, ) if isinstance(key, list): return query.filter(models.DriverPrivateData.key.in_(key)) elif key is not None: return query.filter_by(key=key) return query @require_context def driver_private_data_get(context, host, entity_id, key=None, default=None, session=None): if not session: session = get_session() query = _driver_private_data_query(session, context, host, entity_id, key) if key is None or isinstance(key, list): return {item.key: item.value for item in query.all()} else: result = query.first() return result["value"] if result is not None else default @require_context def driver_private_data_update(context, host, entity_id, details, delete_existing=False, session=None): # NOTE(u_glide): following code modifies details dict, that's why we should # copy it new_details = copy.deepcopy(details) if not session: session = get_session() with session.begin(): # Process existing data # NOTE(u_glide): read_deleted=None means here 'read all' original_data = _driver_private_data_query( session, context, host, entity_id, read_deleted=None).all() for data_ref in original_data: in_new_details = data_ref['key'] in new_details if in_new_details: new_value = 
six.text_type(new_details.pop(data_ref['key'])) data_ref.update({ "value": new_value, "deleted": 0, "deleted_at": None }) data_ref.save(session=session) elif delete_existing and data_ref['deleted'] != 1: data_ref.update({ "deleted": 1, "deleted_at": timeutils.utcnow() }) data_ref.save(session=session) # Add new data for key, value in new_details.items(): data_ref = models.DriverPrivateData() data_ref.update({ "host": host, "entity_uuid": entity_id, "key": key, "value": six.text_type(value) }) data_ref.save(session=session) return details @require_context def driver_private_data_delete(context, host, entity_id, key=None, session=None): if not session: session = get_session() with session.begin(): query = _driver_private_data_query(session, context, host, entity_id, key) query.update({"deleted": 1, "deleted_at": timeutils.utcnow()}) ################### @require_context def network_allocation_create(context, values): values = ensure_model_dict_has_id(values) alloc_ref = models.NetworkAllocation() alloc_ref.update(values) session = get_session() with session.begin(): alloc_ref.save(session=session) return alloc_ref @require_context def network_allocation_delete(context, id): session = get_session() with session.begin(): alloc_ref = network_allocation_get(context, id, session=session) alloc_ref.soft_delete(session) @require_context def network_allocation_get(context, id, session=None): if session is None: session = get_session() result = model_query(context, models.NetworkAllocation, session=session).\ filter_by(id=id).first() if result is None: raise exception.NotFound() return result @require_context def network_allocations_get_by_ip_address(context, ip_address): session = get_session() result = model_query(context, models.NetworkAllocation, session=session).\ filter_by(ip_address=ip_address).all() return result or [] @require_context def network_allocations_get_for_share_server(context, share_server_id, session=None, label=None): if session is None: session = get_session() query = model_query( context, models.NetworkAllocation, session=session, ).filter_by( share_server_id=share_server_id, ) if label: if label != 'admin': query = query.filter(or_( # NOTE(vponomaryov): we treat None as alias for 'user'. models.NetworkAllocation.label == None, # noqa models.NetworkAllocation.label == label, )) else: query = query.filter(models.NetworkAllocation.label == label) result = query.all() return result @require_context def network_allocation_update(context, id, values): session = get_session() with session.begin(): alloc_ref = network_allocation_get(context, id, session=session) alloc_ref.update(values) alloc_ref.save(session=session) return alloc_ref ################### def _dict_with_extra_specs(inst_type_query): """Convert a share type query result to a dict with flattened extra_specs. Takes a share type query returned by sqlalchemy and returns it as a dictionary, converting the extra_specs entry from a list of dicts: 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...] to a single dict: 'extra_specs' : {'k1': 'v1'} """ inst_type_dict = dict(inst_type_query) extra_specs = {x['key']: x['value'] for x in inst_type_query['extra_specs']} inst_type_dict['extra_specs'] = extra_specs return inst_type_dict @require_admin_context def share_type_create(context, values, projects=None): """Create a new share type. 
In order to pass in extra specs, the values dict should contain a 'extra_specs' key/value pair: {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}} """ values = ensure_model_dict_has_id(values) projects = projects or [] session = get_session() with session.begin(): try: values['extra_specs'] = _metadata_refs(values.get('extra_specs'), models.ShareTypeExtraSpecs) share_type_ref = models.ShareTypes() share_type_ref.update(values) share_type_ref.save(session=session) except db_exception.DBDuplicateEntry: raise exception.ShareTypeExists(id=values['name']) except Exception as e: raise db_exception.DBError(e) for project in set(projects): access_ref = models.ShareTypeProjects() access_ref.update({"share_type_id": share_type_ref.id, "project_id": project}) access_ref.save(session=session) return share_type_ref def _share_type_get_query(context, session=None, read_deleted=None, expected_fields=None): expected_fields = expected_fields or [] query = model_query(context, models.ShareTypes, session=session, read_deleted=read_deleted). \ options(joinedload('extra_specs')) if 'projects' in expected_fields: query = query.options(joinedload('projects')) if not context.is_admin: the_filter = [models.ShareTypes.is_public == true()] projects_attr = getattr(models.ShareTypes, 'projects') the_filter.extend([ projects_attr.any(project_id=context.project_id) ]) query = query.filter(or_(*the_filter)) return query @require_context def share_type_get_all(context, inactive=False, filters=None): """Returns a dict describing all share_types with name as key.""" filters = filters or {} read_deleted = "yes" if inactive else "no" query = _share_type_get_query(context, read_deleted=read_deleted) if 'is_public' in filters and filters['is_public'] is not None: the_filter = [models. ShareTypes.is_public == filters['is_public']] if filters['is_public'] and context.project_id is not None: projects_attr = getattr(models. ShareTypes, 'projects') the_filter.extend([ projects_attr.any( project_id=context.project_id, deleted=0) ]) if len(the_filter) > 1: query = query.filter(or_(*the_filter)) else: query = query.filter(the_filter[0]) rows = query.order_by("name").all() result = {} for row in rows: result[row['name']] = _dict_with_extra_specs(row) return result def _share_type_get_id_from_share_type_query(context, id, session=None): return model_query( context, models.ShareTypes, read_deleted="no", session=session).\ filter_by(id=id) def _share_type_get_id_from_share_type(context, id, session=None): result = _share_type_get_id_from_share_type_query( context, id, session=session).first() if not result: raise exception.ShareTypeNotFound(share_type_id=id) return result['id'] @require_context def _share_type_get(context, id, session=None, inactive=False, expected_fields=None): expected_fields = expected_fields or [] read_deleted = "yes" if inactive else "no" result = _share_type_get_query( context, session, read_deleted, expected_fields). \ filter_by(id=id). 
\ first() if not result: raise exception.ShareTypeNotFound(share_type_id=id) share_type = _dict_with_extra_specs(result) if 'projects' in expected_fields: share_type['projects'] = [p['project_id'] for p in result['projects']] return share_type @require_context def share_type_get(context, id, inactive=False, expected_fields=None): """Return a dict describing a specific share_type.""" return _share_type_get(context, id, session=None, inactive=inactive, expected_fields=expected_fields) @require_context def _share_type_get_by_name(context, name, session=None): result = model_query(context, models.ShareTypes, session=session).\ options(joinedload('extra_specs')).\ filter_by(name=name).\ first() if not result: raise exception.ShareTypeNotFoundByName(share_type_name=name) return _dict_with_extra_specs(result) @require_context def share_type_get_by_name(context, name): """Return a dict describing a specific share_type.""" return _share_type_get_by_name(context, name) @require_admin_context def share_type_destroy(context, id): session = get_session() with session.begin(): _share_type_get(context, id, session) results = model_query(context, models.Share, session=session, read_deleted="no").\ filter_by(share_type_id=id).count() cg_count = model_query(context, models.ConsistencyGroupShareTypeMapping, read_deleted="no", session=session).\ filter_by(share_type_id=id).count() if results or cg_count: LOG.error(_LE('ShareType %s deletion failed, ShareType in use.'), id) raise exception.ShareTypeInUse(share_type_id=id) model_query(context, models.ShareTypeExtraSpecs, session=session).\ filter_by(share_type_id=id).soft_delete() model_query(context, models.ShareTypes, session=session).\ filter_by(id=id).soft_delete() def _share_type_access_query(context, session=None): return model_query(context, models.ShareTypeProjects, session=session, read_deleted="no") @require_admin_context def share_type_access_get_all(context, type_id): share_type_id = _share_type_get_id_from_share_type(context, type_id) return _share_type_access_query(context).\ filter_by(share_type_id=share_type_id).all() @require_admin_context def share_type_access_add(context, type_id, project_id): """Add given tenant to the share type access list.""" share_type_id = _share_type_get_id_from_share_type(context, type_id) access_ref = models.ShareTypeProjects() access_ref.update({"share_type_id": share_type_id, "project_id": project_id}) session = get_session() with session.begin(): try: access_ref.save(session=session) except db_exception.DBDuplicateEntry: raise exception.ShareTypeAccessExists(share_type_id=type_id, project_id=project_id) return access_ref @require_admin_context def share_type_access_remove(context, type_id, project_id): """Remove given tenant from the share type access list.""" share_type_id = _share_type_get_id_from_share_type(context, type_id) count = _share_type_access_query(context).\ filter_by(share_type_id=share_type_id).\ filter_by(project_id=project_id).\ soft_delete(synchronize_session=False) if count == 0: raise exception.ShareTypeAccessNotFound( share_type_id=type_id, project_id=project_id) @require_context def volume_get_active_by_window(context, begin, end=None, project_id=None): """Return shares that were active during the given window.""" query = model_query(context, models.Share, read_deleted="yes") query = query.filter(or_(models.Share.deleted_at.is_(None), models.Share.deleted_at > begin)) if end: query = query.filter(models.Share.created_at < end) if project_id: query = query.filter_by(project_id=project_id) return query.all() 
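# NOTE(editor): The following sketch is an editorial illustration and is not
# part of the original manila module. It mirrors, on plain dictionaries, the
# 'extra_specs' flattening that _dict_with_extra_specs() performs on a share
# type row loaded with joinedload('extra_specs'). The helper name and the
# sample data are made up for illustration only.
def _flatten_extra_specs_example(share_type_row):
    """Flatten [{'key': k, 'value': v}, ...] into {k: v}, like the DB helper."""
    flattened = dict(share_type_row)
    flattened['extra_specs'] = {
        spec['key']: spec['value']
        for spec in share_type_row.get('extra_specs', [])
    }
    return flattened
# Example:
#   _flatten_extra_specs_example({
#       'name': 'default',
#       'extra_specs': [{'key': 'driver_handles_share_servers',
#                        'value': 'True'}],
#   })
#   => {'name': 'default',
#       'extra_specs': {'driver_handles_share_servers': 'True'}}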
#################### def _share_type_extra_specs_query(context, share_type_id, session=None): return model_query(context, models.ShareTypeExtraSpecs, session=session, read_deleted="no").\ filter_by(share_type_id=share_type_id).\ options(joinedload('share_type')) @require_context def share_type_extra_specs_get(context, share_type_id): rows = _share_type_extra_specs_query(context, share_type_id).\ all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context def share_type_extra_specs_delete(context, share_type_id, key): session = get_session() with session.begin(): _share_type_extra_specs_get_item(context, share_type_id, key, session) _share_type_extra_specs_query(context, share_type_id, session).\ filter_by(key=key).soft_delete() @require_context def _share_type_extra_specs_get_item(context, share_type_id, key, session=None): result = _share_type_extra_specs_query( context, share_type_id, session=session ).filter_by(key=key).options(joinedload('share_type')).first() if not result: raise exception.ShareTypeExtraSpecsNotFound( extra_specs_key=key, share_type_id=share_type_id) return result @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def share_type_extra_specs_update_or_create(context, share_type_id, specs): session = get_session() with session.begin(): spec_ref = None for key, value in specs.items(): try: spec_ref = _share_type_extra_specs_get_item( context, share_type_id, key, session) except exception.ShareTypeExtraSpecsNotFound: spec_ref = models.ShareTypeExtraSpecs() spec_ref.update({"key": key, "value": value, "share_type_id": share_type_id, "deleted": 0}) spec_ref.save(session=session) return specs def ensure_availability_zone_exists(context, values, session, strict=True): az_name = values.pop('availability_zone', None) if strict and not az_name: msg = _("Values dict should have 'availability_zone' field.") raise ValueError(msg) elif not az_name: return if uuidutils.is_uuid_like(az_name): az_ref = availability_zone_get(context, az_name, session=session) else: az_ref = availability_zone_create_if_not_exist( context, az_name, session=session) values.update({'availability_zone_id': az_ref['id']}) def availability_zone_get(context, id_or_name, session=None): if session is None: session = get_session() query = model_query(context, models.AvailabilityZone, session=session) if uuidutils.is_uuid_like(id_or_name): query = query.filter_by(id=id_or_name) else: query = query.filter_by(name=id_or_name) result = query.first() if not result: raise exception.AvailabilityZoneNotFound(id=id_or_name) return result @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def availability_zone_create_if_not_exist(context, name, session=None): if session is None: session = get_session() az = models.AvailabilityZone() az.update({'id': uuidutils.generate_uuid(), 'name': name}) try: with session.begin(): az.save(session) # NOTE(u_glide): Do not catch specific exception here, because it depends # on concrete backend used by SqlAlchemy except Exception: return availability_zone_get(context, name, session=session) return az def availability_zone_get_all(context): session = get_session() enabled_services = model_query( context, models.Service, models.Service.availability_zone_id, session=session, read_deleted="no" ).filter_by(disabled=False).distinct() return model_query(context, models.AvailabilityZone, session=session, read_deleted="no").filter( models.AvailabilityZone.id.in_(enabled_services) ).all() #################### def 
_consistency_group_get(context, consistency_group_id, session=None): session = session or get_session() result = model_query(context, models.ConsistencyGroup, session=session, project_only=True, read_deleted='no').\ filter_by(id=consistency_group_id).\ options(joinedload('share_types')).\ first() if not result: raise exception.ConsistencyGroupNotFound( consistency_group_id=consistency_group_id) return result @require_context def consistency_group_get(context, consistency_group_id, session=None): return _consistency_group_get(context, consistency_group_id, session=session) def _consistency_group_get_all_query(context, session=None): session = session or get_session() return model_query(context, models.ConsistencyGroup, session=session, read_deleted='no') @require_admin_context def consistency_group_get_all(context, detailed=True): query = _consistency_group_get_all_query(context) if detailed: return query.options(joinedload('share_types')).all() else: query = query.with_entities(models.ConsistencyGroup.id, models.ConsistencyGroup.name) values = [] for item in query.all(): id, name = item values.append({"id": id, "name": name}) return values @require_admin_context def consistency_group_get_all_by_host(context, host, detailed=True): query = _consistency_group_get_all_query(context).filter_by(host=host) if detailed: return query.options(joinedload('share_types')).all() else: query = query.with_entities(models.ConsistencyGroup.id, models.ConsistencyGroup.name) values = [] for item in query.all(): id, name = item values.append({"id": id, "name": name}) return values @require_context def consistency_group_get_all_by_project(context, project_id, detailed=True): authorize_project_context(context, project_id) query = _consistency_group_get_all_query(context).filter_by( project_id=project_id) if detailed: return query.options(joinedload('share_types')).all() else: query = query.with_entities(models.ConsistencyGroup.id, models.ConsistencyGroup.name) values = [] for item in query.all(): id, name = item values.append({"id": id, "name": name}) return values @require_context def consistency_group_get_all_by_share_server(context, share_server_id): return _consistency_group_get_all_query(context).filter_by( share_server_id=share_server_id).all() @require_context def consistency_group_create(context, values): consistency_group = models.ConsistencyGroup() if not values.get('id'): values['id'] = six.text_type(uuid.uuid4()) mappings = [] for item in values.get('share_types') or []: mapping = models.ConsistencyGroupShareTypeMapping() mapping['id'] = six.text_type(uuid.uuid4()) mapping['share_type_id'] = item mapping['consistency_group_id'] = values['id'] mappings.append(mapping) values['share_types'] = mappings session = get_session() with session.begin(): consistency_group.update(values) session.add(consistency_group) return _consistency_group_get(context, values['id'], session=session) @require_context def consistency_group_update(context, consistency_group_id, values): session = get_session() with session.begin(): cg_ref = _consistency_group_get(context, consistency_group_id, session=session) cg_ref.update(values) cg_ref.save(session=session) return cg_ref @require_admin_context def consistency_group_destroy(context, consistency_group_id): session = get_session() with session.begin(): cg_ref = _consistency_group_get(context, consistency_group_id, session=session) cg_ref.soft_delete(session) session.query(models.ConsistencyGroupShareTypeMapping).\ filter_by(consistency_group_id=cg_ref['id']).soft_delete() 
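# NOTE(editor): Editorial illustration, not part of the original manila
# module. consistency_group_get_all(), *_by_host() and *_by_project() above
# all follow the same "detailed vs. summary" pattern: with detailed=False the
# query is narrowed via with_entities() to (id, name) tuples, which are then
# repackaged as small dicts. The helper below mirrors that projection on
# plain tuples; its name and the sample values are made up for illustration.
def _cg_summary_view_example(rows):
    """Convert (id, name) tuples into the summary dicts returned above."""
    return [{"id": cg_id, "name": name} for cg_id, name in rows]
# Example:
#   _cg_summary_view_example([('cg-1', 'nightly-backups'), ('cg-2', None)])
#   => [{'id': 'cg-1', 'name': 'nightly-backups'},
#       {'id': 'cg-2', 'name': None}]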
@require_context def count_shares_in_consistency_group(context, consistency_group_id, session=None): session = session or get_session() return model_query( context, models.Share, session=session, project_only=True, read_deleted="no").\ filter_by(consistency_group_id=consistency_group_id).\ count() @require_context def count_cgsnapshots_in_consistency_group(context, consistency_group_id, session=None): session = session or get_session() return model_query( context, models.CGSnapshot, session=session, project_only=True, read_deleted="no").\ filter_by(consistency_group_id=consistency_group_id).\ count() @require_context def count_consistency_groups_in_share_network(context, share_network_id, session=None): session = session or get_session() return model_query( context, models.ConsistencyGroup, session=session, project_only=True, read_deleted="no").\ filter_by(share_network_id=share_network_id).\ count() @require_context def count_cgsnapshot_members_in_share(context, share_id, session=None): session = session or get_session() return model_query( context, models.CGSnapshotMember, session=session, project_only=True, read_deleted="no").\ filter_by(share_id=share_id).\ count() @require_context def _cgsnapshot_get(context, cgsnapshot_id, session=None): session = session or get_session() result = model_query(context, models.CGSnapshot, session=session, project_only=True, read_deleted='no').\ options(joinedload('cgsnapshot_members')).\ options(joinedload('consistency_group')).\ filter_by(id=cgsnapshot_id).\ first() if not result: raise exception.CGSnapshotNotFound(cgsnapshot_id=cgsnapshot_id) return result def _cgsnapshot_get_all_query(context, session=None): session = session or get_session() return model_query(context, models.CGSnapshot, session=session, read_deleted='no').\ options(joinedload('cgsnapshot_members')).\ options(joinedload('consistency_group')) @require_context def cgsnapshot_get(context, cgsnapshot_id, session=None): session = session or get_session() return _cgsnapshot_get(context, cgsnapshot_id, session=session) @require_admin_context def cgsnapshot_get_all(context, detailed=True): query = _cgsnapshot_get_all_query(context) if detailed: return query.all() else: query = query.with_entities(models.CGSnapshot.id, models.CGSnapshot.name) values = [] for item in query.all(): id, name = item values.append({"id": id, "name": name}) return values @require_context def cgsnapshot_get_all_by_project(context, project_id, detailed=True): authorize_project_context(context, project_id) query = _cgsnapshot_get_all_query(context).filter_by( project_id=project_id) if detailed: return query.all() else: query = query.with_entities(models.CGSnapshot.id, models.CGSnapshot.name) values = [] for item in query.all(): id, name = item values.append({"id": id, "name": name}) return values @require_context def cgsnapshot_create(context, values): cgsnapshot = models.CGSnapshot() if not values.get('id'): values['id'] = six.text_type(uuid.uuid4()) session = get_session() with session.begin(): cgsnapshot.update(values) session.add(cgsnapshot) return _cgsnapshot_get(context, values['id'], session=session) @require_context def cgsnapshot_update(context, cgsnapshot_id, values): session = get_session() with session.begin(): cg_ref = _cgsnapshot_get(context, cgsnapshot_id, session=session) cg_ref.update(values) cg_ref.save(session=session) return cg_ref @require_admin_context def cgsnapshot_destroy(context, cgsnapshot_id): session = get_session() with session.begin(): cgsnap_ref = _cgsnapshot_get(context, 
cgsnapshot_id, session=session) cgsnap_ref.soft_delete(session) session.query(models.CGSnapshotMember).\ filter_by(cgsnapshot_id=cgsnapshot_id).soft_delete() @require_context def cgsnapshot_members_get_all(context, cgsnapshot_id, session=None): session = session or get_session() query = model_query(context, models.CGSnapshotMember, session=session, read_deleted='no').filter_by( cgsnapshot_id=cgsnapshot_id) return query.all() @require_context def cgsnapshot_member_get(context, member_id, session=None): result = model_query(context, models.CGSnapshotMember, session=session, project_only=True, read_deleted='no').\ filter_by(id=member_id).\ first() if not result: raise exception.CGSnapshotMemberNotFound(member_id=member_id) return result @require_context def cgsnapshot_member_create(context, values): member = models.CGSnapshotMember() if not values.get('id'): values['id'] = six.text_type(uuid.uuid4()) session = get_session() with session.begin(): member.update(values) session.add(member) return cgsnapshot_member_get(context, values['id'], session=session) @require_context def cgsnapshot_member_update(context, member_id, values): session = get_session() with session.begin(): member = cgsnapshot_member_get(context, member_id, session=session) member.update(values) session.add(member) return cgsnapshot_member_get(context, member_id, session=session) manila-2.0.0/manila/db/migrations/0000775000567000056710000000000012701407265020121 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/db/migrations/utils.py0000664000567000056710000000136712701407107021635 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sa def load_table(name, connection): return sa.Table(name, sa.MetaData(), autoload=True, autoload_with=connection) manila-2.0.0/manila/db/migrations/__init__.py0000664000567000056710000000000012701407107022213 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/db/migrations/alembic/0000775000567000056710000000000012701407265021515 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/db/migrations/alembic/env.py0000664000567000056710000000253012701407107022652 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import with_statement from alembic import context from manila.db.sqlalchemy import api as db_api from manila.db.sqlalchemy import models as db_models def run_migrations_online(): """Run migrations in 'online' mode. 
In this scenario we need to create an Engine and associate a connection with the context. """ engine = db_api.get_engine() connection = engine.connect() target_metadata = db_models.ManilaBase.metadata context.configure(connection=connection, # pylint: disable=E1101 target_metadata=target_metadata) try: with context.begin_transaction(): # pylint: disable=E1101 context.run_migrations() # pylint: disable=E1101 finally: connection.close() run_migrations_online() manila-2.0.0/manila/db/migrations/alembic/versions/0000775000567000056710000000000012701407265023365 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/db/migrations/alembic/versions/1f0bd302c1a6_add_availability_zones_table.py0000664000567000056710000001120612701407107033455 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_availability_zones_table Revision ID: 1f0bd302c1a6 Revises: 579c267fbb4d Create Date: 2015-07-24 12:09:36.008570 """ # revision identifiers, used by Alembic. revision = '1f0bd302c1a6' down_revision = '579c267fbb4d' from alembic import op from oslo_utils import timeutils from oslo_utils import uuidutils from sqlalchemy import Column, DateTime, ForeignKey, String, UniqueConstraint from manila.db.migrations import utils def collect_existing_az_from_services_table(connection, services_table, az_table): az_name_to_id_mapping = dict() existing_az = [] for service in connection.execute(services_table.select()): if service.availability_zone in az_name_to_id_mapping: continue az_id = uuidutils.generate_uuid() az_name_to_id_mapping[service.availability_zone] = az_id existing_az.append({ 'created_at': timeutils.utcnow(), 'id': az_id, 'name': service.availability_zone }) op.bulk_insert(az_table, existing_az) return az_name_to_id_mapping def upgrade(): connection = op.get_bind() # Create new AZ table and columns availability_zones_table = op.create_table( 'availability_zones', Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('name', String(length=255)), UniqueConstraint('name', 'deleted', name='az_name_uc'), mysql_engine='InnoDB', mysql_charset='utf8') for table_name, fk_name in (('services', 'service_az_id_fk'), ('share_instances', 'si_az_id_fk')): op.add_column( table_name, Column('availability_zone_id', String(36), ForeignKey('availability_zones.id', name=fk_name)) ) # Collect existing AZs from services table services_table = utils.load_table('services', connection) az_name_to_id_mapping = collect_existing_az_from_services_table( connection, services_table, availability_zones_table) # Map string AZ names to ID's in target tables set_az_id_in_table = lambda table, id, name: ( op.execute( table.update().where(table.c.availability_zone == name).values( {'availability_zone_id': id}) ) ) share_instances_table = utils.load_table('share_instances', connection) for name, id in az_name_to_id_mapping.items(): for table_name in 
[services_table, share_instances_table]: set_az_id_in_table(table_name, id, name) # Remove old AZ columns from tables op.drop_column('services', 'availability_zone') op.drop_column('share_instances', 'availability_zone') def downgrade(): connection = op.get_bind() # Create old AZ fields op.add_column('services', Column('availability_zone', String(length=255))) op.add_column('share_instances', Column('availability_zone', String(length=255))) # Migrate data az_table = utils.load_table('availability_zones', connection) share_instances_table = utils.load_table('share_instances', connection) services_table = utils.load_table('services', connection) for az in connection.execute(az_table.select()): op.execute( share_instances_table.update().where( share_instances_table.c.availability_zone_id == az.id ).values({'availability_zone': az.name}) ) op.execute( services_table.update().where( services_table.c.availability_zone_id == az.id ).values({'availability_zone': az.name}) ) # Remove AZ_id columns and AZ table op.drop_constraint('service_az_id_fk', 'services', type_='foreignkey') op.drop_column('services', 'availability_zone_id') op.drop_constraint('si_az_id_fk', 'share_instances', type_='foreignkey') op.drop_column('share_instances', 'availability_zone_id') op.drop_table('availability_zones') manila-2.0.0/manila/db/migrations/alembic/versions/533646c7af38_remove_unused_attr_status.py0000664000567000056710000000417312701407107033022 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Remove unused attr status Revision ID: 533646c7af38 Revises: 3a482171410f Create Date: 2015-05-28 13:13:47.651353 """ # revision identifiers, used by Alembic. revision = '533646c7af38' down_revision = '3a482171410f' from alembic import op from oslo_log import log import sqlalchemy as sql from manila.common import constants from manila.i18n import _LE LOG = log.getLogger(__name__) COLUMN_NAME = 'status' TABLE_NAMES = ('network_allocations', 'security_services') def upgrade(): for t_name in TABLE_NAMES: try: op.drop_column(t_name, COLUMN_NAME) except Exception: LOG.error(_LE("Column '%s' could not be dropped"), COLUMN_NAME) raise def downgrade(): for t_name in TABLE_NAMES: try: op.add_column( t_name, sql.Column( COLUMN_NAME, # NOTE(vponomaryov): original type of attr was enum. But # alembic is buggy with enums [1], so use string type # instead. Anyway we have no reason to keep enum/constraint # on specific set of possible statuses because they have # not been used. 
# [1] - https://bitbucket.org/zzzeek/alembic/ # issue/89/opadd_column-and-opdrop_column-should sql.String(255), default=constants.STATUS_NEW, ), ) except Exception: LOG.error(_LE("Column '%s' could not be added"), COLUMN_NAME) raise manila-2.0.0/manila/db/migrations/alembic/versions/579c267fbb4d_add_share_instances_access_map.py0000664000567000056710000000766012701407107034012 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_share_instances_access_map Revision ID: 579c267fbb4d Revises: 5077ffcc5f1c Create Date: 2015-08-19 07:51:52.928542 """ # revision identifiers, used by Alembic. revision = '579c267fbb4d' down_revision = '5077ffcc5f1c' from alembic import op from sqlalchemy import Column, DateTime, ForeignKey, String from oslo_utils import uuidutils from manila.db.migrations import utils def upgrade(): """Create 'share_instance_access_map' table and move 'state' column.""" instance_access_table = op.create_table( 'share_instance_access_map', Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('share_instance_id', String(length=36), ForeignKey('share_instances.id', name="siam_instance_fk")), Column('access_id', String(length=36), ForeignKey('share_access_map.id', name="siam_access_fk")), Column('state', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8') # NOTE(u_glide): Move all states from 'share_access_map' # to 'share_instance_access_map' instance_access_mappings = [] connection = op.get_bind() access_table = utils.load_table('share_access_map', connection) instances_table = utils.load_table('share_instances', connection) for access_rule in connection.execute(access_table.select()): instances_query = instances_table.select().where( instances_table.c.share_id == access_rule.share_id ) for instance in connection.execute(instances_query): instance_access_mappings.append({ 'created_at': access_rule.created_at, 'updated_at': access_rule.updated_at, 'deleted_at': access_rule.deleted_at, 'deleted': access_rule.deleted, 'id': uuidutils.generate_uuid(), 'share_instance_id': instance.id, 'access_id': access_rule.id, 'state': access_rule.state, }) op.bulk_insert(instance_access_table, instance_access_mappings) op.drop_column('share_access_map', 'state') def downgrade(): """Remove 'share_instance_access_map' table and add 'state' column back. This method can lead to data loss because only first state is saved in share_access_map table. 
""" op.add_column('share_access_map', Column('state', String(length=255))) # NOTE(u_glide): Move all states from 'share_instance_access_map' # to 'share_access_map' connection = op.get_bind() access_table = utils.load_table('share_access_map', connection) instance_access_table = utils.load_table('share_instance_access_map', connection) for access_rule in connection.execute(access_table.select()): access_mapping = connection.execute( instance_access_table.select().where( instance_access_table.c.deleted == "False").where( instance_access_table.c.access_id == access_rule['id']) ).first() op.execute( access_table.update().where( access_table.c.id == access_rule['id'] ).values({'state': access_mapping['state']}) ) op.drop_table('share_instance_access_map') manila-2.0.0/manila/db/migrations/alembic/versions/323840a08dc4_add_shares_task_state.py0000664000567000056710000000176612701407107032012 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hitachi Data Systems. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add shares.task_state Revision ID: 323840a08dc4 Revises: 3651e16d7c43 Create Date: 2015-04-30 07:58:45.175790 """ # revision identifiers, used by Alembic. revision = '323840a08dc4' down_revision = '3651e16d7c43' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('shares', sa.Column('task_state', sa.String(255))) def downgrade(): op.drop_column('shares', 'task_state') manila-2.0.0/manila/db/migrations/alembic/versions/30cb96d995fa_add_is_public_column_for_share.py0000664000567000056710000000253712701407107034037 0ustar jenkinsjenkins00000000000000# Copyright 2015 mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add public column for share Revision ID: 30cb96d995fa Revises: ef0c02b4366 Create Date: 2015-01-16 03:07:15.548947 """ # revision identifiers, used by Alembic. 
revision = '30cb96d995fa' down_revision = 'ef0c02b4366' from alembic import op from oslo_log import log import sqlalchemy as sa from manila.i18n import _LE LOG = log.getLogger(__name__) def upgrade(): try: op.add_column('shares', sa.Column('is_public', sa.Boolean, default=False)) except Exception: LOG.error(_LE("Column shares.is_public not created!")) raise def downgrade(): try: op.drop_column('shares', 'is_public') except Exception: LOG.error(_LE("Column shares.is_public not dropped!")) raise ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000manila-2.0.0/manila/db/migrations/alembic/versions/17115072e1c3_add_nova_net_id_column_to_share_networks.pymanila-2.0.0/manila/db/migrations/alembic/versions/17115072e1c3_add_nova_net_id_column_to_share_netw0000664000567000056710000000206712701407107034346 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_nova_net_id_column_to_share_networks Revision ID: 17115072e1c3 Revises: 38e632621e5a Create Date: 2015-02-05 18:07:19.062995 """ # revision identifiers, used by Alembic. revision = '17115072e1c3' down_revision = '38e632621e5a' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column( 'share_networks', sa.Column('nova_net_id', sa.String(36), nullable=True)) def downgrade(): op.drop_column('share_networks', 'nova_net_id') manila-2.0.0/manila/db/migrations/alembic/versions/5077ffcc5f1c_add_share_instances.py0000664000567000056710000002655112701407107031707 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_share_instances Revision ID: 5077ffcc5f1c Revises: 3db9992c30f3 Create Date: 2015-06-26 12:54:55.630152 """ # revision identifiers, used by Alembic. 
revision = '5077ffcc5f1c' down_revision = '3db9992c30f3' from alembic import op from sqlalchemy import Column, DateTime, ForeignKey, String import six from manila.db.migrations import utils def create_share_instances_table(connection): # Create 'share_instances' table share_instances_table = op.create_table( 'share_instances', Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('share_id', String(length=36), ForeignKey('shares.id', name="si_share_fk")), Column('host', String(length=255)), Column('status', String(length=255)), Column('scheduled_at', DateTime), Column('launched_at', DateTime), Column('terminated_at', DateTime), Column('share_network_id', String(length=36), ForeignKey('share_networks.id', name="si_share_network_fk"), nullable=True), Column('share_server_id', String(length=36), ForeignKey('share_servers.id', name="si_share_server_fk"), nullable=True), Column('availability_zone', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8') # Migrate data from 'shares' to 'share_instances' share_instances = [] shares_table = utils.load_table('shares', connection) for share in connection.execute(shares_table.select()): share_instances.append({ 'created_at': share.created_at, 'updated_at': share.updated_at, 'deleted_at': share.deleted_at, 'deleted': share.deleted, 'id': share.id, 'share_id': share.id, 'host': share.host, 'status': share.status, 'scheduled_at': share.scheduled_at, 'launched_at': share.launched_at, 'terminated_at': share.terminated_at, 'share_network_id': share.share_network_id, 'share_server_id': share.share_server_id, 'availability_zone': share.availability_zone, }) op.bulk_insert(share_instances_table, share_instances) # Remove columns moved to 'share_instances' table with op.batch_alter_table("shares") as batch_op: for fk in shares_table.foreign_keys: batch_op.drop_constraint(fk.name, type_='foreignkey') batch_op.drop_column('host') batch_op.drop_column('status') batch_op.drop_column('scheduled_at') batch_op.drop_column('launched_at') batch_op.drop_column('terminated_at') batch_op.drop_column('share_network_id') batch_op.drop_column('share_server_id') batch_op.drop_column('availability_zone') def remove_share_instances_table(connection): with op.batch_alter_table("shares") as batch_op: batch_op.add_column(Column('host', String(length=255))) batch_op.add_column(Column('status', String(length=255))) batch_op.add_column(Column('scheduled_at', DateTime)) batch_op.add_column(Column('launched_at', DateTime)) batch_op.add_column(Column('terminated_at', DateTime)) batch_op.add_column(Column('share_network_id', String(length=36), ForeignKey('share_networks.id'), nullable=True)) batch_op.add_column(Column('share_server_id', String(length=36), ForeignKey('share_servers.id'), nullable=True)) batch_op.add_column(Column('availability_zone', String(length=255))) shares_table = utils.load_table('shares', connection) share_inst_table = utils.load_table('share_instances', connection) for share in connection.execute(shares_table.select()): instance = connection.execute( share_inst_table.select().where( share_inst_table.c.share_id == share.id) ).first() op.execute( shares_table.update().where( shares_table.c.id == share.id ).values( { 'host': instance['host'], 'status': instance['status'], 'scheduled_at': instance['scheduled_at'], 'launched_at': instance['launched_at'], 'terminated_at': 
instance['terminated_at'], 'share_network_id': instance['share_network_id'], 'share_server_id': instance['share_server_id'], 'availability_zone': instance['availability_zone'], } ) ) op.drop_table('share_instances') def create_snapshot_instances_table(connection): # Create 'share_snapshot_instances' table snapshot_instances_table = op.create_table( 'share_snapshot_instances', Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('snapshot_id', String(length=36), ForeignKey('share_snapshots.id', name="ssi_snapshot_fk")), Column('share_instance_id', String(length=36), ForeignKey('share_instances.id', name="ssi_share_instance_fk")), Column('status', String(length=255)), Column('progress', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) # Migrate data from share_snapshots to share_snapshot_instances snapshot_instances = [] snapshot_table = utils.load_table('share_snapshots', connection) share_instances_table = utils.load_table('share_instances', connection) for snapshot in connection.execute(snapshot_table.select()): share_instances_rows = connection.execute( share_instances_table.select().where( share_instances_table.c.share_id == snapshot.share_id ) ) snapshot_instances.append({ 'created_at': snapshot.created_at, 'updated_at': snapshot.updated_at, 'deleted_at': snapshot.deleted_at, 'deleted': snapshot.deleted, 'id': snapshot.id, 'snapshot_id': snapshot.id, 'status': snapshot.status, 'progress': snapshot.progress, 'snapshot_instance_id': share_instances_rows.first().id, }) op.bulk_insert(snapshot_instances_table, snapshot_instances) # Remove columns moved to 'share_snapshot_instances' table with op.batch_alter_table("share_snapshots") as batch_op: batch_op.drop_column('status') batch_op.drop_column('progress') def remove_snapshot_instances_table(connection): with op.batch_alter_table("share_snapshots") as batch_op: batch_op.add_column(Column('status', String(length=255))) batch_op.add_column(Column('progress', String(length=255))) snapshots_table = utils.load_table('share_snapshots', connection) snapshots_inst_table = utils.load_table('share_snapshot_instances', connection) for snapshot_instance in connection.execute(snapshots_inst_table.select()): snapshot = connection.execute( snapshots_table.select().where( snapshots_table.c.id == snapshot_instance.snapshot_id) ).first() op.execute( snapshots_table.update().where( snapshots_table.c.id == snapshot.id ).values( { 'status': snapshot_instance['status'], 'progress': snapshot_instance['progress'], } ) ) op.drop_table('share_snapshot_instances') def upgrade_export_locations_table(connection): # Update 'share_export_locations' table op.add_column( 'share_export_locations', Column('share_instance_id', String(36), ForeignKey('share_instances.id', name="sel_instance_id_fk")) ) # Convert share_id to share_instance_id share_el_table = utils.load_table('share_export_locations', connection) share_instances_table = utils.load_table('share_instances', connection) for export in connection.execute(share_el_table.select()): share_instance = connection.execute( share_instances_table.select().where( share_instances_table.c.share_id == export.share_id) ).first() op.execute( share_el_table.update().where( share_el_table.c.id == export.id ).values({'share_instance_id': six.text_type(share_instance.id)}) ) with op.batch_alter_table("share_export_locations") as batch_op: 
batch_op.drop_constraint('sel_id_fk', type_='foreignkey') batch_op.drop_column('share_id') batch_op.rename_table('share_export_locations', 'share_instance_export_locations') def downgrade_export_locations_table(connection): op.rename_table('share_instance_export_locations', 'share_export_locations') op.add_column( 'share_export_locations', Column('share_id', String(36), ForeignKey('shares.id', name="sel_id_fk")) ) # Convert share_instance_id to share_id share_el_table = utils.load_table('share_export_locations', connection) share_instances_table = utils.load_table('share_instances', connection) for export in connection.execute(share_el_table.select()): share_instance = connection.execute( share_instances_table.select().where( share_instances_table.c.id == export.share_instance_id) ).first() op.execute( share_el_table.update().where( share_el_table.c.id == export.id ).values({'share_id': six.text_type(share_instance.share_id)}) ) with op.batch_alter_table("share_export_locations") as batch_op: batch_op.drop_constraint('sel_instance_id_fk', type_='foreignkey') batch_op.drop_column('share_instance_id') def upgrade(): connection = op.get_bind() create_share_instances_table(connection) create_snapshot_instances_table(connection) upgrade_export_locations_table(connection) def downgrade(): """Remove share_instances and share_snapshot_instance tables. This method can lead to data loss because only first share/snapshot instance is saved in shares/snapshot table. """ connection = op.get_bind() downgrade_export_locations_table(connection) remove_snapshot_instances_table(connection) remove_share_instances_table(connection) manila-2.0.0/manila/db/migrations/alembic/versions/3651e16d7c43_add_consistency_groups.py0000664000567000056710000001740612701407107032251 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Alex Meade # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Create Consistency Groups Tables and Columns Revision ID: 3651e16d7c43 Revises: 55761e5f59c5 Create Date: 2015-07-29 13:17:15.940454 """ # revision identifiers, used by Alembic. 
revision = '3651e16d7c43' down_revision = '55761e5f59c5' SHARE_NETWORK_FK_CONSTRAINT_NAME = "fk_cg_share_network_id" SHARE_SERVER_FK_CONSTRAINT_NAME = "fk_cg_share_server_id" SHARES_CG_FK_CONSTRAINT_NAME = "fk_shares_consistency_group_id" CG_MAP_FK_CONSTRAINT_NAME = "fk_cgstm_cg_id" SHARE_TYPE_FK_CONSTRAINT_NAME = "fk_cgstm_share_type_id" CGSNAP_CG_ID_FK_CONSTRAINT_NAME = "fk_cgsnapshots_consistency_group_id" CGSNAP_MEM_SHARETYPE_FK_CONSTRAINT_NAME = "fk_cgsnapshot_members_share_type_id" CGSNAP_MEM_SNAP_ID_FK_CONSTRAINT_NAME = "fk_cgsnapshot_members_cgsnapshot_id" CGSNAP_MEM_SHARE_FK_CONSTRAINT_NAME = "fk_cgsnapshot_members_share_id" CGSNAP_MEM_INST_FK_CONSTRAINT_NAME = "fk_cgsnapshot_members_share_instance_id" from alembic import op from oslo_log import log import sqlalchemy as sa from manila.i18n import _LE LOG = log.getLogger(__name__) def upgrade(): # New table - consistency_groups op.create_table( 'consistency_groups', sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.String(36), default='False'), sa.Column('user_id', sa.String(length=255), nullable=False), sa.Column('project_id', sa.String(length=255), nullable=False), sa.Column('host', sa.String(length=255)), sa.Column('name', sa.String(length=255)), sa.Column('description', sa.String(length=255)), sa.Column('status', sa.String(length=255)), sa.Column('source_cgsnapshot_id', sa.String(length=36)), sa.Column('share_network_id', sa.String(length=36), sa.ForeignKey('share_networks.id', name=SHARE_NETWORK_FK_CONSTRAINT_NAME), nullable=True), sa.Column('share_server_id', sa.String(length=36), sa.ForeignKey('share_servers.id', name=SHARE_SERVER_FK_CONSTRAINT_NAME), nullable=True), mysql_engine='InnoDB', mysql_charset='utf8') op.add_column( 'shares', sa.Column('consistency_group_id', sa.String(36), sa.ForeignKey('consistency_groups.id', name=SHARES_CG_FK_CONSTRAINT_NAME))) op.add_column('shares', sa.Column('source_cgsnapshot_member_id', sa.String(36))) op.create_table( 'consistency_group_share_type_mappings', sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.String(36), default='False'), sa.Column('consistency_group_id', sa.String(length=36), sa.ForeignKey('consistency_groups.id', name=CG_MAP_FK_CONSTRAINT_NAME), nullable=False), sa.Column('share_type_id', sa.String(length=36), sa.ForeignKey('share_types.id', name=SHARE_TYPE_FK_CONSTRAINT_NAME), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8') op.create_table( 'cgsnapshots', sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.String(36), default='False'), sa.Column('user_id', sa.String(length=255), nullable=False), sa.Column('project_id', sa.String(length=255), nullable=False), sa.Column('consistency_group_id', sa.String(length=36), sa.ForeignKey('consistency_groups.id', name=CGSNAP_CG_ID_FK_CONSTRAINT_NAME), nullable=False), sa.Column('name', sa.String(length=255)), sa.Column('description', sa.String(length=255)), sa.Column('status', sa.String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8') op.create_table( 'cgsnapshot_members', sa.Column('id', sa.String(36), primary_key=True, nullable=False), 
sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.String(36), default='False'), sa.Column('user_id', sa.String(length=255), nullable=False), sa.Column('project_id', sa.String(length=255), nullable=False), sa.Column('cgsnapshot_id', sa.String(length=36), sa.ForeignKey('cgsnapshots.id', name=CGSNAP_MEM_SNAP_ID_FK_CONSTRAINT_NAME), nullable=False), sa.Column('share_instance_id', sa.String(length=36), sa.ForeignKey('share_instances.id', name=CGSNAP_MEM_INST_FK_CONSTRAINT_NAME), nullable=False), sa.Column('share_id', sa.String(length=36), sa.ForeignKey('shares.id', name=CGSNAP_MEM_SHARE_FK_CONSTRAINT_NAME), nullable=False), sa.Column('share_type_id', sa.String(length=36), sa.ForeignKey('share_types.id', name=CGSNAP_MEM_SHARETYPE_FK_CONSTRAINT_NAME), nullable=False), sa.Column('size', sa.Integer), sa.Column('status', sa.String(length=255)), sa.Column('share_proto', sa.String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8') def downgrade(): try: op.drop_table('cgsnapshot_members') except Exception: LOG.exception(_LE("Error Dropping 'cgsnapshot_members' table.")) try: op.drop_table('cgsnapshots') except Exception: LOG.exception(_LE("Error Dropping 'cgsnapshots' table.")) try: op.drop_table('consistency_group_share_type_mappings') except Exception: LOG.exception(_LE("Error Dropping " "'consistency_group_share_type_mappings' table.")) try: op.drop_column('shares', 'source_cgsnapshot_member_id') except Exception: LOG.exception(_LE("Error Dropping 'source_cgsnapshot_member_id' " "column from 'shares' table.")) try: op.drop_constraint(SHARES_CG_FK_CONSTRAINT_NAME, 'shares', type_='foreignkey') except Exception: LOG.exception(_LE("Error Dropping '%s' constraint.") % SHARES_CG_FK_CONSTRAINT_NAME) try: op.drop_column('shares', 'consistency_group_id') except Exception: LOG.exception(_LE("Error Dropping 'consistency_group_id' column " "from 'shares' table.")) try: op.drop_table('consistency_groups') except Exception: LOG.exception(_LE("Error Dropping 'consistency_groups' table.")) manila-2.0.0/manila/db/migrations/alembic/versions/56cdbe267881_add_share_export_locations_table.py0000664000567000056710000000735312701407107034327 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add share_export_locations table Revision ID: 56cdbe267881 Revises: 17115072e1c3 Create Date: 2015-02-27 14:06:30.464315 """ # revision identifiers, used by Alembic. 
revision = '56cdbe267881' down_revision = '30cb96d995fa' from alembic import op import sqlalchemy as sa from sqlalchemy import func from sqlalchemy.sql import table def upgrade(): export_locations_table = op.create_table( 'share_export_locations', sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Integer, default=0), sa.Column('path', sa.String(2000)), sa.Column('share_id', sa.String(36), sa.ForeignKey('shares.id', name="sel_id_fk")), mysql_engine='InnoDB', mysql_charset='utf8') shares_table = table( 'shares', sa.Column('created_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Integer), sa.Column('export_location', sa.String(length=255)), sa.Column('id', sa.String(length=36)), sa.Column('updated_at', sa.DateTime)) export_locations = [] session = sa.orm.Session(bind=op.get_bind().connect()) for share in session.query(shares_table).all(): deleted = share.deleted if isinstance(share.deleted, int) else 0 export_locations.append({ 'created_at': share.created_at, 'updated_at': share.updated_at, 'deleted_at': share.deleted_at, 'deleted': deleted, 'share_id': share.id, 'path': share.export_location, }) op.bulk_insert(export_locations_table, export_locations) op.drop_column('shares', 'export_location') session.close_all() def downgrade(): """Remove share_export_locations table. This method can lead to data loss because only first export_location is saved in shares table. """ op.add_column('shares', sa.Column('export_location', sa.String(255))) export_locations_table = table( 'share_export_locations', sa.Column('share_id', sa.String(length=36)), sa.Column('path', sa.String(length=255)), sa.Column('updated_at', sa.DateTime), sa.Column('deleted', sa.Integer)) connection = op.get_bind() session = sa.orm.Session(bind=connection.connect()) export_locations = session.query( func.min(export_locations_table.c.updated_at), export_locations_table.c.share_id, export_locations_table.c.path).filter( export_locations_table.c.deleted == 0).group_by( export_locations_table.c.share_id, export_locations_table.c.path).all() shares = sa.Table('shares', sa.MetaData(), autoload=True, autoload_with=connection) for location in export_locations: update = shares.update().where(shares.c.id == location.share_id). \ values(export_location=location.path) connection.execute(update) op.drop_table('share_export_locations') session.close_all() manila-2.0.0/manila/db/migrations/alembic/versions/59eb64046740_add_required_extra_spec.py0000664000567000056710000000470212701407107032265 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add required extra spec Revision ID: 59eb64046740 Revises: 162a3e673105 Create Date: 2015-01-29 15:33:25.348140 """ # revision identifiers, used by Alembic. 
revision = '59eb64046740' down_revision = '4ee2cf4be19a' from alembic import op from oslo_utils import timeutils import sqlalchemy as sa from sqlalchemy.sql import table def upgrade(): session = sa.orm.Session(bind=op.get_bind().connect()) es_table = table( 'share_type_extra_specs', sa.Column('created_at', sa.DateTime), sa.Column('deleted', sa.Integer), sa.Column('share_type_id', sa.String(length=36)), sa.Column('spec_key', sa.String(length=255)), sa.Column('spec_value', sa.String(length=255))) st_table = table( 'share_types', sa.Column('deleted', sa.Integer), sa.Column('id', sa.Integer)) # NOTE(vponomaryov): field 'deleted' is integer here. existing_required_extra_specs = session.query(es_table).\ filter(es_table.c.spec_key == 'driver_handles_share_servers').\ filter(es_table.c.deleted == 0).\ all() exclude_st_ids = [es.share_type_id for es in existing_required_extra_specs] # NOTE(vponomaryov): field 'deleted' is string here. share_types = session.query(st_table).\ filter(st_table.c.deleted.in_(('0', 'False', ))).\ filter(st_table.c.id.notin_(exclude_st_ids)).\ all() extra_specs = [] for st in share_types: extra_specs.append({ 'spec_key': 'driver_handles_share_servers', 'spec_value': 'True', 'deleted': 0, 'created_at': timeutils.utcnow(), 'share_type_id': st.id, }) op.bulk_insert(es_table, extra_specs) session.close_all() def downgrade(): """Downgrade method. We can't determine, which extra specs should be removed after insertion, that's why do nothing here. """ manila-2.0.0/manila/db/migrations/alembic/versions/ef0c02b4366_add_share_type_projects.py0000664000567000056710000000472612701407107032353 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add_share_type_projects Revision ID: ef0c02b4366 Revises: 17115072e1c3 Create Date: 2015-02-20 10:49:40.744974 """ # revision identifiers, used by Alembic. 
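# NOTE(editor): illustrative sketch, not part of the original migration.
# The preceding migration seeds 'driver_handles_share_servers' for every
# live share type that does not already carry it.  A query like the one
# below (hypothetical helper name) can be used to verify that no share
# type was missed after the backfill, using the same 'deleted' value
# conventions noted in that migration.

def _share_types_missing_spec(connection, spec_key):
    """Return ids of live share types that lack the given extra spec."""
    import sqlalchemy as sa

    meta = sa.MetaData()
    st = sa.Table('share_types', meta, autoload=True,
                  autoload_with=connection)
    es = sa.Table('share_type_extra_specs', meta, autoload=True,
                  autoload_with=connection)
    # 'deleted' is an integer in the extra-specs table and a string in
    # the share-types table.
    have_spec = sa.select([es.c.share_type_id]).where(
        es.c.spec_key == spec_key).where(es.c.deleted == 0)
    query = sa.select([st.c.id]).where(
        st.c.deleted.in_(('0', 'False'))).where(
        st.c.id.notin_(have_spec))
    return [row.id for row in connection.execute(query)]

# e.g. _share_types_missing_spec(op.get_bind(),
#                                'driver_handles_share_servers')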
revision = 'ef0c02b4366' down_revision = '59eb64046740' from alembic import op from oslo_log import log import sqlalchemy as sql from manila.i18n import _LE LOG = log.getLogger(__name__) def upgrade(): meta = sql.MetaData() meta.bind = op.get_bind() is_public = sql.Column('is_public', sql.Boolean) try: op.add_column('share_types', is_public) share_types = sql.Table('share_types', meta, is_public.copy()) share_types.update().values(is_public=True).execute() except Exception: LOG.error(_LE("Column |%s| not created!"), repr(is_public)) raise try: op.create_table( 'share_type_projects', sql.Column('id', sql.Integer, primary_key=True, nullable=False), sql.Column('created_at', sql.DateTime), sql.Column('updated_at', sql.DateTime), sql.Column('deleted_at', sql.DateTime), sql.Column('share_type_id', sql.String(36), sql.ForeignKey('share_types.id', name="stp_id_fk")), sql.Column('project_id', sql.String(length=255)), sql.Column('deleted', sql.Integer), sql.UniqueConstraint('share_type_id', 'project_id', 'deleted', name="stp_project_id_uc"), mysql_engine='InnoDB', ) except Exception: LOG.error(_LE("Table |%s| not created!"), 'share_type_projects') raise def downgrade(): try: op.drop_column('share_types', 'is_public') except Exception: LOG.error(_LE("share_types.is_public column not dropped")) raise try: op.drop_table('share_type_projects') except Exception: LOG.error(_LE("share_type_projects table not dropped")) raise manila-2.0.0/manila/db/migrations/alembic/versions/3db9992c30f3_transform_statuses_to_lowercase.py0000664000567000056710000000330112701407107034267 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Transform statuses to lowercase Revision ID: 3db9992c30f3 Revises: 533646c7af38 Create Date: 2015-05-28 19:30:35.645773 """ # revision identifiers, used by Alembic. revision = '3db9992c30f3' down_revision = '533646c7af38' from alembic import op import sqlalchemy as sa from manila.db.migrations import utils def upgrade(): # NOTE(vponomaryov): shares has some statuses as uppercase, so # transform them in addition to statuses of share servers. for table in ('shares', 'share_servers'): _transform_case(table, make_upper=False) def downgrade(): # NOTE(vponomaryov): transform share server statuses to uppercase and # leave share statuses as is. 
_transform_case('share_servers', make_upper=True) def _transform_case(table_name, make_upper): connection = op.get_bind() table = utils.load_table(table_name, connection) case = sa.func.upper if make_upper else sa.func.lower for row in connection.execute(table.select()): op.execute( table.update().where( table.c.id == row.id ).values({'status': case(row.status)}) ) ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000manila-2.0.0/manila/db/migrations/alembic/versions/4ee2cf4be19a_remove_share_snapshots_export_location.pymanila-2.0.0/manila/db/migrations/alembic/versions/4ee2cf4be19a_remove_share_snapshots_export_locati0000664000567000056710000000206512701407107035061 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Remove share_snapshots.export_location Revision ID: 4ee2cf4be19a Revises: 17115072e1c3 Create Date: 2015-02-26 11:11:55.734663 """ # revision identifiers, used by Alembic. revision = '4ee2cf4be19a' down_revision = '17115072e1c3' from alembic import op import sqlalchemy as sql def upgrade(): op.drop_column('share_snapshots', 'export_location') def downgrade(): op.add_column('share_snapshots', sql.Column('export_location', sql.String(255))) manila-2.0.0/manila/db/migrations/alembic/versions/162a3e673105_manila_init.py0000664000567000056710000004107612701407107027675 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """manila_init Revision ID: 162a3e673105 Revises: None Create Date: 2014-07-23 17:51:57.077203 """ # revision identifiers, used by Alembic. 
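# NOTE(editor): illustrative sketch, not part of the original migrations.
# The _transform_case helper in the 3db9992c30f3 migration above issues
# one UPDATE statement per row.  The same result can be achieved with a
# single server-side UPDATE, which avoids round-trips on large tables.
# The helper name below is hypothetical.

def _transform_case_in_one_update(connection, table_name, make_upper):
    """Upper/lower-case the status column with a single UPDATE."""
    import sqlalchemy as sa

    from manila.db.migrations import utils

    table = utils.load_table(table_name, connection)
    case = sa.func.upper if make_upper else sa.func.lower
    connection.execute(
        table.update().values(status=case(table.c.status)))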
revision = '162a3e673105' down_revision = None from alembic import op from oslo_log import log from sqlalchemy import Boolean, Column, DateTime, ForeignKey from sqlalchemy import Integer, MetaData, String, Table, UniqueConstraint from manila.i18n import _LE LOG = log.getLogger(__name__) def upgrade(): migrate_engine = op.get_bind().engine meta = MetaData() meta.bind = migrate_engine services = Table( 'services', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Integer, default=0), Column('id', Integer, primary_key=True, nullable=False), Column('host', String(length=255)), Column('binary', String(length=255)), Column('topic', String(length=255)), Column('report_count', Integer, nullable=False), Column('disabled', Boolean), Column('availability_zone', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) quotas = Table( 'quotas', meta, Column('id', Integer, primary_key=True, nullable=False), Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Integer, default=0), Column('project_id', String(length=255)), Column('resource', String(length=255), nullable=False), Column('hard_limit', Integer), mysql_engine='InnoDB', mysql_charset='utf8' ) quota_classes = Table( 'quota_classes', meta, Column('created_at', DateTime(timezone=False)), Column('updated_at', DateTime(timezone=False)), Column('deleted_at', DateTime(timezone=False)), Column('deleted', Integer, default=0), Column('id', Integer(), primary_key=True), Column('class_name', String(length=255, convert_unicode=True, unicode_error=None, _warn_on_bytestring=False), index=True), Column('resource', String(length=255, convert_unicode=True, unicode_error=None, _warn_on_bytestring=False)), Column('hard_limit', Integer(), nullable=True), mysql_engine='InnoDB', mysql_charset='utf8', ) quota_usages = Table( 'quota_usages', meta, Column('created_at', DateTime(timezone=False)), Column('updated_at', DateTime(timezone=False)), Column('deleted_at', DateTime(timezone=False)), Column('deleted', Integer, default=0), Column('id', Integer(), primary_key=True), Column('user_id', String(length=255)), Column('project_id', String(length=255, convert_unicode=True, unicode_error=None, _warn_on_bytestring=False), index=True), Column('resource', String(length=255, convert_unicode=True, unicode_error=None, _warn_on_bytestring=False)), Column('in_use', Integer(), nullable=False), Column('reserved', Integer(), nullable=False), Column('until_refresh', Integer(), nullable=True), mysql_engine='InnoDB', mysql_charset='utf8', ) reservations = Table( 'reservations', meta, Column('created_at', DateTime(timezone=False)), Column('updated_at', DateTime(timezone=False)), Column('deleted_at', DateTime(timezone=False)), Column('deleted', Integer, default=0), Column('id', Integer(), primary_key=True), Column('user_id', String(length=255)), Column('uuid', String(length=36, convert_unicode=True, unicode_error=None, _warn_on_bytestring=False), nullable=False), Column('usage_id', Integer(), ForeignKey('quota_usages.id'), nullable=False), Column('project_id', String(length=255, convert_unicode=True, unicode_error=None, _warn_on_bytestring=False), index=True), Column('resource', String(length=255, convert_unicode=True, unicode_error=None, _warn_on_bytestring=False)), Column('delta', Integer(), nullable=False), Column('expire', DateTime(timezone=False)), mysql_engine='InnoDB', mysql_charset='utf8', ) project_user_quotas = Table( 'project_user_quotas', 
meta, Column('id', Integer, primary_key=True, nullable=False), Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Integer, default=0), Column('user_id', String(length=255), nullable=False), Column('project_id', String(length=255), nullable=False), Column('resource', String(length=25), nullable=False), Column('hard_limit', Integer, nullable=True), mysql_engine='InnoDB', mysql_charset='utf8', ) shares = Table( 'shares', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('user_id', String(length=255)), Column('project_id', String(length=255)), Column('host', String(length=255)), Column('size', Integer), Column('availability_zone', String(length=255)), Column('status', String(length=255)), Column('scheduled_at', DateTime), Column('launched_at', DateTime), Column('terminated_at', DateTime), Column('display_name', String(length=255)), Column('display_description', String(length=255)), Column('snapshot_id', String(length=36)), Column('share_network_id', String(length=36), ForeignKey('share_networks.id'), nullable=True), Column('share_server_id', String(length=36), ForeignKey('share_servers.id'), nullable=True), Column('share_proto', String(255)), Column('export_location', String(255)), Column('volume_type_id', String(length=36)), mysql_engine='InnoDB', mysql_charset='utf8' ) access_map = Table( 'share_access_map', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('share_id', String(36), ForeignKey('shares.id'), nullable=False), Column('access_type', String(255)), Column('access_to', String(255)), Column('state', String(255)), mysql_engine='InnoDB', mysql_charset='utf8' ) share_snapshots = Table( 'share_snapshots', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('user_id', String(length=255)), Column('project_id', String(length=255)), Column('share_id', String(36), ForeignKey('shares.id'), nullable=False), Column('size', Integer), Column('status', String(length=255)), Column('progress', String(length=255)), Column('display_name', String(length=255)), Column('display_description', String(length=255)), Column('share_size', Integer), Column('share_proto', String(length=255)), Column('export_location', String(255)), mysql_engine='InnoDB', mysql_charset='utf8' ) share_metadata = Table( 'share_metadata', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Integer, default=0), Column('id', Integer, primary_key=True, nullable=False), Column('share_id', String(length=36), ForeignKey('shares.id'), nullable=False), Column('key', String(length=255), nullable=False), Column('value', String(length=1023), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8' ) security_services = Table( 'security_services', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), 
Column('project_id', String(length=36), nullable=False), Column('type', String(length=32), nullable=False), Column('dns_ip', String(length=64), nullable=True), Column('server', String(length=255), nullable=True), Column('domain', String(length=255), nullable=True), Column('user', String(length=255), nullable=True), Column('password', String(length=255), nullable=True), Column('name', String(length=255), nullable=True), Column('description', String(length=255), nullable=True), Column('status', String(length=16)), mysql_engine='InnoDB', mysql_charset='utf8', ) share_networks = Table( 'share_networks', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('project_id', String(length=36), nullable=False), Column('user_id', String(length=36)), Column('neutron_net_id', String(length=36), nullable=True), Column('neutron_subnet_id', String(length=36), nullable=True), Column('network_type', String(length=32), nullable=True), Column('segmentation_id', Integer, nullable=True), Column('cidr', String(length=64), nullable=True), Column('ip_version', Integer, nullable=True), Column('name', String(length=255), nullable=True), Column('description', String(length=255), nullable=True), mysql_engine='InnoDB', mysql_charset='utf8', ) share_servers = Table( 'share_servers', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('share_network_id', String(length=36), ForeignKey('share_networks.id'), nullable=True), Column('host', String(length=255), nullable=True), Column('status', String(length=32)), mysql_engine='InnoDB', mysql_charset='utf8', ) share_server_backend_details = Table( 'share_server_backend_details', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default=0), Column('id', Integer, primary_key=True, nullable=False), Column('share_server_id', String(length=36), ForeignKey('share_servers.id'), nullable=False), Column('key', String(length=255), nullable=False), Column('value', String(length=1023), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8' ) network_allocations = Table( 'network_allocations', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('ip_address', String(length=64), nullable=True), Column('mac_address', String(length=32), nullable=True), Column('share_server_id', String(length=36), ForeignKey('share_servers.id'), nullable=False), Column('status', String(length=32)), mysql_engine='InnoDB', mysql_charset='utf8', ) ss_nw_association = Table( 'share_network_security_service_association', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Integer, default=0), Column('id', Integer, primary_key=True, nullable=False), Column('share_network_id', String(length=36), ForeignKey('share_networks.id'), nullable=False), Column('security_service_id', String(length=36), ForeignKey('security_services.id'), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8', ) volume_types = Table( 'volume_types', meta, 
Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('name', String(length=255)), UniqueConstraint('name', 'deleted', name='vt_name_uc'), mysql_engine='InnoDB' ) volume_type_extra_specs = Table( 'volume_type_extra_specs', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('volume_type_id', String(length=36), ForeignKey('volume_types.id'), nullable=False), Column('key', String(length=255)), Column('value', String(length=255)), mysql_engine='InnoDB' ) # create all tables # Take care on create order for those with FK dependencies tables = [quotas, services, quota_classes, quota_usages, reservations, project_user_quotas, security_services, share_networks, ss_nw_association, share_servers, network_allocations, shares, access_map, share_snapshots, share_server_backend_details, share_metadata, volume_types, volume_type_extra_specs] for table in tables: if not table.exists(): try: table.create() except Exception: LOG.info(repr(table)) LOG.exception(_LE('Exception while creating table.')) raise if migrate_engine.name == "mysql": tables = ["quotas", "services", "quota_classes", "quota_usages", "reservations", "project_user_quotas", "share_access_map", "share_snapshots", "share_metadata", "security_services", "share_networks", "network_allocations", "shares", "share_servers", "share_network_security_service_association", "volume_types", "volume_type_extra_specs", "share_server_backend_details"] migrate_engine.execute("SET foreign_key_checks = 0") for table in tables: migrate_engine.execute( "ALTER TABLE %s CONVERT TO CHARACTER SET utf8" % table) migrate_engine.execute("SET foreign_key_checks = 1") migrate_engine.execute( "ALTER DATABASE %s DEFAULT CHARACTER SET utf8" % migrate_engine.url.database) migrate_engine.execute("ALTER TABLE %s Engine=InnoDB" % table) def downgrade(): raise NotImplementedError('Downgrade from initial Manila install is not' ' supported.') ././@LongLink0000000000000000000000000000020100000000000011206 Lustar 00000000000000manila-2.0.0/manila/db/migrations/alembic/versions/5155c7077f99_add_more_network_info_attributes_to_network_allocations_table.pymanila-2.0.0/manila/db/migrations/alembic/versions/5155c7077f99_add_more_network_info_attributes_to_0000664000567000056710000000345712701407107034460 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add more network info attributes to 'network_allocations' table. Revision ID: 5155c7077f99 Revises: 293fac1130ca Create Date: 2015-12-22 12:05:24.297049 """ # revision identifiers, used by Alembic. 
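# NOTE(editor): illustrative sketch, not part of the original migration.
# The initial 162a3e673105 migration above creates its tables from a
# hand-ordered list so that foreign-key targets exist before the tables
# that reference them.  When all tables are attached to one bound
# MetaData object (as they are there), SQLAlchemy can compute that order
# itself via MetaData.sorted_tables.  The helper name is hypothetical and
# assumes ``meta.bind`` has been set as in that migration.

def _create_all_in_fk_order(meta):
    """Create every table attached to ``meta`` in FK-dependency order."""
    for table in meta.sorted_tables:
        if not table.exists():
            table.create()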
revision = '5155c7077f99' down_revision = '293fac1130ca' from alembic import op import sqlalchemy as sa def upgrade(): default_label_value = 'user' op.add_column( 'network_allocations', sa.Column('label', sa.String(255), default=default_label_value, server_default=default_label_value, nullable=True), ) op.add_column( 'network_allocations', sa.Column('network_type', sa.String(32), nullable=True)) op.add_column( 'network_allocations', sa.Column('segmentation_id', sa.Integer, nullable=True)) op.add_column( 'network_allocations', sa.Column('ip_version', sa.Integer, nullable=True)) op.add_column( 'network_allocations', sa.Column('cidr', sa.String(64), nullable=True)) def downgrade(): for col_name in ('label', 'network_type', 'segmentation_id', 'ip_version', 'cidr'): op.drop_column('network_allocations', col_name) ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000manila-2.0.0/manila/db/migrations/alembic/versions/344c1ac4747f_add_share_instance_access_rules_status.pymanila-2.0.0/manila/db/migrations/alembic/versions/344c1ac4747f_add_share_instance_access_rules_stat0000664000567000056710000001020412701407107034502 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Remove access rules status and add access_rule_status to share_instance model Revision ID: 344c1ac4747f Revises: dda6de06349 Create Date: 2015-11-18 14:58:55.806396 """ # revision identifiers, used by Alembic. revision = '344c1ac4747f' down_revision = 'dda6de06349' from alembic import op from sqlalchemy import Column, String from manila.common import constants from manila.db.migrations import utils priorities = { 'active': 0, 'new': 1, 'error': 2 } upgrade_data_mapping = { 'active': 'active', 'new': 'out_of_sync', 'error': 'error', } downgrade_data_mapping = { 'active': 'active', # NOTE(u_glide): We cannot determine is it applied rule or not in Manila, # so administrator should manually handle such access rules. 'out_of_sync': 'error', 'error': 'error', } def upgrade(): """Transform individual access rules states to 'access_rules_status'. WARNING: This method performs lossy converting of existing data in DB. """ op.add_column( 'share_instances', Column('access_rules_status', String(length=255)) ) connection = op.get_bind() share_instances_table = utils.load_table('share_instances', connection) instance_access_table = utils.load_table('share_instance_access_map', connection) # NOTE(u_glide): Data migrations shouldn't be performed on live clouds # because it will lead to unpredictable behaviour of running operations # like migration. 
instances_query = ( share_instances_table.select() .where(share_instances_table.c.status == constants.STATUS_AVAILABLE) .where(share_instances_table.c.deleted == 'False') ) for instance in connection.execute(instances_query): access_mappings_query = instance_access_table.select().where( instance_access_table.c.share_instance_id == instance['id'] ).where(instance_access_table.c.deleted == 'False') status = constants.STATUS_ACTIVE for access_rule in connection.execute(access_mappings_query): if (access_rule['state'] == constants.STATUS_DELETING or access_rule['state'] not in priorities): continue if priorities[access_rule['state']] > priorities[status]: status = access_rule['state'] op.execute( share_instances_table.update().where( share_instances_table.c.id == instance['id'] ).values({'access_rules_status': upgrade_data_mapping[status]}) ) op.drop_column('share_instance_access_map', 'state') def downgrade(): op.add_column( 'share_instance_access_map', Column('state', String(length=255)) ) connection = op.get_bind() share_instances_table = utils.load_table('share_instances', connection) instance_access_table = utils.load_table('share_instance_access_map', connection) instances_query = ( share_instances_table.select() .where(share_instances_table.c.status == constants.STATUS_AVAILABLE) .where(share_instances_table.c.deleted == 'False') ) for instance in connection.execute(instances_query): state = downgrade_data_mapping[instance['access_rules_status']] op.execute( instance_access_table.update().where( instance_access_table.c.share_instance_id == instance['id'] ).where(instance_access_table.c.deleted == 'False').values( {'state': state} ) ) op.drop_column('share_instances', 'access_rules_status') ././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000manila-2.0.0/manila/db/migrations/alembic/versions/55761e5f59c5_add_snapshot_support_extra_spec_to_share_types.pymanila-2.0.0/manila/db/migrations/alembic/versions/55761e5f59c5_add_snapshot_support_extra_spec_to_s0000664000567000056710000000762512701407107034576 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add 'snapshot_support' extra spec to share types Revision ID: 55761e5f59c5 Revises: 1f0bd302c1a6 Create Date: 2015-08-13 14:02:54.656864 """ # revision identifiers, used by Alembic. revision = '55761e5f59c5' down_revision = '1f0bd302c1a6' from alembic import op from oslo_utils import timeutils import sqlalchemy as sa from sqlalchemy.sql import table from manila.common import constants def upgrade(): """Performs DB upgrade to support feature of making snapshots optional. Add 'snapshot_support' extra spec to all share types and attr 'snapshot_support' to Share model. 
""" session = sa.orm.Session(bind=op.get_bind().connect()) es_table = table( 'share_type_extra_specs', sa.Column('created_at', sa.DateTime), sa.Column('deleted', sa.Integer), sa.Column('share_type_id', sa.String(length=36)), sa.Column('spec_key', sa.String(length=255)), sa.Column('spec_value', sa.String(length=255))) st_table = table( 'share_types', sa.Column('deleted', sa.Integer), sa.Column('id', sa.Integer)) # NOTE(vponomaryov): field 'deleted' is integer here. existing_extra_specs = session.query(es_table).\ filter(es_table.c.spec_key == constants.ExtraSpecs.SNAPSHOT_SUPPORT).\ filter(es_table.c.deleted == 0).\ all() exclude_st_ids = [es.share_type_id for es in existing_extra_specs] # NOTE(vponomaryov): field 'deleted' is string here. share_types = session.query(st_table).\ filter(st_table.c.deleted.in_(('0', 'False', ))).\ filter(st_table.c.id.notin_(exclude_st_ids)).\ all() session.close_all() extra_specs = [] now = timeutils.utcnow() for st in share_types: extra_specs.append({ 'spec_key': constants.ExtraSpecs.SNAPSHOT_SUPPORT, 'spec_value': 'True', 'deleted': 0, 'created_at': now, 'share_type_id': st.id, }) if extra_specs: op.bulk_insert(es_table, extra_specs) # NOTE(vponomaryov): shares that were created before applying this # migration can have incorrect value because they were created without # consideration of driver capability to create snapshots. op.add_column('shares', sa.Column('snapshot_support', sa.Boolean, default=True)) connection = op.get_bind().connect() shares = sa.Table( 'shares', sa.MetaData(), autoload=True, autoload_with=connection) update = shares.update().where(shares.c.deleted == 'False').values( snapshot_support=True) connection.execute(update) def downgrade(): """Performs DB downgrade removing support of 'optional snapshots' feature. Remove 'snapshot_support' extra spec from all share types and attr 'snapshot_support' from Share model. """ connection = op.get_bind().connect() extra_specs = sa.Table( 'share_type_extra_specs', sa.MetaData(), autoload=True, autoload_with=connection) update = extra_specs.update().where( extra_specs.c.spec_key == constants.ExtraSpecs.SNAPSHOT_SUPPORT).where( extra_specs.c.deleted == 0).values( deleted=extra_specs.c.id, deleted_at=timeutils.utcnow(), ) connection.execute(update) op.drop_column('shares', 'snapshot_support') manila-2.0.0/manila/db/migrations/alembic/versions/211836bf835c_add_access_level.py0000664000567000056710000000217212701407107030733 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add access level Revision ID: 211836bf835c Revises: 162a3e673105 Create Date: 2014-12-19 05:34:06.790159 """ # revision identifiers, used by Alembic. 
revision = '211836bf835c' down_revision = '162a3e673105' from alembic import op import sqlalchemy as sa from manila.common import constants def upgrade(): op.add_column('share_access_map', sa.Column('access_level', sa.String(2), default=constants.ACCESS_LEVEL_RW)) def downgrade(): op.drop_column('share_access_map', 'access_level') manila-2.0.0/manila/db/migrations/alembic/versions/3a482171410f_add_drivers_private_data_table.py0000664000567000056710000000412112701407107033555 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_driver_private_data_table Revision ID: 3a482171410f Revises: 56cdbe267881 Create Date: 2015-04-21 14:47:38.201658 """ # revision identifiers, used by Alembic. revision = '3a482171410f' down_revision = '56cdbe267881' from alembic import op from oslo_log import log import sqlalchemy as sql from manila.i18n import _LE LOG = log.getLogger(__name__) drivers_private_data_table_name = 'drivers_private_data' def upgrade(): try: op.create_table( drivers_private_data_table_name, sql.Column('created_at', sql.DateTime), sql.Column('updated_at', sql.DateTime), sql.Column('deleted_at', sql.DateTime), sql.Column('deleted', sql.Integer, default=0), sql.Column('host', sql.String(255), nullable=False, primary_key=True), sql.Column('entity_uuid', sql.String(36), nullable=False, primary_key=True), sql.Column('key', sql.String(255), nullable=False, primary_key=True), sql.Column('value', sql.String(1023), nullable=False), mysql_engine='InnoDB', ) except Exception: LOG.error(_LE("Table |%s| not created!"), drivers_private_data_table_name) raise def downgrade(): try: op.drop_table(drivers_private_data_table_name) except Exception: LOG.error(_LE("%s table not dropped"), drivers_private_data_table_name) raise manila-2.0.0/manila/db/migrations/alembic/versions/38e632621e5a_change_volume_type_to_share_type.py0000664000567000056710000001253112701407107034274 0ustar jenkinsjenkins00000000000000# Copyright 2015 Bob Callaway. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """change volume_type to share_type Revision ID: 38e632621e5a Revises: 162a3e673105 Create Date: 2014-10-02 09:14:03.172324 """ # revision identifiers, used by Alembic. 
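# NOTE(editor): illustrative sketch, not part of the original migrations.
# In the add_access_level migration above, ``default=`` is a client-side
# SQLAlchemy default: it is applied only when new rows are inserted
# through SQLAlchemy and does not generate a DDL DEFAULT, so rows that
# already exist keep NULL in the new column.  When existing rows should
# be stamped as well, a server_default (as the 5155c7077f99 migration
# earlier in this listing does for 'label'), optionally combined with an
# explicit backfill, is the usual approach.  A minimal sketch, assuming
# constants.ACCESS_LEVEL_RW is the plain string 'rw':

def _add_access_level_with_server_default():
    from alembic import op
    import sqlalchemy as sa

    op.add_column(
        'share_access_map',
        sa.Column('access_level', sa.String(2),
                  default='rw', server_default='rw'))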
revision = '38e632621e5a' down_revision = '211836bf835c' from alembic import op from oslo_log import log from oslo_utils import strutils import sqlalchemy as sa from sqlalchemy.sql import table from manila.i18n import _LI LOG = log.getLogger(__name__) def upgrade(): LOG.info(_LI("Renaming column name shares.volume_type_id to " "shares.share_type.id")) op.alter_column("shares", "volume_type_id", new_column_name="share_type_id", type_=sa.String(length=36)) LOG.info(_LI("Renaming volume_types table to share_types")) op.rename_table("volume_types", "share_types") op.drop_constraint('vt_name_uc', 'share_types', type_='unique') op.create_unique_constraint('st_name_uc', 'share_types', ['name', 'deleted']) LOG.info(_LI("Creating share_type_extra_specs table")) st_es = op.create_table( 'share_type_extra_specs', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Integer), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('share_type_id', sa.String(length=36), sa.ForeignKey('share_types.id', name="st_id_fk"), nullable=False), sa.Column('spec_key', sa.String(length=255)), sa.Column('spec_value', sa.String(length=255)), mysql_engine='InnoDB') LOG.info(_LI("Migrating volume_type_extra_specs to " "share_type_extra_specs")) _copy_records(destination_table=st_es, up_migration=True) LOG.info(_LI("Dropping volume_type_extra_specs table")) op.drop_table("volume_type_extra_specs") def downgrade(): LOG.info(_LI("Creating volume_type_extra_specs table")) vt_es = op.create_table( 'volume_type_extra_specs', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Boolean), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('volume_type_id', sa.String(length=36), sa.ForeignKey('share_types.id'), nullable=False), sa.Column('key', sa.String(length=255)), sa.Column('value', sa.String(length=255)), mysql_engine='InnoDB') LOG.info(_LI("Migrating share_type_extra_specs to " "volume_type_extra_specs")) _copy_records(destination_table=vt_es, up_migration=False) LOG.info(_LI("Dropping share_type_extra_specs table")) op.drop_table("share_type_extra_specs") LOG.info(_LI("Renaming share_types table to volume_types")) op.drop_constraint('st_name_uc', 'share_types', type_='unique') op.create_unique_constraint('vt_name_uc', 'share_types', ['name', 'deleted']) op.rename_table("share_types", "volume_types") LOG.info(_LI("Renaming column name shares.share_type_id to " "shares.volume_type.id")) op.alter_column("shares", "share_type_id", new_column_name="volume_type_id", type_=sa.String(length=36)) def _copy_records(destination_table, up_migration=True): old = ('volume', '') new = ('share', 'spec_') data_from, data_to = (old, new) if up_migration else (new, old) from_table = table( data_from[0] + '_type_extra_specs', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Integer if up_migration else sa.Boolean), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column(data_from[0] + '_type_id', sa.String(length=36)), sa.Column(data_from[1] + 'key', sa.String(length=255)), sa.Column(data_from[1] + 'value', sa.String(length=255))) extra_specs = [] for es in op.get_bind().execute(from_table.select()): if up_migration: deleted = strutils.int_from_bool_as_string(es.deleted) else: deleted = strutils.bool_from_string(es.deleted) 
extra_specs.append({ 'created_at': es.created_at, 'updated_at': es.updated_at, 'deleted_at': es.deleted_at, 'deleted': deleted, 'id': es.id, data_to[0] + '_type_id': getattr(es, data_from[0] + '_type_id'), data_to[1] + 'key': getattr(es, data_from[1] + 'key'), data_to[1] + 'value': getattr(es, data_from[1] + 'value'), }) op.bulk_insert(destination_table, extra_specs) manila-2.0.0/manila/db/migrations/alembic/versions/293fac1130ca_add_replication_attrs.py0000664000567000056710000000252012701407107032147 0ustar jenkinsjenkins00000000000000# Copyright 2015 Goutham Pacha Ravi. # All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add replication attributes to Share and ShareInstance models. Revision ID: 293fac1130ca Revises: 344c1ac4747f Create Date: 2015-09-10 15:45:07.273043 """ # revision identifiers, used by Alembic. revision = '293fac1130ca' down_revision = '344c1ac4747f' from alembic import op import sqlalchemy as sa def upgrade(): """Add replication attributes to Shares and ShareInstances.""" op.add_column('shares', sa.Column('replication_type', sa.String(255))) op.add_column('share_instances', sa.Column('replica_state', sa.String(255))) def downgrade(): """Remove replication attributes from Shares and ShareInstances.""" op.drop_column('shares', 'replication_type') op.drop_column('share_instances', 'replica_state') manila-2.0.0/manila/db/migrations/alembic/versions/dda6de06349_add_export_locations_metadata.py0000664000567000056710000000757012701407107033624 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add DB support for share instance export locations metadata. Revision ID: dda6de06349 Revises: 323840a08dc4 Create Date: 2015-11-30 13:50:15.914232 """ # revision identifiers, used by Alembic. revision = 'dda6de06349' down_revision = '323840a08dc4' from alembic import op from oslo_log import log from oslo_utils import uuidutils import sqlalchemy as sa from manila.i18n import _LE SI_TABLE_NAME = 'share_instances' EL_TABLE_NAME = 'share_instance_export_locations' ELM_TABLE_NAME = 'share_instance_export_locations_metadata' LOG = log.getLogger(__name__) def upgrade(): try: meta = sa.MetaData() meta.bind = op.get_bind() # Add new 'is_admin_only' column in export locations table that will be # used for hiding admin export locations from common users in API. 
op.add_column( EL_TABLE_NAME, sa.Column('is_admin_only', sa.Boolean, default=False)) # Create new 'uuid' column as String(36) in export locations table # that will be used for API. op.add_column( EL_TABLE_NAME, sa.Column('uuid', sa.String(36), unique=True), ) # Generate UUID for each existing export location. el_table = sa.Table( EL_TABLE_NAME, meta, sa.Column('id', sa.Integer), sa.Column('uuid', sa.String(36)), sa.Column('is_admin_only', sa.Boolean), ) for record in el_table.select().execute(): el_table.update().values( is_admin_only=False, uuid=uuidutils.generate_uuid(), ).where( el_table.c.id == record.id, ).execute() # Make new 'uuid' column in export locations table not nullable. op.alter_column( EL_TABLE_NAME, 'uuid', existing_type=sa.String(length=36), nullable=False, ) except Exception: LOG.error(_LE("Failed to update '%s' table!"), EL_TABLE_NAME) raise try: op.create_table( ELM_TABLE_NAME, sa.Column('id', sa.Integer, primary_key=True), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Integer), sa.Column('export_location_id', sa.Integer, sa.ForeignKey('%s.id' % EL_TABLE_NAME, name="elm_id_fk"), nullable=False), sa.Column('key', sa.String(length=255), nullable=False), sa.Column('value', sa.String(length=1023), nullable=False), sa.UniqueConstraint('export_location_id', 'key', 'deleted', name="elm_el_id_uc"), mysql_engine='InnoDB', ) except Exception: LOG.error(_LE("Failed to create '%s' table!"), ELM_TABLE_NAME) raise def downgrade(): try: op.drop_table(ELM_TABLE_NAME) except Exception: LOG.error(_LE("Failed to drop '%s' table!"), ELM_TABLE_NAME) raise try: op.drop_column(EL_TABLE_NAME, 'is_admin_only') op.drop_column(EL_TABLE_NAME, 'uuid') except Exception: LOG.error(_LE("Failed to update '%s' table!"), EL_TABLE_NAME) raise ././@LongLink0000000000000000000000000000016500000000000011217 Lustar 00000000000000manila-2.0.0/manila/db/migrations/alembic/versions/eb6d5544cbbd_add_provider_location_to_share_snapshot_instances.pymanila-2.0.0/manila/db/migrations/alembic/versions/eb6d5544cbbd_add_provider_location_to_share_snaps0000664000567000056710000000205112701407107034760 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add provider_location to share_snapshot_instances Revision ID: eb6d5544cbbd Revises: 5155c7077f99 Create Date: 2016-02-12 22:25:39.594545 """ # revision identifiers, used by Alembic. 
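# NOTE(editor): illustrative sketch, not part of the original migration.
# The dda6de06349 upgrade above adds a required unique 'uuid' column to a
# table that already holds rows, which has to be done in three steps:
# add the column as nullable, backfill every row, then tighten it to
# NOT NULL.  The generic helper below (hypothetical name) mirrors that
# sequence for any table with an integer 'id' primary key.

def _add_required_uuid_column(table_name):
    """Add a non-nullable unique 'uuid' column to a populated table."""
    from alembic import op
    from oslo_utils import uuidutils
    import sqlalchemy as sa

    # 1. add the column as nullable so existing rows remain valid
    op.add_column(table_name, sa.Column('uuid', sa.String(36), unique=True))

    # 2. backfill every existing row with a generated UUID
    connection = op.get_bind()
    table = sa.Table(table_name, sa.MetaData(), autoload=True,
                     autoload_with=connection)
    for row in connection.execute(table.select()):
        connection.execute(
            table.update().where(table.c.id == row.id).values(
                uuid=uuidutils.generate_uuid()))

    # 3. only then make the column NOT NULL
    op.alter_column(table_name, 'uuid',
                    existing_type=sa.String(length=36), nullable=False)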
revision = 'eb6d5544cbbd' down_revision = '5155c7077f99' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column( 'share_snapshot_instances', sa.Column('provider_location', sa.String(255), nullable=True)) def downgrade(): op.drop_column('share_snapshot_instances', 'provider_location') manila-2.0.0/manila/db/migrations/alembic/__init__.py0000664000567000056710000000000012701407107023607 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/db/migrations/alembic/script.py.mako0000664000567000056710000000167112701407107024321 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """${message} Revision ID: ${up_revision} Revises: ${down_revision} Create Date: ${create_date} """ # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} from alembic import op import sqlalchemy as sa ${imports if imports else ""} def upgrade(): ${upgrades if upgrades else "pass"} def downgrade(): ${downgrades if downgrades else "pass"} manila-2.0.0/manila/db/migrations/alembic/migration.py0000664000567000056710000000445612701407107024064 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import alembic from alembic import config as alembic_config import alembic.migration as alembic_migration from oslo_config import cfg from manila.db.sqlalchemy import api as db_api CONF = cfg.CONF def _alembic_config(): path = os.path.join(os.path.dirname(__file__), os.pardir, 'alembic.ini') config = alembic_config.Config(path) return config def version(): """Current database version. :returns: Database version :rtype: string """ engine = db_api.get_engine() with engine.connect() as conn: context = alembic_migration.MigrationContext.configure(conn) return context.get_current_revision() def upgrade(revision): """Upgrade database. :param version: Desired database version :type version: string """ return alembic.command.upgrade(_alembic_config(), revision or 'head') def downgrade(revision): """Downgrade database. :param version: Desired database version :type version: string """ return alembic.command.downgrade(_alembic_config(), revision or 'base') def stamp(revision): """Stamp database with provided revision. Don't run any migrations. 
:param revision: Should match one from repository or head - to stamp database with most recent revision :type revision: string """ return alembic.command.stamp(_alembic_config(), revision or 'head') def revision(message=None, autogenerate=False): """Create template for migration. :param message: Text that will be used for migration title :type message: string :param autogenerate: If True - generates diff based on current database state :type autogenerate: bool """ return alembic.command.revision(_alembic_config(), message, autogenerate) manila-2.0.0/manila/db/migrations/alembic.ini0000664000567000056710000000214712701407107022215 0ustar jenkinsjenkins00000000000000# A generic, single database configuration. [alembic] # path to migration scripts script_location = %(here)s/alembic # template used to generate migration files # file_template = %%(rev)s_%%(slug)s # max length of characters to apply to the # "slug" field #truncate_slug_length = 40 # set to 'true' to run the environment during # the 'revision' command, regardless of autogenerate # revision_environment = false # set to 'true' to allow .pyc and .pyo files without # a source .py file to be detected as revisions in the # versions/ directory # sourceless = false #sqlalchemy.url = driver://user:pass@localhost/dbname # Logging configuration [loggers] keys = root,sqlalchemy,alembic [handlers] keys = console [formatters] keys = generic [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = WARN handlers = qualname = sqlalchemy.engine [logger_alembic] level = INFO handlers = qualname = alembic [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatter_generic] format = %(levelname)-5.5s [%(name)s] %(message)s datefmt = %H:%M:%S manila-2.0.0/manila/db/base.py0000664000567000056710000000252612701407107017231 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base class for classes that need modular database access.""" from oslo_config import cfg from oslo_utils import importutils db_driver_opt = cfg.StrOpt('db_driver', default='manila.db', help='Driver to use for database access.') CONF = cfg.CONF CONF.register_opt(db_driver_opt) class Base(object): """DB driver is injected in the init method.""" def __init__(self, db_driver=None): super(Base, self).__init__() if not db_driver: db_driver = CONF.db_driver self.db = importutils.import_module(db_driver) # pylint: disable=C0103 manila-2.0.0/manila/db/migration.py0000664000567000056710000000272512701407107020311 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Database setup and migration commands.""" from manila import utils IMPL = utils.LazyPluggable( 'db_backend', sqlalchemy='manila.db.migrations.alembic.migration') def upgrade(version): """Upgrade database to 'version' or the most recent version.""" return IMPL.upgrade(version) def downgrade(version): """Downgrade database to 'version' or to initial state.""" return IMPL.downgrade(version) def version(): """Display the current database version.""" return IMPL.version() def stamp(version): """Stamp database with 'version' or the most recent version.""" return IMPL.stamp(version) def revision(message, autogenerate): """Generate new migration script.""" return IMPL.revision(message, autogenerate) manila-2.0.0/manila/db/api.py0000664000567000056710000011551212701407112017064 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Defines interface for DB access. The underlying driver is loaded as a :class:`LazyPluggable`. Functions in this module are imported into the manila.db namespace. Call these functions from manila.db namespace, not the manila.db.api namespace. All functions in this module return objects that implement a dictionary-like interface. Currently, many of these objects are sqlalchemy objects that implement a dictionary interface. However, a future goal is to have all of these objects be simple dictionaries. **Related Flags** :backend: string to lookup in the list of LazyPluggable backends. `sqlalchemy` is the only supported backend right now. :connection: string specifying the sqlalchemy connection to use, like: `sqlite:///var/lib/manila/manila.sqlite`. 
:enable_new_services: when adding a new service to the database, is it in the pool of available hardware (Default: True) """ from oslo_config import cfg from oslo_db import api as db_api db_opts = [ cfg.StrOpt('db_backend', default='sqlalchemy', help='The backend to use for database.'), cfg.BoolOpt('enable_new_services', default=True, help='Services to be added to the available pool on create.'), cfg.StrOpt('share_name_template', default='share-%s', help='Template string to be used to generate share names.'), cfg.StrOpt('share_snapshot_name_template', default='share-snapshot-%s', help='Template string to be used to generate share snapshot ' 'names.'), ] CONF = cfg.CONF CONF.register_opts(db_opts) _BACKEND_MAPPING = {'sqlalchemy': 'manila.db.sqlalchemy.api'} IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING, lazy=True) def authorize_project_context(context, project_id): """Ensures a request has permission to access the given project.""" return IMPL.authorize_project_context(context, project_id) def authorize_quota_class_context(context, class_name): """Ensures a request has permission to access the given quota class.""" return IMPL.authorize_quota_class_context(context, class_name) ################### def service_destroy(context, service_id): """Destroy the service or raise if it does not exist.""" return IMPL.service_destroy(context, service_id) def service_get(context, service_id): """Get a service or raise if it does not exist.""" return IMPL.service_get(context, service_id) def service_get_by_host_and_topic(context, host, topic): """Get a service by host it's on and topic it listens to.""" return IMPL.service_get_by_host_and_topic(context, host, topic) def service_get_all(context, disabled=None): """Get all services.""" return IMPL.service_get_all(context, disabled) def service_get_all_by_topic(context, topic): """Get all services for a given topic.""" return IMPL.service_get_all_by_topic(context, topic) def service_get_all_by_host(context, host): """Get all services for a given host.""" return IMPL.service_get_all_by_host(context, host) def service_get_all_share_sorted(context): """Get all share services sorted by share count. :returns: a list of (Service, share_count) tuples. """ return IMPL.service_get_all_share_sorted(context) def service_get_by_args(context, host, binary): """Get the state of an service by node name and binary.""" return IMPL.service_get_by_args(context, host, binary) def service_create(context, values): """Create a service from the values dictionary.""" return IMPL.service_create(context, values) def service_update(context, service_id, values): """Set the given properties on an service and update it. Raises NotFound if service does not exist. 
""" return IMPL.service_update(context, service_id, values) #################### def quota_create(context, project_id, resource, limit, user_id=None): """Create a quota for the given project and resource.""" return IMPL.quota_create(context, project_id, resource, limit, user_id=user_id) def quota_get(context, project_id, resource, user_id=None): """Retrieve a quota or raise if it does not exist.""" return IMPL.quota_get(context, project_id, resource, user_id=user_id) def quota_get_all_by_project_and_user(context, project_id, user_id): """Retrieve all quotas associated with a given project and user.""" return IMPL.quota_get_all_by_project_and_user(context, project_id, user_id) def quota_get_all_by_project(context, project_id): """Retrieve all quotas associated with a given project.""" return IMPL.quota_get_all_by_project(context, project_id) def quota_get_all(context, project_id): """Retrieve all user quotas associated with a given project.""" return IMPL.quota_get_all(context, project_id) def quota_update(context, project_id, resource, limit, user_id=None): """Update a quota or raise if it does not exist.""" return IMPL.quota_update(context, project_id, resource, limit, user_id=user_id) ################### def quota_class_create(context, class_name, resource, limit): """Create a quota class for the given name and resource.""" return IMPL.quota_class_create(context, class_name, resource, limit) def quota_class_get(context, class_name, resource): """Retrieve a quota class or raise if it does not exist.""" return IMPL.quota_class_get(context, class_name, resource) def quota_class_get_default(context): """Retrieve all default quotas.""" return IMPL.quota_class_get_default(context) def quota_class_get_all_by_name(context, class_name): """Retrieve all quotas associated with a given quota class.""" return IMPL.quota_class_get_all_by_name(context, class_name) def quota_class_update(context, class_name, resource, limit): """Update a quota class or raise if it does not exist.""" return IMPL.quota_class_update(context, class_name, resource, limit) ################### def quota_usage_get(context, project_id, resource, user_id=None): """Retrieve a quota usage or raise if it does not exist.""" return IMPL.quota_usage_get(context, project_id, resource, user_id=user_id) def quota_usage_get_all_by_project_and_user(context, project_id, user_id): """Retrieve all usage associated with a given resource.""" return IMPL.quota_usage_get_all_by_project_and_user(context, project_id, user_id) def quota_usage_get_all_by_project(context, project_id): """Retrieve all usage associated with a given resource.""" return IMPL.quota_usage_get_all_by_project(context, project_id) def quota_usage_create(context, project_id, user_id, resource, in_use, reserved=0, until_refresh=None): """Create a quota usage.""" return IMPL.quota_usage_create(context, project_id, user_id, resource, in_use, reserved, until_refresh) def quota_usage_update(context, project_id, user_id, resource, **kwargs): """Update a quota usage or raise if it does not exist.""" return IMPL.quota_usage_update(context, project_id, user_id, resource, **kwargs) ################### def reservation_create(context, uuid, usage, project_id, user_id, resource, delta, expire): """Create a reservation for the given project and resource.""" return IMPL.reservation_create(context, uuid, usage, project_id, user_id, resource, delta, expire) def reservation_get(context, uuid): """Retrieve a reservation or raise if it does not exist.""" return IMPL.reservation_get(context, uuid) 
################### def quota_reserve(context, resources, quotas, user_quotas, deltas, expire, until_refresh, max_age, project_id=None, user_id=None): """Check quotas and create appropriate reservations.""" return IMPL.quota_reserve(context, resources, quotas, user_quotas, deltas, expire, until_refresh, max_age, project_id=project_id, user_id=user_id) def reservation_commit(context, reservations, project_id=None, user_id=None): """Commit quota reservations.""" return IMPL.reservation_commit(context, reservations, project_id=project_id, user_id=user_id) def reservation_rollback(context, reservations, project_id=None, user_id=None): """Roll back quota reservations.""" return IMPL.reservation_rollback(context, reservations, project_id=project_id, user_id=user_id) def quota_destroy_all_by_project_and_user(context, project_id, user_id): """Destroy all quotas associated with a given project and user.""" return IMPL.quota_destroy_all_by_project_and_user(context, project_id, user_id) def quota_destroy_all_by_project(context, project_id): """Destroy all quotas associated with a given project.""" return IMPL.quota_destroy_all_by_project(context, project_id) def reservation_expire(context): """Roll back any expired reservations.""" return IMPL.reservation_expire(context) ################### def share_instance_get(context, instance_id, with_share_data=False): """Get share instance by id.""" return IMPL.share_instance_get(context, instance_id, with_share_data=with_share_data) def share_instance_create(context, share_id, values): """Create new share instance.""" return IMPL.share_instance_create(context, share_id, values) def share_instance_delete(context, instance_id): """Delete share instance.""" return IMPL.share_instance_delete(context, instance_id) def share_instance_update(context, instance_id, values, with_share_data=False): """Update share instance fields.""" return IMPL.share_instance_update(context, instance_id, values, with_share_data=with_share_data) def share_instances_get_all(context): """Returns all share instances.""" return IMPL.share_instances_get_all(context) def share_instances_get_all_by_share_server(context, share_server_id): """Returns all share instances with given share_server_id.""" return IMPL.share_instances_get_all_by_share_server(context, share_server_id) def share_instances_get_all_by_host(context, host): """Returns all share instances with given host.""" return IMPL.share_instances_get_all_by_host(context, host) def share_instances_get_all_by_share_network(context, share_network_id): """Returns list of shares that belong to given share network.""" return IMPL.share_instances_get_all_by_share_network(context, share_network_id) def share_instances_get_all_by_share(context, share_id): """Returns list of shares that belong to given share.""" return IMPL.share_instances_get_all_by_share(context, share_id) def share_instances_get_all_by_consistency_group_id(context, cg_id): """Returns list of share instances that belong to given cg.""" return IMPL.share_instances_get_all_by_consistency_group_id(context, cg_id) ################### def share_create(context, values, create_share_instance=True): """Create new share.""" return IMPL.share_create(context, values, create_share_instance=create_share_instance) def share_data_get_for_project(context, project_id, session=None): """Get (share_count, gigabytes) for project.""" return IMPL.share_data_get_for_project(context, project_id) def share_update(context, share_id, values): """Update share fields.""" return IMPL.share_update(context, 
share_id, values) def share_get(context, share_id): """Get share by id.""" return IMPL.share_get(context, share_id) def share_get_all(context, filters=None, sort_key=None, sort_dir=None): """Get all shares.""" return IMPL.share_get_all( context, filters=filters, sort_key=sort_key, sort_dir=sort_dir, ) def share_get_all_by_project(context, project_id, filters=None, is_public=False, sort_key=None, sort_dir=None): """Returns all shares with given project ID.""" return IMPL.share_get_all_by_project( context, project_id, filters=filters, is_public=is_public, sort_key=sort_key, sort_dir=sort_dir, ) def share_get_all_by_consistency_group_id(context, cg_id, filters=None, sort_key=None, sort_dir=None): """Returns all shares with given project ID and CG id.""" return IMPL.share_get_all_by_consistency_group_id( context, cg_id, filters=filters, sort_key=sort_key, sort_dir=sort_dir) def share_get_all_by_share_server(context, share_server_id, filters=None, sort_key=None, sort_dir=None): """Returns all shares with given share server ID.""" return IMPL.share_get_all_by_share_server( context, share_server_id, filters=filters, sort_key=sort_key, sort_dir=sort_dir, ) def share_delete(context, share_id): """Delete share.""" return IMPL.share_delete(context, share_id) ################### def share_access_create(context, values): """Allow access to share.""" return IMPL.share_access_create(context, values) def share_instance_access_copy(context, share_id, instance_id): """Maps the existing access rules for the share to the instance in the DB. Adds the instance mapping to the share's access rules and returns the share's access rules. """ return IMPL.share_instance_access_copy(context, share_id, instance_id) def share_access_get(context, access_id): """Get share access rule.""" return IMPL.share_access_get(context, access_id) def share_instance_access_get(context, access_id, instance_id): """Get access rule mapping for share instance.""" return IMPL.share_instance_access_get(context, access_id, instance_id) def share_access_get_all_for_share(context, share_id): """Get all access rules for given share.""" return IMPL.share_access_get_all_for_share(context, share_id) def share_instance_access_get_all(context, access_id, session=None): """Get access rules to all share instances.""" return IMPL.share_instance_access_get_all(context, access_id, session=None) def share_access_get_all_for_instance(context, instance_id, session=None): """Get all access rules related to a certain share instance.""" return IMPL.share_access_get_all_for_instance( context, instance_id, session=None) def share_access_get_all_by_type_and_access(context, share_id, access_type, access): """Returns share access by given type and access.""" return IMPL.share_access_get_all_by_type_and_access( context, share_id, access_type, access) def share_access_delete(context, access_id): """Deny access to share.""" return IMPL.share_access_delete(context, access_id) def share_instance_access_delete(context, mapping_id): """Deny access to share instance.""" return IMPL.share_instance_access_delete(context, mapping_id) def share_instance_update_access_status(context, share_instance_id, status): """Update access rules status of share instance.""" return IMPL.share_instance_update_access_status(context, share_instance_id, status) #################### def share_snapshot_instance_update(context, instance_id, values): """Set the given properties on a share snapshot instance and update it. Raises NotFound if snapshot instance does not exist. 
""" return IMPL.share_snapshot_instance_update(context, instance_id, values) def share_snapshot_instance_create(context, snapshot_id, values): """Create a share snapshot instance for an existing snapshot.""" return IMPL.share_snapshot_instance_create( context, snapshot_id, values) def share_snapshot_instance_get(context, instance_id, with_share_data=False): """Get a snapshot instance or raise a NotFound exception.""" return IMPL.share_snapshot_instance_get( context, instance_id, with_share_data=with_share_data) def share_snapshot_instance_get_all_with_filters(context, filters, with_share_data=False): """Get all snapshot instances satisfying provided filters.""" return IMPL.share_snapshot_instance_get_all_with_filters( context, filters, with_share_data=with_share_data) def share_snapshot_instance_delete(context, snapshot_instance_id): """Delete a share snapshot instance.""" return IMPL.share_snapshot_instance_delete(context, snapshot_instance_id) #################### def share_snapshot_create(context, values): """Create a snapshot from the values dictionary.""" return IMPL.share_snapshot_create(context, values) def snapshot_data_get_for_project(context, project_id, session=None): """Get (snapshot_count, gigabytes) for project.""" return IMPL.snapshot_data_get_for_project(context, project_id) def share_snapshot_destroy(context, snapshot_id): """Destroy the snapshot or raise if it does not exist.""" return IMPL.share_snapshot_destroy(context, snapshot_id) def share_snapshot_get(context, snapshot_id): """Get a snapshot or raise if it does not exist.""" return IMPL.share_snapshot_get(context, snapshot_id) def share_snapshot_get_all(context, filters=None, sort_key=None, sort_dir=None): """Get all snapshots.""" return IMPL.share_snapshot_get_all( context, filters=filters, sort_key=sort_key, sort_dir=sort_dir, ) def share_snapshot_get_all_by_project(context, project_id, filters=None, sort_key=None, sort_dir=None): """Get all snapshots belonging to a project.""" return IMPL.share_snapshot_get_all_by_project( context, project_id, filters=filters, sort_key=sort_key, sort_dir=sort_dir, ) def share_snapshot_get_all_for_share(context, share_id, filters=None, sort_key=None, sort_dir=None): """Get all snapshots for a share.""" return IMPL.share_snapshot_get_all_for_share( context, share_id, filters=filters, sort_key=sort_key, sort_dir=sort_dir, ) def share_snapshot_update(context, snapshot_id, values): """Set the given properties on an snapshot and update it. Raises NotFound if snapshot does not exist. 
""" return IMPL.share_snapshot_update(context, snapshot_id, values) def share_snapshot_data_get_for_project(context, project_id, session=None): """Get count and gigabytes used for snapshots for specified project.""" return IMPL.share_snapshot_data_get_for_project(context, project_id, session=None) ################### def security_service_create(context, values): """Create security service DB record.""" return IMPL.security_service_create(context, values) def security_service_delete(context, id): """Delete security service DB record.""" return IMPL.security_service_delete(context, id) def security_service_update(context, id, values): """Update security service DB record.""" return IMPL.security_service_update(context, id, values) def security_service_get(context, id): """Get security service DB record.""" return IMPL.security_service_get(context, id) def security_service_get_all(context): """Get all security service DB records.""" return IMPL.security_service_get_all(context) def security_service_get_all_by_project(context, project_id): """Get all security service DB records for the given project.""" return IMPL.security_service_get_all_by_project(context, project_id) #################### def share_metadata_get(context, share_id): """Get all metadata for a share.""" return IMPL.share_metadata_get(context, share_id) def share_metadata_delete(context, share_id, key): """Delete the given metadata item.""" IMPL.share_metadata_delete(context, share_id, key) def share_metadata_update(context, share, metadata, delete): """Update metadata if it exists, otherwise create it.""" IMPL.share_metadata_update(context, share, metadata, delete) ################### def share_export_location_get_by_uuid(context, export_location_uuid): """Get specific export location of a share.""" return IMPL.share_export_location_get_by_uuid( context, export_location_uuid) def share_export_locations_get(context, share_id): """Get all export locations of a share.""" return IMPL.share_export_locations_get(context, share_id) def share_export_locations_get_by_share_id(context, share_id, include_admin_only=True): """Get all export locations of a share by its ID.""" return IMPL.share_export_locations_get_by_share_id( context, share_id, include_admin_only=include_admin_only) def share_export_locations_get_by_share_instance_id(context, share_instance_id): """Get all export locations of a share instance by its ID.""" return IMPL.share_export_locations_get_by_share_instance_id( context, share_instance_id) def share_export_locations_update(context, share_instance_id, export_locations, delete=True): """Update export locations of a share instance.""" return IMPL.share_export_locations_update( context, share_instance_id, export_locations, delete) #################### def export_location_metadata_get(context, export_location_uuid, session=None): """Get all metadata of an export location.""" return IMPL.export_location_metadata_get( context, export_location_uuid, session=session) def export_location_metadata_delete(context, export_location_uuid, keys, session=None): """Delete metadata of an export location.""" return IMPL.export_location_metadata_delete( context, export_location_uuid, keys, session=session) def export_location_metadata_update(context, export_location_uuid, metadata, delete, session=None): """Update metadata of an export location.""" return IMPL.export_location_metadata_update( context, export_location_uuid, metadata, delete, session=session) #################### def share_network_create(context, values): """Create a share 
network DB record.""" return IMPL.share_network_create(context, values) def share_network_delete(context, id): """Delete a share network DB record.""" return IMPL.share_network_delete(context, id) def share_network_update(context, id, values): """Update a share network DB record.""" return IMPL.share_network_update(context, id, values) def share_network_get(context, id): """Get requested share network DB record.""" return IMPL.share_network_get(context, id) def share_network_get_all(context): """Get all share network DB records.""" return IMPL.share_network_get_all(context) def share_network_get_all_by_project(context, project_id): """Get all share network DB records for the given project.""" return IMPL.share_network_get_all_by_project(context, project_id) def share_network_get_all_by_security_service(context, security_service_id): """Get all share network DB records for the given project.""" return IMPL.share_network_get_all_by_security_service( context, security_service_id) def share_network_add_security_service(context, id, security_service_id): return IMPL.share_network_add_security_service(context, id, security_service_id) def share_network_remove_security_service(context, id, security_service_id): return IMPL.share_network_remove_security_service(context, id, security_service_id) ################## def network_allocation_create(context, values): """Create a network allocation DB record.""" return IMPL.network_allocation_create(context, values) def network_allocation_delete(context, id): """Delete a network allocation DB record.""" return IMPL.network_allocation_delete(context, id) def network_allocation_update(context, id, values): """Update a network allocation DB record.""" return IMPL.network_allocation_update(context, id, values) def network_allocations_get_for_share_server(context, share_server_id, session=None, label=None): """Get network allocations for share server.""" return IMPL.network_allocations_get_for_share_server( context, share_server_id, label=label, session=session) def network_allocations_get_by_ip_address(context, ip_address): """Get network allocations by IP address.""" return IMPL.network_allocations_get_by_ip_address(context, ip_address) ################## def share_server_create(context, values): """Create share server DB record.""" return IMPL.share_server_create(context, values) def share_server_delete(context, id): """Delete share server DB record.""" return IMPL.share_server_delete(context, id) def share_server_update(context, id, values): """Update share server DB record.""" return IMPL.share_server_update(context, id, values) def share_server_get(context, id, session=None): """Get share server DB record by ID.""" return IMPL.share_server_get(context, id, session=session) def share_server_get_all_by_host_and_share_net_valid(context, host, share_net_id, session=None): """Get share server DB records by host and share net not error.""" return IMPL.share_server_get_all_by_host_and_share_net_valid( context, host, share_net_id, session=session) def share_server_get_all(context): """Get all share server DB records.""" return IMPL.share_server_get_all(context) def share_server_get_all_by_host(context, host): """Get all share servers related to particular host.""" return IMPL.share_server_get_all_by_host(context, host) def share_server_get_all_unused_deletable(context, host, updated_before): """Get all free share servers DB records.""" return IMPL.share_server_get_all_unused_deletable(context, host, updated_before) def share_server_backend_details_set(context, 
share_server_id, server_details): """Create DB record with backend details.""" return IMPL.share_server_backend_details_set(context, share_server_id, server_details) ################## def share_type_create(context, values, projects=None): """Create a new share type.""" return IMPL.share_type_create(context, values, projects) def share_type_get_all(context, inactive=False, filters=None): """Get all share types. :param context: context to query under :param inactive: Include inactive share types to the result set :param filters: Filters for the query in the form of key/value. :is_public: Filter share types based on visibility: * **True**: List public share types only * **False**: List private share types only * **None**: List both public and private share types :returns: list of matching share types """ return IMPL.share_type_get_all(context, inactive, filters) def share_type_get(context, type_id, inactive=False, expected_fields=None): """Get share type by id. :param context: context to query under :param type_id: share type id to get. :param inactive: Consider inactive share types when searching :param expected_fields: Return those additional fields. Supported fields are: projects. :returns: share type """ return IMPL.share_type_get(context, type_id, inactive, expected_fields) def share_type_get_by_name(context, name): """Get share type by name.""" return IMPL.share_type_get_by_name(context, name) def share_type_access_get_all(context, type_id): """Get all share type access of a share type.""" return IMPL.share_type_access_get_all(context, type_id) def share_type_access_add(context, type_id, project_id): """Add share type access for project.""" return IMPL.share_type_access_add(context, type_id, project_id) def share_type_access_remove(context, type_id, project_id): """Remove share type access for project.""" return IMPL.share_type_access_remove(context, type_id, project_id) def share_type_destroy(context, id): """Delete a share type.""" return IMPL.share_type_destroy(context, id) def volume_get_active_by_window(context, begin, end=None, project_id=None): """Get all the volumes inside the window. Specifying a project_id will filter for a certain project. """ return IMPL.volume_get_active_by_window(context, begin, end, project_id) #################### def share_type_extra_specs_get(context, share_type_id): """Get all extra specs for a share type.""" return IMPL.share_type_extra_specs_get(context, share_type_id) def share_type_extra_specs_delete(context, share_type_id, key): """Delete the given extra specs item.""" return IMPL.share_type_extra_specs_delete(context, share_type_id, key) def share_type_extra_specs_update_or_create(context, share_type_id, extra_specs): """Create or update share type extra specs. This adds or modifies the key/value pairs specified in the extra specs dict argument. 
""" return IMPL.share_type_extra_specs_update_or_create(context, share_type_id, extra_specs) def driver_private_data_get(context, host, entity_id, key=None, default=None): """Get one, list or all key-value pairs for given host and entity_id.""" return IMPL.driver_private_data_get(context, host, entity_id, key, default) def driver_private_data_update(context, host, entity_id, details, delete_existing=False): """Update key-value pairs for given host and entity_id.""" return IMPL.driver_private_data_update(context, host, entity_id, details, delete_existing) def driver_private_data_delete(context, host, entity_id, key=None): """Remove one, list or all key-value pairs for given host and entity_id.""" return IMPL.driver_private_data_delete(context, host, entity_id, key) #################### def availability_zone_get(context, id_or_name): """Get availability zone by name or id.""" return IMPL.availability_zone_get(context, id_or_name) def availability_zone_get_all(context): """Get all active availability zones.""" return IMPL.availability_zone_get_all(context) #################### def consistency_group_get(context, consistency_group_id): """Get a consistency group or raise if it does not exist.""" return IMPL.consistency_group_get(context, consistency_group_id) def consistency_group_get_all(context, detailed=True): """Get all consistency groups.""" return IMPL.consistency_group_get_all(context, detailed=detailed) def consistency_group_get_all_by_host(context, host, detailed=True): """Get all consistency groups belonging to a host.""" return IMPL.consistency_group_get_all_by_host(context, host, detailed=detailed) def consistency_group_create(context, values): """Create a consistency group from the values dictionary.""" return IMPL.consistency_group_create(context, values) def consistency_group_get_all_by_share_server(context, share_server_id): """Get all consistency groups associated with a share server.""" return IMPL.consistency_group_get_all_by_share_server(context, share_server_id) def consistency_group_get_all_by_project(context, project_id, detailed=True): """Get all consistency groups belonging to a project.""" return IMPL.consistency_group_get_all_by_project(context, project_id, detailed=detailed) def consistency_group_update(context, consistency_group_id, values): """Set the given properties on a consistency group and update it. Raises NotFound if consistency group does not exist. 
""" return IMPL.consistency_group_update(context, consistency_group_id, values) def consistency_group_destroy(context, consistency_group_id): """Destroy the consistency group or raise if it does not exist.""" return IMPL.consistency_group_destroy(context, consistency_group_id) def count_shares_in_consistency_group(context, consistency_group_id): """Returns the number of undeleted shares with the specified cg.""" return IMPL.count_shares_in_consistency_group(context, consistency_group_id) def count_cgsnapshots_in_consistency_group(context, consistency_group_id): """Returns the number of undeleted cgsnapshots with the specified cg.""" return IMPL.count_cgsnapshots_in_consistency_group(context, consistency_group_id) def count_consistency_groups_in_share_network(context, share_network_id, session=None): """Returns the number of undeleted cgs with the specified share network.""" return IMPL.count_consistency_groups_in_share_network(context, share_network_id) def count_cgsnapshot_members_in_share(context, share_id, session=None): """Returns the number of cgsnapshot members linked to the share.""" return IMPL.count_cgsnapshot_members_in_share(context, share_id) def cgsnapshot_get(context, cgsnapshot_id): """Get a cgsnapshot.""" return IMPL.cgsnapshot_get(context, cgsnapshot_id) def cgsnapshot_get_all(context, detailed=True): """Get all cgsnapshots.""" return IMPL.cgsnapshot_get_all(context, detailed=detailed) def cgsnapshot_get_all_by_project(context, project_id, detailed=True): """Get all cgsnapshots belonging to a project.""" return IMPL.cgsnapshot_get_all_by_project(context, project_id, detailed=detailed) def cgsnapshot_create(context, values): """Create a cgsnapshot from the values dictionary.""" return IMPL.cgsnapshot_create(context, values) def cgsnapshot_update(context, cgsnapshot_id, values): """Set the given properties on a cgsnapshot and update it. Raises NotFound if cgsnapshot does not exist. """ return IMPL.cgsnapshot_update(context, cgsnapshot_id, values) def cgsnapshot_destroy(context, cgsnapshot_id): """Destroy the cgsnapshot or raise if it does not exist.""" return IMPL.cgsnapshot_destroy(context, cgsnapshot_id) def cgsnapshot_members_get_all(context, cgsnapshot_id): """Return the members of a cgsnapshot.""" return IMPL.cgsnapshot_members_get_all(context, cgsnapshot_id) def cgsnapshot_member_create(context, values): """Create a cgsnapshot member from the values dictionary.""" return IMPL.cgsnapshot_member_create(context, values) def cgsnapshot_member_update(context, member_id, values): """Set the given properties on a cgsnapshot member and update it. Raises NotFound if cgsnapshot member does not exist. 
""" return IMPL.cgsnapshot_member_update(context, member_id, values) #################### def share_replicas_get_all(context, with_share_server=False, with_share_data=False): """Returns all share replicas regardless of share.""" return IMPL.share_replicas_get_all( context, with_share_server=with_share_server, with_share_data=with_share_data) def share_replicas_get_all_by_share(context, share_id, with_share_server=False, with_share_data=False): """Returns all share replicas for a given share.""" return IMPL.share_replicas_get_all_by_share( context, share_id, with_share_server=with_share_server, with_share_data=with_share_data) def share_replicas_get_available_active_replica(context, share_id, with_share_server=False, with_share_data=False): """Returns an active replica for a given share.""" return IMPL.share_replicas_get_available_active_replica( context, share_id, with_share_server=with_share_server, with_share_data=with_share_data) def share_replicas_get_active_replicas_by_share(context, share_id, with_share_server=False, with_share_data=False): """Returns all active replicas for a given share.""" return IMPL.share_replicas_get_active_replicas_by_share( context, share_id, with_share_server=with_share_server, with_share_data=with_share_data) def share_replica_get(context, replica_id, with_share_server=False, with_share_data=False): """Get share replica by id.""" return IMPL.share_replica_get( context, replica_id, with_share_server=with_share_server, with_share_data=with_share_data) def share_replica_update(context, share_replica_id, values, with_share_data=False): """Updates a share replica with given values.""" return IMPL.share_replica_update(context, share_replica_id, values, with_share_data=with_share_data) def share_replica_delete(context, share_replica_id): """Deletes a share replica.""" return IMPL.share_replica_delete(context, share_replica_id) manila-2.0.0/manila/api/0000775000567000056710000000000012701407265016131 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/api/auth.py0000664000567000056710000000265712701407107017451 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack, LLC. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from manila.api.middleware import auth from manila.i18n import _LW LOG = log.getLogger(__name__) class ManilaKeystoneContext(auth.ManilaKeystoneContext): def __init__(self, application): LOG.warning(_LW('manila.api.auth:ManilaKeystoneContext is deprecated. ' 'Please use ' 'manila.api.middleware.auth:ManilaKeystoneContext ' 'instead.')) super(ManilaKeystoneContext, self).__init__(application) def pipeline_factory(loader, global_conf, **local_conf): LOG.warning(_LW('manila.api.auth:pipeline_factory is deprecated. ' 'Please use manila.api.middleware.auth:pipeline_factory ' 'instead.')) auth.pipeline_factory(loader, global_conf, **local_conf) manila-2.0.0/manila/api/versions.py0000664000567000056710000000663212701407107020355 0ustar jenkinsjenkins00000000000000# Copyright 2010 OpenStack LLC. 
# Copyright 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_config import cfg from manila.api import extensions from manila.api import openstack from manila.api.openstack import api_version_request from manila.api.openstack import wsgi from manila.api.views import versions as views_versions CONF = cfg.CONF _LINKS = [{ 'rel': 'describedby', 'type': 'text/html', 'href': 'http://docs.openstack.org/', }] _MEDIA_TYPES = [{ 'base': 'application/json', 'type': 'application/vnd.openstack.share+json;version=1', }] _KNOWN_VERSIONS = { 'v1.0': { 'id': 'v1.0', 'status': 'SUPPORTED', 'version': '', 'min_version': '', 'updated': '2015-08-27T11:33:21Z', 'links': _LINKS, 'media-types': _MEDIA_TYPES, }, 'v2.0': { 'id': 'v2.0', 'status': 'CURRENT', 'version': api_version_request._MAX_API_VERSION, 'min_version': api_version_request._MIN_API_VERSION, 'updated': '2015-08-27T11:33:21Z', 'links': _LINKS, 'media-types': _MEDIA_TYPES, }, } class VersionsRouter(openstack.APIRouter): """Route versions requests.""" ExtensionManager = extensions.ExtensionManager def _setup_routes(self, mapper, ext_mgr): self.resources['versions'] = create_resource() mapper.connect('versions', '/', controller=self.resources['versions'], action='all') mapper.redirect('', '/') class VersionsController(wsgi.Controller): def __init__(self): super(VersionsController, self).__init__(None) @wsgi.Controller.api_version('1.0', '1.0') def index(self, req): """Return versions supported prior to the microversions epoch.""" builder = views_versions.get_view_builder(req) known_versions = copy.deepcopy(_KNOWN_VERSIONS) known_versions.pop('v2.0') return builder.build_versions(known_versions) @wsgi.Controller.api_version('2.0') # noqa def index(self, req): # pylint: disable=E0102 """Return versions supported after the start of microversions.""" builder = views_versions.get_view_builder(req) known_versions = copy.deepcopy(_KNOWN_VERSIONS) known_versions.pop('v1.0') return builder.build_versions(known_versions) # NOTE (cknight): Calling the versions API without # /v1 or /v2 in the URL will lead to this unversioned # method, which should always return info about all # available versions. @wsgi.response(300) def all(self, req): """Return all known versions.""" builder = views_versions.get_view_builder(req) known_versions = copy.deepcopy(_KNOWN_VERSIONS) return builder.build_versions(known_versions) def create_resource(): return wsgi.Resource(VersionsController()) manila-2.0.0/manila/api/__init__.py0000664000567000056710000000254512701407107020243 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log import paste.urlmap from manila.i18n import _LW LOG = log.getLogger(__name__) CONF = cfg.CONF def root_app_factory(loader, global_conf, **local_conf): if CONF.enable_v1_api: LOG.warning(_LW('The config option enable_v1_api is deprecated, is ' 'not used, and will be removed in a future release.')) if CONF.enable_v2_api: LOG.warning(_LW('The config option enable_v2_api is deprecated, is ' 'not used, and will be removed in a future release.')) return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf) manila-2.0.0/manila/api/urlmap.py0000664000567000056710000002375012701407107020005 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re try: from urllib.request import parse_http_list # noqa except ImportError: from urllib2 import parse_http_list # noqa import paste.urlmap from manila.api.openstack import wsgi _quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"' _option_header_piece_re = re.compile( r';\s*([^\s;=]+|%s)\s*' r'(?:=\s*([^;]+|%s))?\s*' % (_quoted_string_re, _quoted_string_re)) def unquote_header_value(value): """Unquotes a header value. This does not use the real unquoting but what browsers are actually using for quoting. :param value: the header value to unquote. """ if value and value[0] == value[-1] == '"': # this is not the real unquoting, but fixing this so that the # RFC is met will result in bugs with internet explorer and # probably some other browsers as well. IE for example is # uploading files with "C:\foo\bar.txt" as filename value = value[1:-1] return value def parse_list_header(value): """Parse lists as described by RFC 2068 Section 2. In particular, parse comma-separated lists where the elements of the list may include quoted-strings. A quoted-string could contain a comma. A non-quoted string could have quotes in the middle. Quotes are removed automatically after parsing. The return value is a standard :class:`list`: >>> parse_list_header('token, "quoted value"') ['token', 'quoted value'] :param value: a string with a list header. :return: :class:`list` """ result = [] for item in parse_http_list(value): if item[:1] == item[-1:] == '"': item = unquote_header_value(item[1:-1]) result.append(item) return result def parse_options_header(value): """Parse header into content type and options. Parse a ``Content-Type`` like header into a tuple with the content type and the options: >>> parse_options_header('Content-Type: text/html; mimetype=text/html') ('Content-Type:', {'mimetype': 'text/html'}) :param value: the header to parse. 
:return: (str, options) """ def _tokenize(string): for match in _option_header_piece_re.finditer(string): key, value = match.groups() key = unquote_header_value(key) if value is not None: value = unquote_header_value(value) yield key, value if not value: return '', {} parts = _tokenize(';' + value) name = next(parts)[0] extra = dict(parts) return name, extra class Accept(object): def __init__(self, value): self._content_types = [parse_options_header(v) for v in parse_list_header(value)] def best_match(self, supported_content_types): # FIXME: Should we have a more sophisticated matching algorithm that # takes into account the version as well? best_quality = -1 best_content_type = None best_params = {} best_match = '*/*' for content_type in supported_content_types: for content_mask, params in self._content_types: try: quality = float(params.get('q', 1)) except ValueError: continue if quality < best_quality: continue elif best_quality == quality: if best_match.count('*') <= content_mask.count('*'): continue if self._match_mask(content_mask, content_type): best_quality = quality best_content_type = content_type best_params = params best_match = content_mask return best_content_type, best_params def content_type_params(self, best_content_type): """Find parameters in Accept header for given content type.""" for content_type, params in self._content_types: if best_content_type == content_type: return params return {} def _match_mask(self, mask, content_type): if '*' not in mask: return content_type == mask if mask == '*/*': return True mask_major = mask[:-2] content_type_major = content_type.split('/', 1)[0] return content_type_major == mask_major def urlmap_factory(loader, global_conf, **local_conf): if 'not_found_app' in local_conf: not_found_app = local_conf.pop('not_found_app') else: not_found_app = global_conf.get('not_found_app') if not_found_app: not_found_app = loader.get_app(not_found_app, global_conf=global_conf) urlmap = URLMap(not_found_app=not_found_app) for path, app_name in local_conf.items(): path = paste.urlmap.parse_path_expression(path) app = loader.get_app(app_name, global_conf=global_conf) urlmap[path] = app return urlmap class URLMap(paste.urlmap.URLMap): def _match(self, host, port, path_info): """Find longest match for a given URL path.""" for (domain, app_url), app in self.applications: if domain and domain != host and domain != host + ':' + port: continue if (path_info == app_url or path_info.startswith(app_url + '/')): return app, app_url return None, None def _set_script_name(self, app, app_url): def wrap(environ, start_response): environ['SCRIPT_NAME'] += app_url return app(environ, start_response) return wrap def _munge_path(self, app, path_info, app_url): def wrap(environ, start_response): environ['SCRIPT_NAME'] += app_url environ['PATH_INFO'] = path_info[len(app_url):] return app(environ, start_response) return wrap def _path_strategy(self, host, port, path_info): """Check path suffix for MIME type and path prefix for API version.""" mime_type = app = app_url = None parts = path_info.rsplit('.', 1) if len(parts) > 1: possible_type = 'application/' + parts[1] if possible_type in wsgi.SUPPORTED_CONTENT_TYPES: mime_type = possible_type parts = path_info.split('/') if len(parts) > 1: possible_app, possible_app_url = self._match(host, port, path_info) # Don't use prefix if it ends up matching default if possible_app and possible_app_url: app_url = possible_app_url app = self._munge_path(possible_app, path_info, app_url) return mime_type, app, app_url def 
_content_type_strategy(self, host, port, environ): """Check Content-Type header for API version.""" app = None params = parse_options_header(environ.get('CONTENT_TYPE', ''))[1] if 'version' in params: app, app_url = self._match(host, port, '/v' + params['version']) if app: app = self._set_script_name(app, app_url) return app def _accept_strategy(self, host, port, environ, supported_content_types): """Check Accept header for best matching MIME type and API version.""" accept = Accept(environ.get('HTTP_ACCEPT', '')) app = None # Find the best match in the Accept header mime_type, params = accept.best_match(supported_content_types) if 'version' in params: app, app_url = self._match(host, port, '/v' + params['version']) if app: app = self._set_script_name(app, app_url) return mime_type, app def __call__(self, environ, start_response): host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower() if ':' in host: host, port = host.split(':', 1) else: if environ['wsgi.url_scheme'] == 'http': port = '80' else: port = '443' path_info = environ['PATH_INFO'] path_info = self.normalize_url(path_info, False)[1] # The API version is determined in one of three ways: # 1) URL path prefix (eg /v1.1/tenant/servers/detail) # 2) Content-Type header (eg application/json;version=1.1) # 3) Accept header (eg application/json;q=0.8;version=1.1) # Manila supports only application/json as MIME type for the responses. supported_content_types = list(wsgi.SUPPORTED_CONTENT_TYPES) mime_type, app, app_url = self._path_strategy(host, port, path_info) if not app: app = self._content_type_strategy(host, port, environ) if not mime_type or not app: possible_mime_type, possible_app = self._accept_strategy( host, port, environ, supported_content_types) if possible_mime_type and not mime_type: mime_type = possible_mime_type if possible_app and not app: app = possible_app if not mime_type: mime_type = 'application/json' if not app: # Didn't match a particular version, probably matches default app, app_url = self._match(host, port, path_info) if app: app = self._munge_path(app, path_info, app_url) if app: environ['manila.best_content_type'] = mime_type return app(environ, start_response) environ['paste.urlmap_object'] = self return self.not_found_application(environ, start_response) manila-2.0.0/manila/api/common.py0000664000567000056710000002642612701407112017774 0ustar jenkinsjenkins00000000000000# Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
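# NOTE(editor): illustrative sketch, not part of the original tree.  It
# exercises the Accept-header negotiation implemented in manila/api/urlmap.py
# above: parse_options_header() splits each media type from its parameters,
# and Accept.best_match() returns the supported type with the highest
# quality, together with its parameters.  If those parameters carry
# 'version', URLMap tries to dispatch to the matching '/v<version>'
# application.  The header value below is a made-up example.
def _example_accept_negotiation():
    # Local import so the sketch stays self-contained and does not touch
    # this module's real import block.
    from manila.api import urlmap as _urlmap

    accept = _urlmap.Accept(
        'application/json;q=0.8;version=2.0, application/xml;q=0.2')
    mime_type, params = accept.best_match(['application/json'])
    # Expected: mime_type == 'application/json' and params should include
    # {'q': '0.8', 'version': '2.0'}.
    return mime_type, params.get('version')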
import os import re from oslo_config import cfg from oslo_log import log from six.moves.urllib import parse import webob from manila.api.openstack import api_version_request as api_version from manila.api.openstack import versioned_method from manila.i18n import _ api_common_opts = [ cfg.IntOpt( 'osapi_max_limit', default=1000, help='The maximum number of items returned in a single response from ' 'a collection resource.'), cfg.StrOpt( 'osapi_share_base_URL', help='Base URL to be presented to users in links to the Share API'), ] CONF = cfg.CONF CONF.register_opts(api_common_opts) LOG = log.getLogger(__name__) # Regex that matches alphanumeric characters, periods, hypens, # colons and underscores: # ^ assert position at start of the string # [\w\.\-\:\_] match expression # $ assert position at end of the string VALID_KEY_NAME_REGEX = re.compile(r"^[\w\.\-\:\_]+$", re.UNICODE) def validate_key_names(key_names_list): """Validate each item of the list to match key name regex.""" for key_name in key_names_list: if not VALID_KEY_NAME_REGEX.match(key_name): return False return True def get_pagination_params(request): """Return marker, limit tuple from request. :param request: `wsgi.Request` possibly containing 'marker' and 'limit' GET variables. 'marker' is the id of the last element the client has seen, and 'limit' is the maximum number of items to return. If 'limit' is not specified, 0, or > max_limit, we default to max_limit. Negative values for either marker or limit will cause exc.HTTPBadRequest() exceptions to be raised. """ params = {} if 'limit' in request.GET: params['limit'] = _get_limit_param(request) if 'marker' in request.GET: params['marker'] = _get_marker_param(request) return params def _get_limit_param(request): """Extract integer limit from request or fail.""" try: limit = int(request.GET['limit']) except ValueError: msg = _('limit param must be an integer') raise webob.exc.HTTPBadRequest(explanation=msg) if limit < 0: msg = _('limit param must be positive') raise webob.exc.HTTPBadRequest(explanation=msg) return limit def _get_marker_param(request): """Extract marker ID from request or fail.""" return request.GET['marker'] def limited(items, request, max_limit=CONF.osapi_max_limit): """Return a slice of items according to requested offset and limit. :param items: A sliceable entity :param request: ``wsgi.Request`` possibly containing 'offset' and 'limit' GET variables. 'offset' is where to start in the list, and 'limit' is the maximum number of items to return. If 'limit' is not specified, 0, or > max_limit, we default to max_limit. Negative values for either offset or limit will cause exc.HTTPBadRequest() exceptions to be raised. 
:kwarg max_limit: The maximum number of items to return from 'items' """ try: offset = int(request.GET.get('offset', 0)) except ValueError: msg = _('offset param must be an integer') raise webob.exc.HTTPBadRequest(explanation=msg) try: limit = int(request.GET.get('limit', max_limit)) except ValueError: msg = _('limit param must be an integer') raise webob.exc.HTTPBadRequest(explanation=msg) if limit < 0: msg = _('limit param must be positive') raise webob.exc.HTTPBadRequest(explanation=msg) if offset < 0: msg = _('offset param must be positive') raise webob.exc.HTTPBadRequest(explanation=msg) limit = min(max_limit, limit or max_limit) range_end = offset + limit return items[offset:range_end] def limited_by_marker(items, request, max_limit=CONF.osapi_max_limit): """Return a slice of items according to the requested marker and limit.""" params = get_pagination_params(request) limit = params.get('limit', max_limit) marker = params.get('marker') limit = min(max_limit, limit) start_index = 0 if marker: start_index = -1 for i, item in enumerate(items): if 'flavorid' in item: if item['flavorid'] == marker: start_index = i + 1 break elif item['id'] == marker or item.get('uuid') == marker: start_index = i + 1 break if start_index < 0: msg = _('marker [%s] not found') % marker raise webob.exc.HTTPBadRequest(explanation=msg) range_end = start_index + limit return items[start_index:range_end] def remove_version_from_href(href): """Removes the first api version from the href. Given: 'http://www.manila.com/v1.1/123' Returns: 'http://www.manila.com/123' Given: 'http://www.manila.com/v1.1' Returns: 'http://www.manila.com' """ parsed_url = parse.urlsplit(href) url_parts = parsed_url.path.split('/', 2) # NOTE: this should match vX.X or vX expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)') if expression.match(url_parts[1]): del url_parts[1] new_path = '/'.join(url_parts) if new_path == parsed_url.path: msg = 'href %s does not contain version' % href LOG.debug(msg) raise ValueError(msg) parsed_url = list(parsed_url) parsed_url[2] = new_path return parse.urlunsplit(parsed_url) def dict_to_query_str(params): # TODO(throughnothing): we should just use urllib.urlencode instead of this # But currently we don't work with urlencoded url's param_str = "" for key, val in params.items(): param_str = param_str + '='.join([str(key), str(val)]) + '&' return param_str.rstrip('&') class ViewBuilder(object): """Model API responses as dictionaries.""" _collection_name = None _detail_version_modifiers = [] def _get_links(self, request, identifier): return [{"rel": "self", "href": self._get_href_link(request, identifier), }, {"rel": "bookmark", "href": self._get_bookmark_link(request, identifier), }] def _get_next_link(self, request, identifier): """Return href string with proper limit and marker params.""" params = request.params.copy() params["marker"] = identifier prefix = self._update_link_prefix(request.application_url, CONF.osapi_share_base_URL) url = os.path.join(prefix, request.environ["manila.context"].project_id, self._collection_name) return "%s?%s" % (url, dict_to_query_str(params)) def _get_href_link(self, request, identifier): """Return an href string pointing to this object.""" prefix = self._update_link_prefix(request.application_url, CONF.osapi_share_base_URL) return os.path.join(prefix, request.environ["manila.context"].project_id, self._collection_name, str(identifier)) def _get_bookmark_link(self, request, identifier): """Create a URL that refers to a specific resource.""" base_url = 
remove_version_from_href(request.application_url) base_url = self._update_link_prefix(base_url, CONF.osapi_share_base_URL) return os.path.join(base_url, request.environ["manila.context"].project_id, self._collection_name, str(identifier)) def _get_collection_links(self, request, items, id_key="uuid"): """Retrieve 'next' link, if applicable.""" links = [] limit = int(request.params.get("limit", 0)) if limit and limit == len(items): last_item = items[-1] if id_key in last_item: last_item_id = last_item[id_key] else: last_item_id = last_item["id"] links.append({ "rel": "next", "href": self._get_next_link(request, last_item_id), }) return links def _update_link_prefix(self, orig_url, prefix): if not prefix: return orig_url url_parts = list(parse.urlsplit(orig_url)) prefix_parts = list(parse.urlsplit(prefix)) url_parts[0:2] = prefix_parts[0:2] return parse.urlunsplit(url_parts) def update_versioned_resource_dict(self, request, resource_dict, resource): """Updates teh given resource dict for the given request version. This method calls every method, that is applicable to the request version, in _detail_version_modifiers. """ for method_name in self._detail_version_modifiers: method = getattr(self, method_name) if request.api_version_request.matches_versioned_method(method): method.func(self, resource_dict, resource) @classmethod def versioned_method(cls, min_ver, max_ver=None, experimental=False): """Decorator for versioning API methods. :param min_ver: string representing minimum version :param max_ver: optional string representing maximum version :param experimental: flag indicating an API is experimental and is subject to change or removal at any time """ def decorator(f): obj_min_ver = api_version.APIVersionRequest(min_ver) if max_ver: obj_max_ver = api_version.APIVersionRequest(max_ver) else: obj_max_ver = api_version.APIVersionRequest() # Add to list of versioned methods registered func_name = f.__name__ new_func = versioned_method.VersionedMethod( func_name, obj_min_ver, obj_max_ver, experimental, f) return new_func return decorator def remove_invalid_options(context, search_options, allowed_search_options): """Remove search options that are not valid for non-admin API/context.""" if context.is_admin: # Allow all options return # Otherwise, strip out all unknown options unknown_options = [opt for opt in search_options if opt not in allowed_search_options] bad_options = ", ".join(unknown_options) LOG.debug("Removing options '%(bad_options)s' from query", {"bad_options": bad_options}) for opt in unknown_options: del search_options[opt] manila-2.0.0/manila/api/openstack/0000775000567000056710000000000012701407265020120 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/api/openstack/versioned_method.py0000664000567000056710000000326512701407107024031 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # Copyright 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
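# NOTE(editor): illustrative sketch, not part of the original tree.  It shows
# how the ViewBuilder machinery in manila/api/common.py above is meant to be
# used together with the VersionedMethod class defined below: a view builder
# lists method names in _detail_version_modifiers, decorates them with
# ViewBuilder.versioned_method(), and update_versioned_resource_dict() then
# applies only the modifiers whose version range matches the request.  The
# builder name, field name and the '2.11' microversion are hypothetical.
def _example_versioned_view_builder():
    # Local import; sketch only.
    from manila.api import common

    class _ExampleViewBuilder(common.ViewBuilder):
        _collection_name = 'examples'
        _detail_version_modifiers = ['add_replication_fields']

        @common.ViewBuilder.versioned_method('2.11')
        def add_replication_fields(self, example_dict, example):
            # Invoked as method.func(self, resource_dict, resource) by
            # update_versioned_resource_dict() when the request version
            # is at least 2.11.
            example_dict['replication_type'] = example.get('replication_type')

    return _ExampleViewBuilder()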
from manila import utils class VersionedMethod(utils.ComparableMixin): def __init__(self, name, start_version, end_version, experimental, func): """Versioning information for a single method. Minimum and maximums are inclusive. :param name: Name of the method :param start_version: Minimum acceptable version :param end_version: Maximum acceptable_version :param experimental: True if method is experimental :param func: Method to call """ self.name = name self.start_version = start_version self.end_version = end_version self.experimental = experimental self.func = func def __str__(self): args = { 'name': self.name, 'start': self.start_version, 'end': self.end_version } return ("Version Method %(name)s: min: %(start)s, max: %(end)s" % args) def _cmpkey(self): """Return the value used by ComparableMixin for rich comparisons.""" return self.start_version manila-2.0.0/manila/api/openstack/__init__.py0000664000567000056710000001136712701407107022234 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack, LLC. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ WSGI middleware for OpenStack API controllers. """ from oslo_log import log import routes from manila.api.openstack import wsgi from manila.i18n import _ from manila.i18n import _LW from manila import wsgi as base_wsgi LOG = log.getLogger(__name__) class APIMapper(routes.Mapper): def routematch(self, url=None, environ=None): if url is "": result = self._match("", environ) return result[0], result[1] return routes.Mapper.routematch(self, url, environ) class ProjectMapper(APIMapper): def resource(self, member_name, collection_name, **kwargs): if 'parent_resource' not in kwargs: kwargs['path_prefix'] = '{project_id}/' else: parent_resource = kwargs['parent_resource'] p_collection = parent_resource['collection_name'] p_member = parent_resource['member_name'] kwargs['path_prefix'] = '{project_id}/%s/:%s_id' % (p_collection, p_member) routes.Mapper.resource(self, member_name, collection_name, **kwargs) class APIRouter(base_wsgi.Router): """Routes requests on the API to the appropriate controller and method.""" ExtensionManager = None # override in subclasses @classmethod def factory(cls, global_config, **local_config): """Simple paste factory, :class:`manila.wsgi.Router` doesn't have.""" return cls() def __init__(self, ext_mgr=None): if ext_mgr is None: if self.ExtensionManager: ext_mgr = self.ExtensionManager() else: raise Exception(_("Must specify an ExtensionManager class")) mapper = ProjectMapper() self.resources = {} self._setup_routes(mapper, ext_mgr) self._setup_ext_routes(mapper, ext_mgr) self._setup_extensions(ext_mgr) super(APIRouter, self).__init__(mapper) def _setup_ext_routes(self, mapper, ext_mgr): for resource in ext_mgr.get_resources(): LOG.debug('Extended resource: %s', resource.collection) wsgi_resource = wsgi.Resource(resource.controller) self.resources[resource.collection] = wsgi_resource kargs = dict( controller=wsgi_resource, collection=resource.collection_actions, member=resource.member_actions) if 
resource.parent: kargs['parent_resource'] = resource.parent mapper.resource(resource.collection, resource.collection, **kargs) if resource.custom_routes_fn: resource.custom_routes_fn(mapper, wsgi_resource) def _setup_extensions(self, ext_mgr): for extension in ext_mgr.get_controller_extensions(): ext_name = extension.extension.name collection = extension.collection controller = extension.controller if collection not in self.resources: LOG.warning(_LW('Extension %(ext_name)s: Cannot extend ' 'resource %(collection)s: No such resource'), {'ext_name': ext_name, 'collection': collection}) continue LOG.debug('Extension %(ext_name)s extending resource: ' '%(collection)s', {'ext_name': ext_name, 'collection': collection}) resource = self.resources[collection] resource.register_actions(controller) resource.register_extensions(controller) def _setup_routes(self, mapper, ext_mgr): raise NotImplementedError class FaultWrapper(base_wsgi.Middleware): def __init__(self, application): LOG.warning(_LW('manila.api.openstack:FaultWrapper is deprecated. ' 'Please use ' 'manila.api.middleware.fault:FaultWrapper instead.')) # Avoid circular imports from here. from manila.api.middleware import fault super(FaultWrapper, self).__init__(fault.FaultWrapper(application)) manila-2.0.0/manila/api/openstack/urlmap.py0000664000567000056710000000203312701407107021763 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack, LLC. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from manila.api import urlmap from manila.i18n import _LW LOG = log.getLogger(__name__) def urlmap_factory(loader, global_conf, **local_conf): LOG.warning(_LW('manila.api.openstack.urlmap:urlmap_factory ' 'is deprecated. ' 'Please use manila.api.urlmap:urlmap_factory instead.')) urlmap.urlmap_factory(loader, global_conf, **local_conf) manila-2.0.0/manila/api/openstack/wsgi.py0000664000567000056710000014007612701407107021446 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools import inspect import math import time from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import strutils import six import webob import webob.exc from manila.api.openstack import api_version_request as api_version from manila.api.openstack import versioned_method from manila.common import constants from manila import exception from manila.i18n import _ from manila.i18n import _LE from manila.i18n import _LI from manila import policy from manila import wsgi LOG = log.getLogger(__name__) SUPPORTED_CONTENT_TYPES = ( 'application/json', ) _MEDIA_TYPE_MAP = { 'application/json': 'json', } # name of attribute to keep version method information VER_METHOD_ATTR = 'versioned_methods' # Name of header used by clients to request a specific version # of the REST API API_VERSION_REQUEST_HEADER = 'X-OpenStack-Manila-API-Version' EXPERIMENTAL_API_REQUEST_HEADER = 'X-OpenStack-Manila-API-Experimental' V1_SCRIPT_NAME = '/v1' class Request(webob.Request): """Add some OpenStack API-specific logic to the base webob.Request.""" def __init__(self, *args, **kwargs): super(Request, self).__init__(*args, **kwargs) self._resource_cache = {} if not hasattr(self, 'api_version_request'): self.api_version_request = api_version.APIVersionRequest() def cache_resource(self, resource_to_cache, id_attribute='id', name=None): """Cache the given resource. Allow API methods to cache objects, such as results from a DB query, to be used by API extensions within the same API request. The resource_to_cache can be a list or an individual resource, but ultimately resources are cached individually using the given id_attribute. Different resources types might need to be cached during the same request, they can be cached using the name parameter. For example: Controller 1: request.cache_resource(db_volumes, 'volumes') request.cache_resource(db_volume_types, 'types') Controller 2: db_volumes = request.cached_resource('volumes') db_type_1 = request.cached_resource_by_id('1', 'types') If no name is given, a default name will be used for the resource. An instance of this class only lives for the lifetime of a single API request, so there's no need to implement full cache management. """ if not isinstance(resource_to_cache, list): resource_to_cache = [resource_to_cache] if not name: name = self.path cached_resources = self._resource_cache.setdefault(name, {}) for resource in resource_to_cache: cached_resources[resource[id_attribute]] = resource def cached_resource(self, name=None): """Get the cached resources cached under the given resource name. Allow an API extension to get previously stored objects within the same API request. Note that the object data will be slightly stale. :returns: a dict of id_attribute to the resource from the cached resources, an empty map if an empty collection was cached, or None if nothing has been cached yet under this name """ if not name: name = self.path if name not in self._resource_cache: # Nothing has been cached for this key yet return None return self._resource_cache[name] def cached_resource_by_id(self, resource_id, name=None): """Get a resource by ID cached under the given resource name. Allow an API extension to get a previously stored object within the same API request. This is basically a convenience method to lookup by ID on the dictionary of all cached resources. Note that the object data will be slightly stale. 
:returns: the cached resource or None if the item is not in the cache """ resources = self.cached_resource(name) if not resources: # Nothing has been cached yet for this key yet return None return resources.get(resource_id) def cache_db_items(self, key, items, item_key='id'): """Cache db items. Allow API methods to store objects from a DB query to be used by API extensions within the same API request. An instance of this class only lives for the lifetime of a single API request, so there's no need to implement full cache management. """ self.cache_resource(items, item_key, key) def get_db_items(self, key): """Get db item by key. Allow an API extension to get previously stored objects within the same API request. Note that the object data will be slightly stale. """ return self.cached_resource(key) def get_db_item(self, key, item_key): """Get db item by key and item key. Allow an API extension to get a previously stored object within the same API request. Note that the object data will be slightly stale. """ return self.get_db_items(key).get(item_key) def cache_db_share_types(self, share_types): self.cache_db_items('share_types', share_types, 'id') def cache_db_share_type(self, share_type): self.cache_db_items('share_types', [share_type], 'id') def get_db_share_types(self): return self.get_db_items('share_types') def get_db_share_type(self, share_type_id): return self.get_db_item('share_types', share_type_id) def best_match_content_type(self): """Determine the requested response content-type.""" if 'manila.best_content_type' not in self.environ: # Calculate the best MIME type content_type = None # Check URL path suffix parts = self.path.rsplit('.', 1) if len(parts) > 1: possible_type = 'application/' + parts[1] if possible_type in SUPPORTED_CONTENT_TYPES: content_type = possible_type if not content_type: content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES) self.environ['manila.best_content_type'] = (content_type or 'application/json') return self.environ['manila.best_content_type'] def get_content_type(self): """Determine content type of the request body. Does not do any body introspection, only checks header. """ if "Content-Type" not in self.headers: return None allowed_types = SUPPORTED_CONTENT_TYPES content_type = self.content_type if content_type not in allowed_types: raise exception.InvalidContentType(content_type=content_type) return content_type def set_api_version_request(self): """Set API version request based on the request header information. Microversions starts with /v2, so if a client sends a /v1 URL, then ignore the headers and request 1.0 APIs. 
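        For example (the header and version values below are illustrative),
        a request such as::

            GET /v2/{project_id}/shares
            X-OpenStack-Manila-API-Version: 2.15

        results in an APIVersionRequest('2.15'), while any request sent to
        the /v1 endpoint is pinned to APIVersionRequest('1.0') and the
        header is ignored.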
""" if not self.script_name: self.api_version_request = api_version.APIVersionRequest() elif self.script_name == V1_SCRIPT_NAME: self.api_version_request = api_version.APIVersionRequest('1.0') else: if API_VERSION_REQUEST_HEADER in self.headers: hdr_string = self.headers[API_VERSION_REQUEST_HEADER] self.api_version_request = api_version.APIVersionRequest( hdr_string) # Check that the version requested is within the global # minimum/maximum of supported API versions if not self.api_version_request.matches( api_version.min_api_version(), api_version.max_api_version()): raise exception.InvalidGlobalAPIVersion( req_ver=self.api_version_request.get_string(), min_ver=api_version.min_api_version().get_string(), max_ver=api_version.max_api_version().get_string()) else: self.api_version_request = api_version.APIVersionRequest( api_version.DEFAULT_API_VERSION) # Check if experimental API was requested if EXPERIMENTAL_API_REQUEST_HEADER in self.headers: self.api_version_request.experimental = strutils.bool_from_string( self.headers[EXPERIMENTAL_API_REQUEST_HEADER]) class ActionDispatcher(object): """Maps method name to local methods through action name.""" def dispatch(self, *args, **kwargs): """Find and call local method.""" action = kwargs.pop('action', 'default') action_method = getattr(self, six.text_type(action), self.default) return action_method(*args, **kwargs) def default(self, data): raise NotImplementedError() class TextDeserializer(ActionDispatcher): """Default request body deserialization.""" def deserialize(self, datastring, action='default'): return self.dispatch(datastring, action=action) def default(self, datastring): return {} class JSONDeserializer(TextDeserializer): def _from_json(self, datastring): try: return jsonutils.loads(datastring) except ValueError: msg = _("cannot understand JSON") raise exception.MalformedRequestBody(reason=msg) def default(self, datastring): return {'body': self._from_json(datastring)} class DictSerializer(ActionDispatcher): """Default request body serialization.""" def serialize(self, data, action='default'): return self.dispatch(data, action=action) def default(self, data): return "" class JSONDictSerializer(DictSerializer): """Default JSON request body serialization.""" def default(self, data): return six.b(jsonutils.dumps(data)) def serializers(**serializers): """Attaches serializers to a method. This decorator associates a dictionary of serializers with a method. Note that the function attributes are directly manipulated; the method is not wrapped. """ def decorator(func): if not hasattr(func, 'wsgi_serializers'): func.wsgi_serializers = {} func.wsgi_serializers.update(serializers) return func return decorator def deserializers(**deserializers): """Attaches deserializers to a method. This decorator associates a dictionary of deserializers with a method. Note that the function attributes are directly manipulated; the method is not wrapped. """ def decorator(func): if not hasattr(func, 'wsgi_deserializers'): func.wsgi_deserializers = {} func.wsgi_deserializers.update(deserializers) return func return decorator def response(code): """Attaches response code to a method. This decorator associates a response code with a method. Note that the function attributes are directly manipulated; the method is not wrapped. """ def decorator(func): func.wsgi_code = code return func return decorator class ResponseObject(object): """Bundles a response object with appropriate serializers. 
Object that app methods may return in order to bind alternate serializers with a response object to be serialized. Its use is optional. """ def __init__(self, obj, code=None, headers=None, **serializers): """Binds serializers with an object. Takes keyword arguments akin to the @serializer() decorator for specifying serializers. Serializers specified will be given preference over default serializers or method-specific serializers on return. """ self.obj = obj self.serializers = serializers self._default_code = 200 self._code = code self._headers = headers or {} self.serializer = None self.media_type = None def __getitem__(self, key): """Retrieves a header with the given name.""" return self._headers[key.lower()] def __setitem__(self, key, value): """Sets a header with the given name to the given value.""" self._headers[key.lower()] = value def __delitem__(self, key): """Deletes the header with the given name.""" del self._headers[key.lower()] def _bind_method_serializers(self, meth_serializers): """Binds method serializers with the response object. Binds the method serializers with the response object. Serializers specified to the constructor will take precedence over serializers specified to this method. :param meth_serializers: A dictionary with keys mapping to response types and values containing serializer objects. """ # We can't use update because that would be the wrong # precedence for mtype, serializer in meth_serializers.items(): self.serializers.setdefault(mtype, serializer) def get_serializer(self, content_type, default_serializers=None): """Returns the serializer for the wrapped object. Returns the serializer for the wrapped object subject to the indicated content type. If no serializer matching the content type is attached, an appropriate serializer drawn from the default serializers will be used. If no appropriate serializer is available, raises InvalidContentType. """ default_serializers = default_serializers or {} try: mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) if mtype in self.serializers: return mtype, self.serializers[mtype] else: return mtype, default_serializers[mtype] except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) def preserialize(self, content_type, default_serializers=None): """Prepares the serializer that will be used to serialize. Determines the serializer that will be used and prepares an instance of it for later call. This allows the serializer to be accessed by extensions for, e.g., template extension. """ mtype, serializer = self.get_serializer(content_type, default_serializers) self.media_type = mtype self.serializer = serializer() def attach(self, **kwargs): """Attach slave templates to serializers.""" if self.media_type in kwargs: self.serializer.attach(kwargs[self.media_type]) def serialize(self, request, content_type, default_serializers=None): """Serializes the wrapped object. Utility method for serializing the wrapped object. Returns a webob.Response object. 
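        A minimal usage sketch (values are illustrative and 'request' is an
        incoming wsgi Request)::

            resp_obj = ResponseObject({'share': {'id': 'fake-id'}},
                                      code=202, json=JSONDictSerializer)
            response = resp_obj.serialize(request, 'application/json')
            # response is a webob.Response with status 202 and a JSON body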
""" if self.serializer: serializer = self.serializer else: _mtype, _serializer = self.get_serializer(content_type, default_serializers) serializer = _serializer() response = webob.Response() response.status_int = self.code for hdr, value in self._headers.items(): response.headers[hdr] = six.text_type(value) response.headers['Content-Type'] = six.text_type(content_type) if self.obj is not None: response.body = serializer.serialize(self.obj) return response @property def code(self): """Retrieve the response status.""" return self._code or self._default_code @property def headers(self): """Retrieve the headers.""" return self._headers.copy() def action_peek_json(body): """Determine action to invoke.""" try: decoded = jsonutils.loads(body) except ValueError: msg = _("cannot understand JSON") raise exception.MalformedRequestBody(reason=msg) # Make sure there's exactly one key... if len(decoded) != 1: msg = _("too many body keys") raise exception.MalformedRequestBody(reason=msg) # Return the action and the decoded body... return list(decoded.keys())[0] class ResourceExceptionHandler(object): """Context manager to handle Resource exceptions. Used when processing exceptions generated by API implementation methods (or their extensions). Converts most exceptions to Fault exceptions, with the appropriate logging. """ def __enter__(self): return None def __exit__(self, ex_type, ex_value, ex_traceback): if not ex_value: return True if isinstance(ex_value, exception.NotAuthorized): msg = six.text_type(ex_value) raise Fault(webob.exc.HTTPForbidden(explanation=msg)) elif isinstance(ex_value, exception.VersionNotFoundForAPIMethod): raise elif isinstance(ex_value, exception.Invalid): raise Fault(exception.ConvertedException( code=ex_value.code, explanation=six.text_type(ex_value))) elif isinstance(ex_value, TypeError): exc_info = (ex_type, ex_value, ex_traceback) LOG.error(_LE('Exception handling resource: %s'), ex_value, exc_info=exc_info) raise Fault(webob.exc.HTTPBadRequest()) elif isinstance(ex_value, Fault): LOG.info(_LI("Fault thrown: %s"), six.text_type(ex_value)) raise ex_value elif isinstance(ex_value, webob.exc.HTTPException): LOG.info(_LI("HTTP exception thrown: %s"), six.text_type(ex_value)) raise Fault(ex_value) # We didn't handle the exception return False class Resource(wsgi.Application): """WSGI app that handles (de)serialization and controller dispatch. WSGI app that reads routing information supplied by RoutesMiddleware and calls the requested action method upon its controller. All controller action methods must accept a 'req' argument, which is the incoming wsgi.Request. If the operation is a PUT or POST, the controller method must also accept a 'body' argument (the deserialized request body). They may raise a webob.exc exception or return a dict, which will be serialized by requested content type. Exceptions derived from webob.exc.HTTPException will be automatically wrapped in Fault() to provide API friendly error responses. """ support_api_request_version = True def __init__(self, controller, action_peek=None, **deserializers): """init method of Resource. 
:param controller: object that implement methods created by routes lib :param action_peek: dictionary of routines for peeking into an action request body to determine the desired action """ self.controller = controller default_deserializers = dict(json=JSONDeserializer) default_deserializers.update(deserializers) self.default_deserializers = default_deserializers self.default_serializers = dict(json=JSONDictSerializer) self.action_peek = dict(json=action_peek_json) self.action_peek.update(action_peek or {}) # Copy over the actions dictionary self.wsgi_actions = {} if controller: self.register_actions(controller) # Save a mapping of extensions self.wsgi_extensions = {} self.wsgi_action_extensions = {} def register_actions(self, controller): """Registers controller actions with this resource.""" actions = getattr(controller, 'wsgi_actions', {}) for key, method_name in actions.items(): self.wsgi_actions[key] = getattr(controller, method_name) def register_extensions(self, controller): """Registers controller extensions with this resource.""" extensions = getattr(controller, 'wsgi_extensions', []) for method_name, action_name in extensions: # Look up the extending method extension = getattr(controller, method_name) if action_name: # Extending an action... if action_name not in self.wsgi_action_extensions: self.wsgi_action_extensions[action_name] = [] self.wsgi_action_extensions[action_name].append(extension) else: # Extending a regular method if method_name not in self.wsgi_extensions: self.wsgi_extensions[method_name] = [] self.wsgi_extensions[method_name].append(extension) def get_action_args(self, request_environment): """Parse dictionary created by routes library.""" # NOTE(Vek): Check for get_action_args() override in the # controller if hasattr(self.controller, 'get_action_args'): return self.controller.get_action_args(request_environment) try: args = request_environment['wsgiorg.routing_args'][1].copy() except (KeyError, IndexError, AttributeError): return {} try: del args['controller'] except KeyError: pass try: del args['format'] except KeyError: pass return args def get_body(self, request): try: content_type = request.get_content_type() except exception.InvalidContentType: LOG.debug("Unrecognized Content-Type provided in request") return None, '' if not content_type: LOG.debug("No Content-Type provided in request") return None, '' if len(request.body) <= 0: LOG.debug("Empty body provided in request") return None, '' return content_type, request.body def deserialize(self, meth, content_type, body): meth_deserializers = getattr(meth, 'wsgi_deserializers', {}) try: mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) if mtype in meth_deserializers: deserializer = meth_deserializers[mtype] else: deserializer = self.default_deserializers[mtype] except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) return deserializer().deserialize(body) def pre_process_extensions(self, extensions, request, action_args): # List of callables for post-processing extensions post = [] for ext in extensions: if inspect.isgeneratorfunction(ext): response = None # If it's a generator function, the part before the # yield is the preprocessing stage try: with ResourceExceptionHandler(): gen = ext(req=request, **action_args) response = next(gen) except Fault as ex: response = ex # We had a response... 
if response: return response, [] # No response, queue up generator for post-processing post.append(gen) else: # Regular functions only perform post-processing post.append(ext) # Run post-processing in the reverse order return None, reversed(post) def post_process_extensions(self, extensions, resp_obj, request, action_args): for ext in extensions: response = None if inspect.isgenerator(ext): # If it's a generator, run the second half of # processing try: with ResourceExceptionHandler(): response = ext.send(resp_obj) except StopIteration: # Normal exit of generator continue except Fault as ex: response = ex else: # Regular functions get post-processing... try: with ResourceExceptionHandler(): response = ext(req=request, resp_obj=resp_obj, **action_args) except exception.VersionNotFoundForAPIMethod: # If an attached extension (@wsgi.extends) for the # method has no version match its not an error. We # just don't run the extends code continue except Fault as ex: response = ex # We had a response... if response: return response return None @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): """WSGI method that controls (de)serialization and method dispatch.""" LOG.info(_LI("%(method)s %(url)s") % {"method": request.method, "url": request.url}) if self.support_api_request_version: # Set the version of the API requested based on the header try: request.set_api_version_request() except exception.InvalidAPIVersionString as e: return Fault(webob.exc.HTTPBadRequest( explanation=six.text_type(e))) except exception.InvalidGlobalAPIVersion as e: return Fault(webob.exc.HTTPNotAcceptable( explanation=six.text_type(e))) # Identify the action, its arguments, and the requested # content type action_args = self.get_action_args(request.environ) action = action_args.pop('action', None) content_type, body = self.get_body(request) accept = request.best_match_content_type() # NOTE(Vek): Splitting the function up this way allows for # auditing by external tools that wrap the existing # function. If we try to audit __call__(), we can # run into troubles due to the @webob.dec.wsgify() # decorator. return self._process_stack(request, action, action_args, content_type, body, accept) def _process_stack(self, request, action, action_args, content_type, body, accept): """Implement the processing stack.""" # Get the implementing method try: meth, extensions = self.get_method(request, action, content_type, body) except (AttributeError, TypeError): return Fault(webob.exc.HTTPNotFound()) except KeyError as ex: msg = _("There is no such action: %s") % ex.args[0] return Fault(webob.exc.HTTPBadRequest(explanation=msg)) except exception.MalformedRequestBody: msg = _("Malformed request body") return Fault(webob.exc.HTTPBadRequest(explanation=msg)) if body: msg = ("Action: '%(action)s', calling method: %(meth)s, body: " "%(body)s") % {'action': action, 'body': six.text_type(body), 'meth': six.text_type(meth)} LOG.debug(strutils.mask_password(msg)) else: LOG.debug("Calling method '%(meth)s'", {'meth': six.text_type(meth)}) # Now, deserialize the request body... 
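        # For instance (illustrative), a JSON body of '{"share": {"size": 1}}'
        # is deserialized by JSONDeserializer into
        # {'body': {'share': {'size': 1}}}, which is merged into action_args
        # below so the controller method receives it as its 'body' keyword
        # argument.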
try: if content_type: contents = self.deserialize(meth, content_type, body) else: contents = {} except exception.InvalidContentType: msg = _("Unsupported Content-Type") return Fault(webob.exc.HTTPBadRequest(explanation=msg)) except exception.MalformedRequestBody: msg = _("Malformed request body") return Fault(webob.exc.HTTPBadRequest(explanation=msg)) # Update the action args action_args.update(contents) project_id = action_args.pop("project_id", None) context = request.environ.get('manila.context') if (context and project_id and (project_id != context.project_id)): msg = _("Malformed request url") return Fault(webob.exc.HTTPBadRequest(explanation=msg)) # Run pre-processing extensions response, post = self.pre_process_extensions(extensions, request, action_args) if not response: try: with ResourceExceptionHandler(): action_result = self.dispatch(meth, request, action_args) except Fault as ex: response = ex if not response: # No exceptions; convert action_result into a # ResponseObject resp_obj = None if type(action_result) is dict or action_result is None: resp_obj = ResponseObject(action_result) elif isinstance(action_result, ResponseObject): resp_obj = action_result else: response = action_result # Run post-processing extensions if resp_obj: _set_request_id_header(request, resp_obj) # Do a preserialize to set up the response object serializers = getattr(meth, 'wsgi_serializers', {}) resp_obj._bind_method_serializers(serializers) if hasattr(meth, 'wsgi_code'): resp_obj._default_code = meth.wsgi_code resp_obj.preserialize(accept, self.default_serializers) # Process post-processing extensions response = self.post_process_extensions(post, resp_obj, request, action_args) if resp_obj and not response: response = resp_obj.serialize(request, accept, self.default_serializers) try: msg_dict = dict(url=request.url, status=response.status_int) msg = _("%(url)s returned with HTTP %(status)d") % msg_dict except AttributeError as e: msg_dict = dict(url=request.url, e=e) msg = _("%(url)s returned a fault: %(e)s") % msg_dict LOG.info(msg) if hasattr(response, 'headers'): for hdr, val in response.headers.items(): # Headers must be utf-8 strings response.headers[hdr] = six.text_type(val) if not request.api_version_request.is_null(): response.headers[API_VERSION_REQUEST_HEADER] = ( request.api_version_request.get_string()) if request.api_version_request.experimental: response.headers[EXPERIMENTAL_API_REQUEST_HEADER] = ( request.api_version_request.experimental) response.headers['Vary'] = API_VERSION_REQUEST_HEADER return response def get_method(self, request, action, content_type, body): """Look up the action-specific method and its extensions.""" # Look up the method try: if not self.controller: meth = getattr(self, action) else: meth = getattr(self.controller, action) except AttributeError: if (not self.wsgi_actions or action not in ['action', 'create', 'delete']): # Propagate the error raise else: return meth, self.wsgi_extensions.get(action, []) if action == 'action': # OK, it's an action; figure out which action... 
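            # An 'action' request carries a single-key JSON body whose key
            # names the concrete action, e.g. (illustrative):
            #     {"os-reset_status": {"status": "error"}}
            # action_peek_json() returns that key so the matching handler
            # can be looked up in wsgi_actions below.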
mtype = _MEDIA_TYPE_MAP.get(content_type) action_name = self.action_peek[mtype](body) LOG.debug("Action body: %s" % body) else: action_name = action # Look up the action method return (self.wsgi_actions[action_name], self.wsgi_action_extensions.get(action_name, [])) def dispatch(self, method, request, action_args): """Dispatch a call to the action-specific method.""" try: return method(req=request, **action_args) except exception.VersionNotFoundForAPIMethod: # We deliberately don't return any message information # about the exception to the user so it looks as if # the method is simply not implemented. return Fault(webob.exc.HTTPNotFound()) def action(name): """Mark a function as an action. The given name will be taken as the action key in the body. This is also overloaded to allow extensions to provide non-extending definitions of create and delete operations. """ def decorator(func): func.wsgi_action = name return func return decorator def extends(*args, **kwargs): """Indicate a function extends an operation. Can be used as either:: @extends def index(...): pass or as:: @extends(action='resize') def _action_resize(...): pass """ def decorator(func): # Store enough information to find what we're extending func.wsgi_extends = (func.__name__, kwargs.get('action')) return func # If we have positional arguments, call the decorator if args: return decorator(*args) # OK, return the decorator instead return decorator class ControllerMetaclass(type): """Controller metaclass. This metaclass automates the task of assembling a dictionary mapping action keys to method names. """ def __new__(mcs, name, bases, cls_dict): """Adds the wsgi_actions dictionary to the class.""" # Find all actions actions = {} extensions = [] versioned_methods = None # start with wsgi actions from base classes for base in bases: actions.update(getattr(base, 'wsgi_actions', {})) if base.__name__ == "Controller": # NOTE(cyeoh): This resets the VER_METHOD_ATTR attribute # between API controller class creations. This allows us # to use a class decorator on the API methods that doesn't # require naming explicitly what method is being versioned as # it can be implicit based on the method decorated. It is a bit # ugly. if VER_METHOD_ATTR in base.__dict__: versioned_methods = getattr(base, VER_METHOD_ATTR) delattr(base, VER_METHOD_ATTR) for key, value in cls_dict.items(): if not callable(value): continue if getattr(value, 'wsgi_action', None): actions[value.wsgi_action] = key elif getattr(value, 'wsgi_extends', None): extensions.append(value.wsgi_extends) # Add the actions and extensions to the class dict cls_dict['wsgi_actions'] = actions cls_dict['wsgi_extensions'] = extensions if versioned_methods: cls_dict[VER_METHOD_ATTR] = versioned_methods return super(ControllerMetaclass, mcs).__new__(mcs, name, bases, cls_dict) @six.add_metaclass(ControllerMetaclass) class Controller(object): """Default controller.""" _view_builder_class = None def __init__(self, view_builder=None): """Initialize controller with a view builder instance.""" if view_builder: self._view_builder = view_builder elif self._view_builder_class: self._view_builder = self._view_builder_class() else: self._view_builder = None def __getattribute__(self, key): def version_select(*args, **kwargs): """Select and call the matching version of the specified method. Look for the method which matches the name supplied and version constraints and calls it with the supplied arguments. 
:returns: Returns the result of the method called :raises: VersionNotFoundForAPIMethod if there is no method which matches the name and version constraints """ # The first arg to all versioned methods is always the request # object. The version for the request is attached to the # request object if len(args) == 0: version_request = kwargs['req'].api_version_request else: version_request = args[0].api_version_request func_list = self.versioned_methods[key] for func in func_list: if version_request.matches_versioned_method(func): # Update the version_select wrapper function so # other decorator attributes like wsgi.response # are still respected. functools.update_wrapper(version_select, func.func) return func.func(self, *args, **kwargs) # No version match raise exception.VersionNotFoundForAPIMethod( version=version_request) try: version_meth_dict = object.__getattribute__(self, VER_METHOD_ATTR) except AttributeError: # No versioning on this class return object.__getattribute__(self, key) if (version_meth_dict and key in object.__getattribute__(self, VER_METHOD_ATTR)): return version_select return object.__getattribute__(self, key) # NOTE(cyeoh): This decorator MUST appear first (the outermost # decorator) on an API method for it to work correctly @classmethod def api_version(cls, min_ver, max_ver=None, experimental=False): """Decorator for versioning API methods. Add the decorator to any method which takes a request object as the first parameter and belongs to a class which inherits from wsgi.Controller. :param min_ver: string representing minimum version :param max_ver: optional string representing maximum version :param experimental: flag indicating an API is experimental and is subject to change or removal at any time """ def decorator(f): obj_min_ver = api_version.APIVersionRequest(min_ver) if max_ver: obj_max_ver = api_version.APIVersionRequest(max_ver) else: obj_max_ver = api_version.APIVersionRequest() # Add to list of versioned methods registered func_name = f.__name__ new_func = versioned_method.VersionedMethod( func_name, obj_min_ver, obj_max_ver, experimental, f) func_dict = getattr(cls, VER_METHOD_ATTR, {}) if not func_dict: setattr(cls, VER_METHOD_ATTR, func_dict) func_list = func_dict.get(func_name, []) if not func_list: func_dict[func_name] = func_list func_list.append(new_func) # Ensure the list is sorted by minimum version (reversed) # so later when we work through the list in order we find # the method which has the latest version which supports # the version requested. # TODO(cyeoh): Add check to ensure that there are no overlapping # ranges of valid versions as that is ambiguous func_list.sort(reverse=True) return f return decorator @staticmethod def authorize(arg): """Decorator for checking the policy on API methods. Add this decorator to any API method which takes a request object as the first parameter and belongs to a class which inherits from wsgi.Controller. The class must also have a class member called 'resource_name' which specifies the resource for the policy check. Can be used in any of the following forms @authorize @authorize('my_action_name') :param arg: Can either be the function being decorated or a str containing the 'action' for the policy check. If no action name is provided, the function name is assumed to be the action name. 
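        As a sketch (the controller method shown is illustrative), with
        'resource_name' set to 'share' the call below is checked against a
        policy rule derived from the resource name and action (e.g.
        'share:force_delete') before the method body runs::

            @Controller.authorize('force_delete')
            def _force_delete(self, req, id, body):
                ...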
""" action_name = None def decorator(f): @functools.wraps(f) def wrapper(self, req, *args, **kwargs): action = action_name or f.__name__ context = req.environ['manila.context'] try: policy.check_policy(context, self.resource_name, action) except exception.PolicyNotAuthorized: raise webob.exc.HTTPForbidden() return f(self, req, *args, **kwargs) return wrapper if callable(arg): return decorator(arg) else: action_name = arg return decorator @staticmethod def is_valid_body(body, entity_name): if not (body and entity_name in body): return False def is_dict(d): try: d.get(None) return True except AttributeError: return False if not is_dict(body[entity_name]): return False return True class AdminActionsMixin(object): """Mixin class for API controllers with admin actions.""" body_attributes = { 'status': 'reset_status', 'replica_state': 'reset_replica_state', 'task_state': 'reset_task_state', } valid_statuses = { 'status': set([ constants.STATUS_CREATING, constants.STATUS_AVAILABLE, constants.STATUS_DELETING, constants.STATUS_ERROR, constants.STATUS_ERROR_DELETING, ]), 'replica_state': set([ constants.REPLICA_STATE_ACTIVE, constants.REPLICA_STATE_IN_SYNC, constants.REPLICA_STATE_OUT_OF_SYNC, constants.STATUS_ERROR, ]), 'task_state': set(constants.TASK_STATE_STATUSES), } def _update(self, *args, **kwargs): raise NotImplementedError() def _get(self, *args, **kwargs): raise NotImplementedError() def _delete(self, *args, **kwargs): raise NotImplementedError() def validate_update(self, body, status_attr='status'): update = {} try: update[status_attr] = body[status_attr] except (TypeError, KeyError): msg = _("Must specify '%s'") % status_attr raise webob.exc.HTTPBadRequest(explanation=msg) if update[status_attr] not in self.valid_statuses[status_attr]: expl = (_("Invalid state. 
Valid states: %s.") % ", ".join(self.valid_statuses[status_attr])) raise webob.exc.HTTPBadRequest(explanation=expl) return update @Controller.authorize('reset_status') def _reset_status(self, req, id, body, status_attr='status'): """Reset the status_attr specified on the resource.""" context = req.environ['manila.context'] body_attr = self.body_attributes[status_attr] update = self.validate_update( body.get(body_attr, body.get('-'.join(('os', body_attr)))), status_attr=status_attr) msg = "Updating %(resource)s '%(id)s' with '%(update)r'" LOG.debug(msg, {'resource': self.resource_name, 'id': id, 'update': update}) try: self._update(context, id, update) except exception.NotFound as e: raise webob.exc.HTTPNotFound(six.text_type(e)) return webob.Response(status_int=202) @Controller.authorize('force_delete') def _force_delete(self, req, id, body): """Delete a resource, bypassing the check for status.""" context = req.environ['manila.context'] try: resource = self._get(context, id) except exception.NotFound as e: raise webob.exc.HTTPNotFound(six.text_type(e)) self._delete(context, resource, force=True) return webob.Response(status_int=202) class Fault(webob.exc.HTTPException): """Wrap webob.exc.HTTPException to provide API friendly response.""" _fault_names = {400: "badRequest", 401: "unauthorized", 403: "forbidden", 404: "itemNotFound", 405: "badMethod", 409: "conflictingRequest", 413: "overLimit", 415: "badMediaType", 501: "notImplemented", 503: "serviceUnavailable"} def __init__(self, exception): """Create a Fault for the given webob.exc.exception.""" self.wrapped_exc = exception self.status_int = exception.status_int @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): """Generate a WSGI response based on the exception passed to ctor.""" # Replace the body with fault details. 
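        # The serialized body is keyed by the fault name registered for the
        # status code, e.g. (illustrative) a 404 produces:
        #     {"itemNotFound": {"code": 404, "message": "<explanation>"}}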
code = self.wrapped_exc.status_int fault_name = self._fault_names.get(code, "computeFault") fault_data = { fault_name: { 'code': code, 'message': self.wrapped_exc.explanation}} if code == 413: retry = self.wrapped_exc.headers['Retry-After'] fault_data[fault_name]['retryAfter'] = retry if not req.api_version_request.is_null(): self.wrapped_exc.headers[API_VERSION_REQUEST_HEADER] = ( req.api_version_request.get_string()) if req.api_version_request.experimental: self.wrapped_exc.headers[EXPERIMENTAL_API_REQUEST_HEADER] = ( req.api_version_request.experimental) self.wrapped_exc.headers['Vary'] = API_VERSION_REQUEST_HEADER content_type = req.best_match_content_type() serializer = { 'application/json': JSONDictSerializer(), }[content_type] self.wrapped_exc.body = serializer.serialize(fault_data) self.wrapped_exc.content_type = content_type _set_request_id_header(req, self.wrapped_exc.headers) return self.wrapped_exc def __str__(self): return self.wrapped_exc.__str__() def _set_request_id_header(req, headers): context = req.environ.get('manila.context') if context: headers['x-compute-request-id'] = context.request_id class OverLimitFault(webob.exc.HTTPException): """Rate-limited request response.""" def __init__(self, message, details, retry_time): """Initialize new `OverLimitFault` with relevant information.""" hdrs = OverLimitFault._retry_after(retry_time) self.wrapped_exc = webob.exc.HTTPRequestEntityTooLarge(headers=hdrs) self.content = { "overLimitFault": { "code": self.wrapped_exc.status_int, "message": message, "details": details, }, } @staticmethod def _retry_after(retry_time): delay = int(math.ceil(retry_time - time.time())) retry_after = delay if delay > 0 else 0 headers = {'Retry-After': '%d' % retry_after} return headers @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): """Wrap the exception. Wrap the exception with a serialized body conforming to our error format. """ content_type = request.best_match_content_type() serializer = { 'application/json': JSONDictSerializer(), }[content_type] content = serializer.serialize(self.content) self.wrapped_exc.body = content return self.wrapped_exc manila-2.0.0/manila/api/openstack/api_version_request.py0000664000567000056710000001617512701407107024565 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # Copyright 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from manila.api.openstack import versioned_method from manila import exception from manila.i18n import _ from manila import utils # Define the minimum and maximum version of the API across all of the # REST API. The format of the version is: # X.Y where: # # - X will only be changed if a significant backwards incompatible API # change is made which affects the API as whole. That is, something # that is only very very rarely incremented. # # - Y when you make any change to the API. Note that this includes # semantic changes which may not affect the input or output formats or # even originate in the API code layer. 
We are not distinguishing # between backwards compatible and backwards incompatible changes in # the versioning system. It must be made clear in the documentation as # to what is a backwards compatible change and what is a backwards # incompatible one. # # You must update the API version history string below with a one or # two line description as well as update rest_api_version_history.rst REST_API_VERSION_HISTORY = """ REST API Version History: * 1.0 - Initial version. Includes all V1 APIs and extensions in Kilo. * 2.0 - Versions API updated to reflect beginning of microversions epoch. * 2.1 - Share create() doesn't ignore availability_zone field of share. * 2.2 - Snapshots become optional feature. * 2.3 - Share instances admin API * 2.4 - Consistency Group support * 2.5 - Share Migration admin API * 2.6 - Return share_type UUID instead of name in Share API * 2.7 - Rename old extension-like API URLs to core-API-like * 2.8 - Attr "is_public" can be set for share using API "manage" * 2.9 - Add export locations API * 2.10 - Field 'access_rules_status' was added to shares and share instances. * 2.11 - Share Replication support * 2.12 - Manage/unmanage snapshot API. * 2.13 - Add "cephx" auth type to allow_access * 2.14 - 'Preferred' attribute in export location metadata * 2.15 - Added Share migration 'migration_cancel', 'migration_get_progress', 'migration_complete' APIs, renamed 'migrate_share' to 'migration_start' and added notify parameter to 'migration_start'. """ # The minimum and maximum versions of the API supported # The default api version request is defined to be the # the minimum version of the API supported. _MIN_API_VERSION = "2.0" _MAX_API_VERSION = "2.15" DEFAULT_API_VERSION = _MIN_API_VERSION # NOTE(cyeoh): min and max versions declared as functions so we can # mock them for unittests. Do not use the constants directly anywhere # else. def min_api_version(): return APIVersionRequest(_MIN_API_VERSION) def max_api_version(): return APIVersionRequest(_MAX_API_VERSION) class APIVersionRequest(utils.ComparableMixin): """This class represents an API Version Request. This class includes convenience methods for manipulation and comparison of version numbers as needed to implement API microversions. 
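    A rough usage sketch (the version strings are illustrative)::

        request_version = APIVersionRequest('2.5')
        APIVersionRequest('2.1') <= request_version    # True
        # A null max_version means there is no upper bound:
        request_version.matches(APIVersionRequest('2.1'),
                                APIVersionRequest())   # True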
""" def __init__(self, version_string=None, experimental=False): """Create an API version request object.""" self._ver_major = None self._ver_minor = None self._experimental = experimental if version_string is not None: match = re.match(r"^([1-9]\d*)\.([1-9]\d*|0)$", version_string) if match: self._ver_major = int(match.group(1)) self._ver_minor = int(match.group(2)) else: raise exception.InvalidAPIVersionString(version=version_string) def __str__(self): """Debug/Logging representation of object.""" return ("API Version Request Major: %(major)s, Minor: %(minor)s" % {'major': self._ver_major, 'minor': self._ver_minor}) def is_null(self): return self._ver_major is None and self._ver_minor is None def _cmpkey(self): """Return the value used by ComparableMixin for rich comparisons.""" return self._ver_major, self._ver_minor @property def experimental(self): return self._experimental @experimental.setter def experimental(self, value): if type(value) != bool: msg = _('The experimental property must be a bool value.') raise exception.InvalidParameterValue(err=msg) self._experimental = value def matches_versioned_method(self, method): """Compares this version to that of a versioned method.""" if type(method) != versioned_method.VersionedMethod: msg = _('An API version request must be compared ' 'to a VersionedMethod object.') raise exception.InvalidParameterValue(err=msg) return self.matches(method.start_version, method.end_version, method.experimental) def matches(self, min_version, max_version, experimental=False): """Compares this version to the specified min/max range. Returns whether the version object represents a version greater than or equal to the minimum version and less than or equal to the maximum version. If min_version is null then there is no minimum limit. If max_version is null then there is no maximum limit. If self is null then raise ValueError. :param min_version: Minimum acceptable version. :param max_version: Maximum acceptable version. :param experimental: Whether to match experimental APIs. :returns: boolean """ if self.is_null(): raise ValueError # NOTE(cknight): An experimental request should still match a # non-experimental API, so the experimental check isn't just # looking for equality. if not self.experimental and experimental: return False if max_version.is_null() and min_version.is_null(): return True elif max_version.is_null(): return min_version <= self elif min_version.is_null(): return self <= max_version else: return min_version <= self <= max_version def get_string(self): """Returns a string representation of this object. If this method is used to create an APIVersionRequest, the resulting object will be an equivalent request. """ if self.is_null(): raise ValueError return ("%(major)s.%(minor)s" % {'major': self._ver_major, 'minor': self._ver_minor}) manila-2.0.0/manila/api/openstack/rest_api_version_history.rst0000664000567000056710000000554412701407107026011 0ustar jenkinsjenkins00000000000000REST API Version History ======================== This documents the changes made to the REST API with every microversion change. The description for each version should be a verbose one which has enough information to be suitable for use in user documentation. 1.0 --- The 1.0 Manila API includes all v1 core APIs existing prior to the introduction of microversions. The /v1 URL is used to call 1.0 APIs, and microversions headers sent to this endpoint are ignored. 2.0 --- This is the initial version of the Manila API which supports microversions. 
The /v2 URL is used to call 2.x APIs. A user can specify a header in the API request:: X-OpenStack-Manila-API-Version: where ```` is any valid api version for this API. If no version is specified then the API will behave as if version 2.0 was requested. The only API change in version 2.0 is versions, i.e. GET http://localhost:8786/, which now returns information about both 1.0 and 2.x versions and their respective /v1 and /v2 endpoints. All other 2.0 APIs are functionally identical to version 1.0. 2.1 --- Share create() method doesn't ignore availability_zone field of provided share. 2.2 --- Snapshots become optional and share payload now has boolean attr 'snapshot_support'. 2.3 --- Share instances admin API and update of Admin Actions extension. 2.4 --- Consistency groups support. /consistency-groups and /cgsnapshots are implemented. AdminActions 'os-force_delete and' 'os-reset_status' have been updated for both new resources. 2.5 --- Share Migration admin API. 2.6 --- Return share_type UUID instead of name in Share API and add share_type_name field. 2.7 --- Rename old extension-like API URLs to core-API-like. 2.8 --- Allow to set share visibility explicitly using "manage" API. 2.9 --- Add export locations API. Remove export locations from "shares" and "share instances" APIs. 2.10 ---- Field 'access_rules_status' was added to shares and share instances. 2.11 ---- Share Replication support added. All Share replication APIs are tagged 'Experimental'. Share APIs return two new attributes: 'has_replicas' and 'replication_type'. Share instance APIs return a new attribute, 'replica_state'. 2.12 ---- Share snapshot manage and unmanage API. 2.13 ---- Add 'cephx' authentication type for the CephFS Native driver. 2.14 ____ Added attribute 'preferred' to export locations. Drivers may use this field to identify which export locations are most efficient and should be used preferentially by clients. Also, change 'uuid' field to 'id', move timestamps to detail view, and return all non-admin fields to users. 2.15 ---- Added Share migration 'migration_cancel', 'migration_get_progress', 'migration_complete' APIs, renamed 'migrate_share' to 'migration_start' and added notify parameter to 'migration_start'. manila-2.0.0/manila/api/contrib/0000775000567000056710000000000012701407265017571 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/api/contrib/__init__.py0000664000567000056710000000226412701407107021701 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Contrib contains extensions that are shipped with manila. It can't be called 'extensions' because that causes namespacing problems. 
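The factories here are meant to be referenced from API configuration; a
typical (illustrative) setting would be::

    osapi_share_extension = manila.api.contrib.standard_extensions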
""" from oslo_config import cfg from oslo_log import log from manila.api import extensions CONF = cfg.CONF LOG = log.getLogger(__name__) def standard_extensions(ext_mgr): extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__) def select_extensions(ext_mgr): extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__, CONF.osapi_share_ext_list) manila-2.0.0/manila/api/extensions.py0000664000567000056710000002756312701407107020712 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack LLC. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_config import cfg from oslo_log import log from oslo_utils import importutils import six import webob.dec import webob.exc import manila.api.openstack from manila.api.openstack import wsgi from manila import exception from manila.i18n import _LE from manila.i18n import _LI from manila.i18n import _LW import manila.policy CONF = cfg.CONF LOG = log.getLogger(__name__) class ExtensionDescriptor(object): """Base class that defines the contract for extensions. Note that you don't have to derive from this class to have a valid extension; it is purely a convenience. """ # The name of the extension, e.g., 'Fox In Socks' name = None # The alias for the extension, e.g., 'FOXNSOX' alias = None # Description comes from the docstring for the class # The timestamp when the extension was last updated, e.g., # '2011-01-22T13:25:27-06:00' updated = None def __init__(self, ext_mgr): """Register extension with the extension manager.""" ext_mgr.register(self) self.ext_mgr = ext_mgr def get_resources(self): """List of extensions.ResourceExtension extension objects. Resources define new nouns, and are accessible through URLs. """ resources = [] return resources def get_controller_extensions(self): """List of extensions.ControllerExtension extension objects. Controller extensions are used to extend existing controllers. """ controller_exts = [] return controller_exts class ExtensionsResource(wsgi.Resource): def __init__(self, extension_manager): self.extension_manager = extension_manager super(ExtensionsResource, self).__init__(None) def _translate(self, ext): ext_data = {} ext_data['name'] = ext.name ext_data['alias'] = ext.alias ext_data['description'] = ext.__doc__ ext_data['updated'] = ext.updated ext_data['links'] = [] # TODO(dprince): implement extension links return ext_data def index(self, req): extensions = [] for _alias, ext in self.extension_manager.extensions.items(): extensions.append(self._translate(ext)) return dict(extensions=extensions) def show(self, req, id): try: # NOTE(dprince): the extensions alias is used as the 'id' for show ext = self.extension_manager.extensions[id] except KeyError: raise webob.exc.HTTPNotFound() return dict(extension=self._translate(ext)) def delete(self, req, id): raise webob.exc.HTTPNotFound() def create(self, req): raise webob.exc.HTTPNotFound() class ExtensionManager(object): """Load extensions from the configured extension path. 
See manila/tests/api/extensions/foxinsocks/extension.py for an example extension implementation. """ def __init__(self): LOG.info(_LI('Initializing extension manager.')) self.cls_list = CONF.osapi_share_extension self.extensions = {} self._load_extensions() def is_loaded(self, alias): return alias in self.extensions def register(self, ext): # Do nothing if the extension doesn't check out if not self._check_extension(ext): return alias = ext.alias LOG.info(_LI('Loaded extension: %s'), alias) if alias in self.extensions: raise exception.Error("Found duplicate extension: %s" % alias) self.extensions[alias] = ext def get_resources(self): """Returns a list of ResourceExtension objects.""" resources = [] resources.append(ResourceExtension('extensions', ExtensionsResource(self))) for ext in self.extensions.values(): try: resources.extend(ext.get_resources()) except AttributeError: # NOTE(dprince): Extension aren't required to have resource # extensions pass return resources def get_controller_extensions(self): """Returns a list of ControllerExtension objects.""" controller_exts = [] for ext in self.extensions.values(): try: get_ext_method = ext.get_controller_extensions except AttributeError: # NOTE(Vek): Extensions aren't required to have # controller extensions continue controller_exts.extend(get_ext_method()) return controller_exts def _check_extension(self, extension): """Checks for required methods in extension objects.""" try: LOG.debug('Ext name: %s', extension.name) LOG.debug('Ext alias: %s', extension.alias) LOG.debug('Ext description: %s', ' '.join(extension.__doc__.strip().split())) LOG.debug('Ext updated: %s', extension.updated) except AttributeError as ex: LOG.exception(_LE("Exception loading extension: %s"), six.text_type(ex)) return False return True def load_extension(self, ext_factory): """Execute an extension factory. Loads an extension. The 'ext_factory' is the name of a callable that will be imported and called with one argument--the extension manager. The factory callable is expected to call the register() method at least once. """ LOG.debug("Loading extension %s", ext_factory) # Load the factory factory = importutils.import_class(ext_factory) # Call it LOG.debug("Calling extension factory %s", ext_factory) factory(self) def _load_extensions(self): """Load extensions specified on the command line.""" extensions = list(self.cls_list) # NOTE(thingee): Backwards compat for the old extension loader path. # We can drop this post-grizzly in the H release. old_contrib_path = ('manila.api.openstack.share.contrib.' 'standard_extensions') new_contrib_path = 'manila.api.contrib.standard_extensions' if old_contrib_path in extensions: LOG.warning(_LW('osapi_share_extension is set to deprecated path: ' '%s.'), old_contrib_path) LOG.warning(_LW('Please set your flag or manila.conf settings for ' 'osapi_share_extension to: %s.'), new_contrib_path) extensions = [e.replace(old_contrib_path, new_contrib_path) for e in extensions] for ext_factory in extensions: try: self.load_extension(ext_factory) except Exception as exc: LOG.warning(_LW('Failed to load extension %(ext_factory)s: ' '%(exc)s.'), {"ext_factory": ext_factory, "exc": exc}) class ControllerExtension(object): """Extend core controllers of manila OpenStack API. Provide a way to extend existing manila OpenStack API core controllers. 
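    For example (all names below are hypothetical), an extension's
    get_controller_extensions() would typically build and return::

        controller = FooController()
        extension = extensions.ControllerExtension(self, 'shares', controller)
        return [extension]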
""" def __init__(self, extension, collection, controller): self.extension = extension self.collection = collection self.controller = controller class ResourceExtension(object): """Add top level resources to the OpenStack API in manila.""" def __init__(self, collection, controller, parent=None, collection_actions=None, member_actions=None, custom_routes_fn=None): if not collection_actions: collection_actions = {} if not member_actions: member_actions = {} self.collection = collection self.controller = controller self.parent = parent self.collection_actions = collection_actions self.member_actions = member_actions self.custom_routes_fn = custom_routes_fn def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None): """Registers all standard API extensions.""" # Walk through all the modules in our directory... our_dir = path[0] for dirpath, dirnames, filenames in os.walk(our_dir): # Compute the relative package name from the dirpath relpath = os.path.relpath(dirpath, our_dir) if relpath == '.': relpkg = '' else: relpkg = '.%s' % '.'.join(relpath.split(os.sep)) # Now, consider each file in turn, only considering .py files for fname in filenames: root, ext = os.path.splitext(fname) # Skip __init__ and anything that's not .py if ext != '.py' or root == '__init__': continue # Try loading it classname = "%s%s" % (root[0].upper(), root[1:]) classpath = ("%s%s.%s.%s" % (package, relpkg, root, classname)) if ext_list is not None and classname not in ext_list: logger.debug("Skipping extension: %s" % classpath) continue try: ext_mgr.load_extension(classpath) except Exception as exc: logger.warning(_LW('Failed to load extension %(classpath)s: ' '%(exc)s.'), {"classpath": classpath, "exc": exc}) # Now, let's consider any subdirectories we may have... subdirs = [] for dname in dirnames: # Skip it if it does not have __init__.py if not os.path.exists(os.path.join(dirpath, dname, '__init__.py')): continue # If it has extension(), delegate... ext_name = ("%s%s.%s.extension" % (package, relpkg, dname)) try: ext = importutils.import_class(ext_name) except ImportError: # extension() doesn't exist on it, so we'll explore # the directory for ourselves subdirs.append(dname) else: try: ext(ext_mgr) except Exception as exc: logger.warning(_LW('Failed to load extension ' '%(ext_name)s: %(exc)s.'), {"ext_name": ext_name, "exc": exc}) # Update the list of directories we'll explore... dirnames[:] = subdirs def extension_authorizer(api_name, extension_name): def authorize(context, target=None, action=None): if target is None: target = {'project_id': context.project_id, 'user_id': context.user_id} if action is None: act = '%s_extension:%s' % (api_name, extension_name) else: act = '%s_extension:%s:%s' % (api_name, extension_name, action) manila.policy.enforce(context, act, target) return authorize def soft_extension_authorizer(api_name, extension_name): hard_authorize = extension_authorizer(api_name, extension_name) def authorize(context): try: hard_authorize(context) return True except exception.NotAuthorized: return False return authorize manila-2.0.0/manila/api/views/0000775000567000056710000000000012701407265017266 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/api/views/shares.py0000664000567000056710000001366512701407107021133 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.api import common class ViewBuilder(common.ViewBuilder): """Model a server API response as a python dictionary.""" _collection_name = 'shares' _detail_version_modifiers = [ "add_snapshot_support_field", "add_consistency_group_fields", "add_task_state_field", "modify_share_type_field", "remove_export_locations", "add_access_rules_status_field", "add_replication_fields", ] def summary_list(self, request, shares): """Show a list of shares without many details.""" return self._list_view(self.summary, request, shares) def detail_list(self, request, shares): """Detailed view of a list of shares.""" return self._list_view(self.detail, request, shares) def summary(self, request, share): """Generic, non-detailed view of a share.""" return { 'share': { 'id': share.get('id'), 'name': share.get('display_name'), 'links': self._get_links(request, share['id']) } } def detail(self, request, share): """Detailed view of a single share.""" context = request.environ['manila.context'] metadata = share.get('share_metadata') if metadata: metadata = {item['key']: item['value'] for item in metadata} else: metadata = {} export_locations = share.get('export_locations', []) if share['share_type_id'] and share.get('share_type'): share_type = share['share_type']['name'] else: share_type = share['share_type_id'] share_instance = share.get('instance') or {} share_dict = { 'id': share.get('id'), 'size': share.get('size'), 'availability_zone': share_instance.get('availability_zone'), 'created_at': share.get('created_at'), 'status': share.get('status'), 'name': share.get('display_name'), 'description': share.get('display_description'), 'project_id': share.get('project_id'), 'host': share_instance.get('host'), 'snapshot_id': share.get('snapshot_id'), 'share_network_id': share_instance.get('share_network_id'), 'share_proto': share.get('share_proto'), 'export_location': share.get('export_location'), 'metadata': metadata, 'share_type': share_type, 'volume_type': share_type, 'links': self._get_links(request, share['id']), 'is_public': share.get('is_public'), 'export_locations': export_locations, } self.update_versioned_resource_dict(request, share_dict, share) if context.is_admin: share_dict['share_server_id'] = share_instance.get( 'share_server_id') return {'share': share_dict} def migration_get_progress(self, progress): result = { 'total_progress': progress['total_progress'], 'current_file_path': progress['current_file_path'], 'current_file_progress': progress['current_file_progress'] } return result @common.ViewBuilder.versioned_method("2.2") def add_snapshot_support_field(self, share_dict, share): share_dict['snapshot_support'] = share.get('snapshot_support') @common.ViewBuilder.versioned_method("2.4") def add_consistency_group_fields(self, share_dict, share): share_dict['consistency_group_id'] = share.get( 'consistency_group_id') share_dict['source_cgsnapshot_member_id'] = share.get( 'source_cgsnapshot_member_id') @common.ViewBuilder.versioned_method("2.5") def add_task_state_field(self, share_dict, share): share_dict['task_state'] = share.get('task_state') @common.ViewBuilder.versioned_method("2.6") def 
modify_share_type_field(self, share_dict, share): share_type = share.get('share_type_id') share_type_name = None if share.get('share_type'): share_type_name = share.get('share_type').get('name') share_dict.update({ 'share_type_name': share_type_name, 'share_type': share_type, }) @common.ViewBuilder.versioned_method("2.9") def remove_export_locations(self, share_dict, share): share_dict.pop('export_location') share_dict.pop('export_locations') @common.ViewBuilder.versioned_method("2.10") def add_access_rules_status_field(self, share_dict, share): share_dict['access_rules_status'] = share.get('access_rules_status') @common.ViewBuilder.versioned_method('2.11') def add_replication_fields(self, share_dict, share): share_dict['replication_type'] = share.get('replication_type') share_dict['has_replicas'] = share['has_replicas'] def _list_view(self, func, request, shares): """Provide a view for a list of shares.""" shares_list = [func(request, share)['share'] for share in shares] shares_links = self._get_collection_links(request, shares, self._collection_name) shares_dict = dict(shares=shares_list) if shares_links: shares_dict['shares_links'] = shares_links return shares_dict manila-2.0.0/manila/api/views/limits.py0000664000567000056710000000724512701407107021144 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from oslo_utils import timeutils class ViewBuilder(object): """OpenStack API base limits view builder.""" def build(self, rate_limits, absolute_limits): rate_limits = self._build_rate_limits(rate_limits) absolute_limits = self._build_absolute_limits(absolute_limits) output = { "limits": { "rate": rate_limits, "absolute": absolute_limits, }, } return output def _build_absolute_limits(self, absolute_limits): """Builder for absolute limits. absolute_limits should be given as a dict of limits. For example: {"limit": {"shares": 10, "gigabytes": 1024}, "in_use": {"shares": 8, "gigabytes": 256}}. 
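With that illustrative input, the name mapping below would emit "maxTotalShares": 10, "maxTotalShareGigabytes": 1024, "totalSharesUsed": 8 and "totalShareGigabytesUsed": 256; keys that are not in the mapping (or whose value is None) are simply dropped.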
""" limit_names = { "limit": { "gigabytes": ["maxTotalShareGigabytes"], "snapshot_gigabytes": ["maxTotalSnapshotGigabytes"], "shares": ["maxTotalShares"], "snapshots": ["maxTotalShareSnapshots"], "share_networks": ["maxTotalShareNetworks"], }, "in_use": { "shares": ["totalSharesUsed"], "snapshots": ["totalShareSnapshotsUsed"], "share_networks": ["totalShareNetworksUsed"], "gigabytes": ["totalShareGigabytesUsed"], "snapshot_gigabytes": ["totalSnapshotGigabytesUsed"], }, } limits = {} for mapping_key in limit_names.keys(): for k, v in absolute_limits.get(mapping_key, {}).items(): if k in limit_names.get(mapping_key, []) and v is not None: for name in limit_names[mapping_key][k]: limits[name] = v return limits def _build_rate_limits(self, rate_limits): limits = [] for rate_limit in rate_limits: _rate_limit_key = None _rate_limit = self._build_rate_limit(rate_limit) # check for existing key for limit in limits: if (limit["uri"] == rate_limit["URI"] and limit["regex"] == rate_limit["regex"]): _rate_limit_key = limit break # ensure we have a key if we didn't find one if not _rate_limit_key: _rate_limit_key = { "uri": rate_limit["URI"], "regex": rate_limit["regex"], "limit": [], } limits.append(_rate_limit_key) _rate_limit_key["limit"].append(_rate_limit) return limits def _build_rate_limit(self, rate_limit): _get_utc = datetime.datetime.utcfromtimestamp next_avail = _get_utc(rate_limit["resetTime"]) return { "verb": rate_limit["verb"], "value": rate_limit["value"], "remaining": int(rate_limit["remaining"]), "unit": rate_limit["unit"], "next-available": timeutils.isotime(at=next_avail), } manila-2.0.0/manila/api/views/versions.py0000664000567000056710000000426212701407107021507 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack LLC. # Copyright 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import re from six.moves import urllib def get_view_builder(req): return ViewBuilder(req.application_url) _URL_SUFFIX = {'v1.0': 'v1', 'v2.0': 'v2'} class ViewBuilder(object): def __init__(self, base_url): """Initialize ViewBuilder. 
:param base_url: url of the root wsgi application """ self.base_url = base_url def build_versions(self, versions): views = [self._build_version(versions[key]) for key in sorted(list(versions.keys()))] return dict(versions=views) def _build_version(self, version): view = copy.deepcopy(version) view['links'] = self._build_links(version) return view def _build_links(self, version_data): """Generate a container of links that refer to the provided version.""" links = copy.deepcopy(version_data.get('links', {})) version = _URL_SUFFIX.get(version_data['id']) links.append({'rel': 'self', 'href': self._generate_href(version=version)}) return links def _generate_href(self, version='v1', path=None): """Create a URL that refers to a specific version_number.""" base_url = self._get_base_url_without_version() href = urllib.parse.urljoin(base_url, version).rstrip('/') + '/' if path: href += path.lstrip('/') return href def _get_base_url_without_version(self): """Get the base URL with out the /v1 suffix.""" return re.sub('v[1-9]+/?$', '', self.base_url) manila-2.0.0/manila/api/views/types.py0000664000567000056710000000557112701407107021007 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
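# Illustrative example (the 'gold' type and its specs are hypothetical): the
# ViewBuilder below trims a share type down to its public attributes, so
# show(request, share_type, brief=True) returns roughly
#
#     {'id': '<uuid>', 'name': 'gold',
#      'extra_specs': {...}, 'required_extra_specs': {...}}
#
# Requests at API microversion 2.7 or later additionally get a
# 'share_type_access:is_public' key, while microversions 1.0-2.6 get the
# extension-style 'os-share-type-access:is_public' key instead; non-admin
# callers only see tenant-visible extra specs.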
from manila.api import common from manila.share import share_types class ViewBuilder(common.ViewBuilder): _collection_name = 'types' _detail_version_modifiers = [ "add_is_public_attr_core_api_like", "add_is_public_attr_extension_like", ] def show(self, request, share_type, brief=False): """Trim away extraneous share type attributes.""" extra_specs = share_type.get('extra_specs', {}) required_extra_specs = share_type.get('required_extra_specs', {}) # Remove non-tenant-visible extra specs in a non-admin context if not request.environ['manila.context'].is_admin: extra_spec_names = share_types.get_tenant_visible_extra_specs() extra_specs = self._filter_extra_specs(extra_specs, extra_spec_names) required_extra_specs = self._filter_extra_specs( required_extra_specs, extra_spec_names) trimmed = { 'id': share_type.get('id'), 'name': share_type.get('name'), 'extra_specs': extra_specs, 'required_extra_specs': required_extra_specs, } self.update_versioned_resource_dict(request, trimmed, share_type) if brief: return trimmed else: return dict(volume_type=trimmed, share_type=trimmed) @common.ViewBuilder.versioned_method("2.7") def add_is_public_attr_core_api_like(self, share_type_dict, share_type): share_type_dict['share_type_access:is_public'] = share_type.get( 'is_public', True) @common.ViewBuilder.versioned_method("1.0", "2.6") def add_is_public_attr_extension_like(self, share_type_dict, share_type): share_type_dict['os-share-type-access:is_public'] = share_type.get( 'is_public', True) def index(self, request, share_types): """Index over trimmed share types.""" share_types_list = [self.show(request, share_type, True) for share_type in share_types] return dict(volume_types=share_types_list, share_types=share_types_list) def _filter_extra_specs(self, extra_specs, valid_keys): return {key: value for key, value in extra_specs.items() if key in valid_keys} manila-2.0.0/manila/api/views/quota_sets.py0000664000567000056710000000221212701407107022017 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.api import common class ViewBuilder(common.ViewBuilder): _collection_name = "quota_set" def detail_list(self, quota_set, project_id=None): """Detailed view of quota set.""" keys = ( 'shares', 'gigabytes', 'snapshots', 'snapshot_gigabytes', 'share_networks', ) view = {key: quota_set.get(key) for key in keys} if project_id: view['id'] = project_id return {self._collection_name: view} manila-2.0.0/manila/api/views/export_locations.py0000664000567000056710000000622612701407107023235 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import strutils from manila.api import common class ViewBuilder(common.ViewBuilder): """Model export-locations API responses as a python dictionary.""" _collection_name = "export_locations" _detail_version_modifiers = [ 'add_preferred_path_attribute', ] def _get_export_location_view(self, request, export_location, detail=False): context = request.environ['manila.context'] view = { 'id': export_location['uuid'], 'path': export_location['path'], } self.update_versioned_resource_dict(request, view, export_location) if context.is_admin: view['share_instance_id'] = export_location[ 'share_instance_id'] view['is_admin_only'] = export_location['is_admin_only'] if detail: view['created_at'] = export_location['created_at'] view['updated_at'] = export_location['updated_at'] return {'export_location': view} def summary(self, request, export_location): """Summary view of a single export location.""" return self._get_export_location_view(request, export_location, detail=False) def detail(self, request, export_location): """Detailed view of a single export location.""" return self._get_export_location_view(request, export_location, detail=True) def _list_export_locations(self, request, export_locations, detail=False): """View of export locations list.""" view_method = self.detail if detail else self.summary return {self._collection_name: [ view_method(request, export_location)['export_location'] for export_location in export_locations ]} def detail_list(self, request, export_locations): """Detailed View of export locations list.""" return self._list_export_locations(request, export_locations, detail=True) def summary_list(self, request, export_locations): """Summary View of export locations list.""" return self._list_export_locations(request, export_locations, detail=False) @common.ViewBuilder.versioned_method('2.14') def add_preferred_path_attribute(self, view_dict, export_location): view_dict['preferred'] = strutils.bool_from_string( export_location['el_metadata'].get('preferred')) manila-2.0.0/manila/api/views/share_servers.py0000664000567000056710000000371612701407107022515 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
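# Illustrative example (field values are hypothetical): the ViewBuilder below
# renders share servers in two shapes. build_share_servers() returns the
# brief form, e.g.
#
#     {'share_servers': [{'id': '<uuid>', 'project_id': '<uuid>',
#                         'updated_at': '2016-03-01T00:00:00.000000',
#                         'status': 'active', 'host': 'host@backend',
#                         'share_network_name': 'net1',
#                         'share_network_id': '<uuid>'}]}
#
# while build_share_server() passes detailed=True and therefore also includes
# 'created_at' and 'backend_details'.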
from manila.api import common class ViewBuilder(common.ViewBuilder): """Model a server API response as a python dictionary.""" _collection_name = 'share_servers' def build_share_server(self, share_server): """View of a share server.""" return { 'share_server': self._build_share_server_view(share_server, detailed=True) } def build_share_servers(self, share_servers): return { 'share_servers': [self._build_share_server_view(share_server) for share_server in share_servers] } def build_share_server_details(self, details): return {'details': details} def _build_share_server_view(self, share_server, detailed=False): share_server_dict = { 'id': share_server.id, 'project_id': share_server.project_id, 'updated_at': share_server.updated_at, 'status': share_server.status, 'host': share_server.host, 'share_network_name': share_server.share_network_name, 'share_network_id': share_server.share_network_id, } if detailed: share_server_dict['created_at'] = share_server.created_at share_server_dict['backend_details'] = share_server.backend_details return share_server_dict manila-2.0.0/manila/api/views/share_networks.py0000664000567000056710000000422312701407107022672 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.api import common class ViewBuilder(common.ViewBuilder): """Model a server API response as a python dictionary.""" _collection_name = 'share_networks' def build_share_network(self, share_network): """View of a share network.""" return {'share_network': self._build_share_network_view(share_network)} def build_share_networks(self, share_networks, is_detail=True): return {'share_networks': [self._build_share_network_view(share_network, is_detail) for share_network in share_networks]} def _build_share_network_view(self, share_network, is_detail=True): sn = { 'id': share_network.get('id'), 'name': share_network.get('name'), } if is_detail: sn.update({ 'project_id': share_network.get('project_id'), 'created_at': share_network.get('created_at'), 'updated_at': share_network.get('updated_at'), 'neutron_net_id': share_network.get('neutron_net_id'), 'neutron_subnet_id': share_network.get('neutron_subnet_id'), 'nova_net_id': share_network.get('nova_net_id'), 'network_type': share_network.get('network_type'), 'segmentation_id': share_network.get('segmentation_id'), 'cidr': share_network.get('cidr'), 'ip_version': share_network.get('ip_version'), 'description': share_network.get('description'), }) return sn manila-2.0.0/manila/api/views/__init__.py0000664000567000056710000000000012701407107021360 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/api/views/share_instance.py0000664000567000056710000000637412701407107022633 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.api import common class ViewBuilder(common.ViewBuilder): """Model a server API response as a python dictionary.""" _collection_name = 'share_instances' _detail_version_modifiers = [ "remove_export_locations", "add_access_rules_status_field", "add_replication_fields", ] def detail_list(self, request, instances): """Detailed view of a list of share instances.""" return self._list_view(self.detail, request, instances) def detail(self, request, share_instance): """Detailed view of a single share instance.""" export_locations = [e['path'] for e in share_instance.export_locations] instance_dict = { 'id': share_instance.get('id'), 'share_id': share_instance.get('share_id'), 'availability_zone': share_instance.get('availability_zone'), 'created_at': share_instance.get('created_at'), 'host': share_instance.get('host'), 'status': share_instance.get('status'), 'share_network_id': share_instance.get('share_network_id'), 'share_server_id': share_instance.get('share_server_id'), 'export_location': share_instance.get('export_location'), 'export_locations': export_locations, } self.update_versioned_resource_dict( request, instance_dict, share_instance) return {'share_instance': instance_dict} def _list_view(self, func, request, instances): """Provide a view for a list of share instances.""" instances_list = [func(request, instance)['share_instance'] for instance in instances] instances_links = self._get_collection_links(request, instances, self._collection_name) instances_dict = {self._collection_name: instances_list} if instances_links: instances_dict[self._collection_name] = instances_links return instances_dict @common.ViewBuilder.versioned_method("2.9") def remove_export_locations(self, share_instance_dict, share_instance): share_instance_dict.pop('export_location') share_instance_dict.pop('export_locations') @common.ViewBuilder.versioned_method("2.10") def add_access_rules_status_field(self, instance_dict, share_instance): instance_dict['access_rules_status'] = ( share_instance.get('access_rules_status') ) @common.ViewBuilder.versioned_method("2.11") def add_replication_fields(self, instance_dict, share_instance): instance_dict['replica_state'] = share_instance.get('replica_state') manila-2.0.0/manila/api/views/services.py0000664000567000056710000000226312701407107021461 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
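# Illustrative example (row values are hypothetical): the ViewBuilder below
# offers two projections of a service record. summary() keeps only 'host',
# 'binary' and 'disabled', whereas detail_list() returns
#
#     {'services': [{'id': 1, 'binary': 'manila-share',
#                    'host': 'host@generic', 'zone': 'nova',
#                    'status': 'enabled', 'state': 'up',
#                    'updated_at': '2016-03-01T00:00:00.000000'}]}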
from manila.api import common class ViewBuilder(common.ViewBuilder): _collection_name = "services" def summary(self, service): """Summary view of a single service.""" keys = 'host', 'binary', 'disabled' return {key: service.get(key) for key in keys} def detail_list(self, services): """Detailed view of a list of services.""" keys = 'id', 'binary', 'host', 'zone', 'status', 'state', 'updated_at' views = [{key: s.get(key) for key in keys} for s in services] return {self._collection_name: views} manila-2.0.0/manila/api/views/share_replicas.py0000664000567000056710000000563112701407107022624 0ustar jenkinsjenkins00000000000000# Copyright 2015 Goutham Pacha Ravi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.api import common class ReplicationViewBuilder(common.ViewBuilder): """Model a server API response as a python dictionary.""" _collection_name = 'share_replicas' _collection_links = 'share_replica_links' def summary_list(self, request, replicas): """Summary view of a list of replicas.""" return self._list_view(self.summary, request, replicas) def detail_list(self, request, replicas): """Detailed view of a list of replicas.""" return self._list_view(self.detail, request, replicas) def summary(self, request, replica): """Generic, non-detailed view of a share replica.""" replica_dict = { 'id': replica.get('id'), 'share_id': replica.get('share_id'), 'status': replica.get('status'), 'replica_state': replica.get('replica_state'), } return {'share_replica': replica_dict} def detail(self, request, replica): """Detailed view of a single replica.""" context = request.environ['manila.context'] replica_dict = { 'id': replica.get('id'), 'share_id': replica.get('share_id'), 'availability_zone': replica.get('availability_zone'), 'created_at': replica.get('created_at'), 'host': replica.get('host'), 'status': replica.get('status'), 'share_network_id': replica.get('share_network_id'), 'replica_state': replica.get('replica_state'), 'updated_at': replica.get('updated_at'), } if context.is_admin: replica_dict['share_server_id'] = replica.get('share_server_id') return {'share_replica': replica_dict} def _list_view(self, func, request, replicas): """Provide a view for a list of replicas.""" replicas_list = [func(request, replica)['share_replica'] for replica in replicas] replica_links = self._get_collection_links(request, replicas, self._collection_name) replicas_dict = {self._collection_name: replicas_list} if replica_links: replicas_dict[self._collection_links] = replica_links return replicas_dict manila-2.0.0/manila/api/views/security_service.py0000664000567000056710000000522212701407107023223 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.api import common from manila.common import constants class ViewBuilder(common.ViewBuilder): """Model a server API response as a python dictionary.""" _collection_name = 'security_services' def summary_list(self, request, security_services): """Show a list of security services without many details.""" return self._list_view(self.summary, request, security_services) def detail_list(self, request, security_services): """Detailed view of a list of security services.""" return self._list_view(self.detail, request, security_services) def summary(self, request, security_service): """Generic, non-detailed view of a security service.""" return { 'security_service': { 'id': security_service.get('id'), 'name': security_service.get('name'), 'type': security_service.get('type'), # NOTE(vponomaryov): attr "status" was removed from model and # is left in view for compatibility purposes since it affects # user-facing API. This should be removed right after no one # uses it anymore. 'status': constants.STATUS_NEW, } } def detail(self, request, security_service): """Detailed view of a single security service.""" view = self.summary(request, security_service) keys = ( 'created_at', 'updated_at', 'description', 'dns_ip', 'server', 'domain', 'user', 'password', 'project_id') for key in keys: view['security_service'][key] = security_service.get(key) return view def _list_view(self, func, request, security_services): """Provide a view for a list of security services.""" security_services_list = [func(request, service)['security_service'] for service in security_services] security_services_dict = dict(security_services=security_services_list) return security_services_dict manila-2.0.0/manila/api/views/quota_class_sets.py0000664000567000056710000000223112701407107023205 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.api import common class ViewBuilder(common.ViewBuilder): _collection_name = "quota_class_set" def detail_list(self, quota_set, quota_class=None): """Detailed view of quota class set.""" keys = ( 'shares', 'gigabytes', 'snapshots', 'snapshot_gigabytes', 'share_networks', ) view = {key: quota_set.get(key) for key in keys} if quota_class: view['id'] = quota_class return {self._collection_name: view} manila-2.0.0/manila/api/views/scheduler_stats.py0000664000567000056710000000343212701407107023031 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 eBay Inc. # Copyright (c) 2015 Rushil Chugh # Copyright (c) 2015 Clinton Knight # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.api import common class ViewBuilder(common.ViewBuilder): """Model scheduler-stats API responses as a python dictionary.""" _collection_name = "scheduler-stats" def pool_summary(self, pool): """Summary view of a single pool.""" return { 'pool': { 'name': pool.get('name'), 'host': pool.get('host'), 'backend': pool.get('backend'), 'pool': pool.get('pool'), } } def pool_detail(self, pool): """Detailed view of a single pool.""" return { 'pool': { 'name': pool.get('name'), 'host': pool.get('host'), 'backend': pool.get('backend'), 'pool': pool.get('pool'), 'capabilities': pool.get('capabilities'), } } def pools(self, pools, detail=False): """View of a list of pools seen by scheduler.""" view_method = self.pool_detail if detail else self.pool_summary return {"pools": [view_method(pool)['pool'] for pool in pools]} manila-2.0.0/manila/api/views/consistency_groups.py0000664000567000056710000000561112701407107023576 0ustar jenkinsjenkins00000000000000# Copyright 2015 Alex Meade # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
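# Illustrative example (values hypothetical): CGViewBuilder.detail() below
# renders a consistency group roughly as
#
#     {'consistency_group': {'id': '<uuid>', 'name': 'cg1',
#                            'created_at': '2016-03-01T00:00:00.000000',
#                            'status': 'available', 'description': None,
#                            'project_id': '<uuid>', 'host': 'host@backend',
#                            'source_cgsnapshot_id': None,
#                            'share_network_id': None,
#                            'share_types': ['<share_type_id>'],
#                            'links': [...]}}
#
# with an extra 'share_server_id' key for admin contexts.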
"""The consistency groups API.""" from manila.api import common class CGViewBuilder(common.ViewBuilder): """Model a consistency group API response as a python dictionary.""" _collection_name = 'consistency_groups' def summary_list(self, request, cgs): """Show a list of consistency groups without many details.""" return self._list_view(self.summary, request, cgs) def detail_list(self, request, cgs): """Detailed view of a list of consistency groups.""" return self._list_view(self.detail, request, cgs) def summary(self, request, cg): """Generic, non-detailed view of a consistency group.""" return { 'consistency_group': { 'id': cg.get('id'), 'name': cg.get('name'), 'links': self._get_links(request, cg['id']) } } def detail(self, request, cg): """Detailed view of a single consistency group.""" context = request.environ['manila.context'] cg_dict = { 'id': cg.get('id'), 'name': cg.get('name'), 'created_at': cg.get('created_at'), 'status': cg.get('status'), 'description': cg.get('description'), 'project_id': cg.get('project_id'), 'host': cg.get('host'), 'source_cgsnapshot_id': cg.get('source_cgsnapshot_id'), 'share_network_id': cg.get('share_network_id'), 'share_types': [st['share_type_id'] for st in cg.get( 'share_types')], 'links': self._get_links(request, cg['id']), } if context.is_admin: cg_dict['share_server_id'] = cg_dict.get('share_server_id') return {'consistency_group': cg_dict} def _list_view(self, func, request, shares): """Provide a view for a list of consistency groups.""" cg_list = [func(request, share)['consistency_group'] for share in shares] cgs_links = self._get_collection_links(request, shares, self._collection_name) cgs_dict = dict(consistency_groups=cg_list) if cgs_links: cgs_dict['consistency_groups_links'] = cgs_links return cgs_dict manila-2.0.0/manila/api/views/availability_zones.py0000664000567000056710000000224512701407107023526 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.api import common class ViewBuilder(common.ViewBuilder): _collection_name = "availability_zones" def _detail(self, availability_zone): """Detailed view of a single availability zone.""" keys = ('id', 'name', 'created_at', 'updated_at') return {key: availability_zone.get(key) for key in keys} def detail_list(self, availability_zones): """Detailed view of a list of availability zones.""" azs = [self._detail(az) for az in availability_zones] return {self._collection_name: azs} manila-2.0.0/manila/api/views/cgsnapshots.py0000664000567000056710000000673712701407107022204 0ustar jenkinsjenkins00000000000000# Copyright 2015 Alex Meade # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The consistency groups snapshot API.""" from manila.api import common class CGSnapshotViewBuilder(common.ViewBuilder): """Model a cgsnapshot API response as a python dictionary.""" _collection_name = 'cgsnapshot' def summary_list(self, request, cgs): """Show a list of cgsnapshots without many details.""" return self._list_view(self.summary, request, cgs) def detail_list(self, request, cgs): """Detailed view of a list of cgsnapshots.""" return self._list_view(self.detail, request, cgs) def member_list(self, request, members): members_list = [] for member in members: member_dict = { 'id': member.get('id'), 'created_at': member.get('created_at'), 'size': member.get('size'), 'share_protocol': member.get('share_proto'), 'project_id': member.get('project_id'), 'share_type_id': member.get('share_type_id'), 'cgsnapshot_id': member.get('cgsnapshot_id'), 'share_id': member.get('share_id'), } members_list.append(member_dict) members_links = self._get_collection_links(request, members, 'cgsnapshot_id') members_dict = dict(cgsnapshot_members=members_list) if members_links: members_dict['cgsnapshot_members_links'] = members_links return members_dict def summary(self, request, cg): """Generic, non-detailed view of a cgsnapshot.""" return { 'cgsnapshot': { 'id': cg.get('id'), 'name': cg.get('name'), 'links': self._get_links(request, cg['id']) } } def detail(self, request, cg): """Detailed view of a single cgsnapshot.""" cg_dict = { 'id': cg.get('id'), 'name': cg.get('name'), 'created_at': cg.get('created_at'), 'status': cg.get('status'), 'description': cg.get('description'), 'project_id': cg.get('project_id'), 'consistency_group_id': cg.get('consistency_group_id'), 'links': self._get_links(request, cg['id']), } return {'cgsnapshot': cg_dict} def _list_view(self, func, request, snaps): """Provide a view for a list of cgsnapshots.""" snap_list = [func(request, snap)['cgsnapshot'] for snap in snaps] snaps_links = self._get_collection_links(request, snaps, self._collection_name) snaps_dict = dict(cgsnapshots=snap_list) if snaps_links: snaps_dict['cgsnapshot_links'] = snaps_links return snaps_dict manila-2.0.0/manila/api/views/share_snapshots.py0000664000567000056710000000644112701407107023044 0ustar jenkinsjenkins00000000000000# Copyright 2013 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
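# Illustrative example (values hypothetical): the ViewBuilder below returns a
# summary of just {'id', 'name', 'links'} per snapshot, while detail() adds
# 'share_id', 'share_size', 'created_at', 'status', 'description', 'size'
# and 'share_proto'. From API microversion 2.12 on, an admin context also
# receives 'provider_location'; non-admin requests never get the versioned
# extras because update_versioned_resource_dict() is only called for admins
# here.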
from manila.api import common class ViewBuilder(common.ViewBuilder): """Model a server API response as a python dictionary.""" _collection_name = 'snapshots' _detail_version_modifiers = [ "add_provider_location_field", ] def summary_list(self, request, snapshots): """Show a list of share snapshots without many details.""" return self._list_view(self.summary, request, snapshots) def detail_list(self, request, snapshots): """Detailed view of a list of share snapshots.""" return self._list_view(self.detail, request, snapshots) def summary(self, request, snapshot): """Generic, non-detailed view of an share snapshot.""" return { 'snapshot': { 'id': snapshot.get('id'), 'name': snapshot.get('display_name'), 'links': self._get_links(request, snapshot['id']) } } def detail(self, request, snapshot): """Detailed view of a single share snapshot.""" snapshot_dict = { 'id': snapshot.get('id'), 'share_id': snapshot.get('share_id'), 'share_size': snapshot.get('share_size'), 'created_at': snapshot.get('created_at'), 'status': snapshot.get('aggregate_status'), 'name': snapshot.get('display_name'), 'description': snapshot.get('display_description'), 'size': snapshot.get('size'), 'share_proto': snapshot.get('share_proto'), 'links': self._get_links(request, snapshot['id']), } # NOTE(xyang): Only retrieve provider_location for admin. context = request.environ['manila.context'] if context.is_admin: self.update_versioned_resource_dict(request, snapshot_dict, snapshot) return {'snapshot': snapshot_dict} @common.ViewBuilder.versioned_method("2.12") def add_provider_location_field(self, snapshot_dict, snapshot): snapshot_dict['provider_location'] = snapshot.get('provider_location') def _list_view(self, func, request, snapshots): """Provide a view for a list of share snapshots.""" snapshots_list = [func(request, snapshot)['snapshot'] for snapshot in snapshots] snapshots_links = self._get_collection_links(request, snapshots, self._collection_name) snapshots_dict = {self._collection_name: snapshots_list} if snapshots_links: snapshots_dict['share_snapshots_links'] = snapshots_links return snapshots_dict manila-2.0.0/manila/api/middleware/0000775000567000056710000000000012701407265020246 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/api/middleware/auth.py0000664000567000056710000001304212701407107021554 0ustar jenkinsjenkins00000000000000# Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Common Auth Middleware. """ import os from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils import webob.dec import webob.exc from manila.api.openstack import wsgi from manila import context from manila.i18n import _ from manila import wsgi as base_wsgi use_forwarded_for_opt = cfg.BoolOpt( 'use_forwarded_for', default=False, help='Treat X-Forwarded-For as the canonical remote address. 
' 'Only enable this if you have a sanitizing proxy.') CONF = cfg.CONF CONF.register_opt(use_forwarded_for_opt) LOG = log.getLogger(__name__) def pipeline_factory(loader, global_conf, **local_conf): """A paste pipeline replica that keys off of auth_strategy.""" pipeline = local_conf[CONF.auth_strategy] if not CONF.api_rate_limit: limit_name = CONF.auth_strategy + '_nolimit' pipeline = local_conf.get(limit_name, pipeline) pipeline = pipeline.split() filters = [loader.get_filter(n) for n in pipeline[:-1]] app = loader.get_app(pipeline[-1]) filters.reverse() for filter in filters: app = filter(app) return app class InjectContext(base_wsgi.Middleware): """Add a 'manila.context' to WSGI environ.""" def __init__(self, context, *args, **kwargs): self.context = context super(InjectContext, self).__init__(*args, **kwargs) @webob.dec.wsgify(RequestClass=base_wsgi.Request) def __call__(self, req): req.environ['manila.context'] = self.context return self.application class ManilaKeystoneContext(base_wsgi.Middleware): """Make a request context from keystone headers.""" @webob.dec.wsgify(RequestClass=base_wsgi.Request) def __call__(self, req): user_id = req.headers.get('X_USER') user_id = req.headers.get('X_USER_ID', user_id) if user_id is None: LOG.debug("Neither X_USER_ID nor X_USER found in request") return webob.exc.HTTPUnauthorized() # get the roles roles = [r.strip() for r in req.headers.get('X_ROLE', '').split(',')] if 'X_TENANT_ID' in req.headers: # This is the new header since Keystone went to ID/Name project_id = req.headers['X_TENANT_ID'] else: # This is for legacy compatibility project_id = req.headers['X_TENANT'] # Get the auth token auth_token = req.headers.get('X_AUTH_TOKEN', req.headers.get('X_STORAGE_TOKEN')) # Build a context, including the auth_token... remote_address = req.remote_addr if CONF.use_forwarded_for: remote_address = req.headers.get('X-Forwarded-For', remote_address) service_catalog = None if req.headers.get('X_SERVICE_CATALOG') is not None: try: catalog_header = req.headers.get('X_SERVICE_CATALOG') service_catalog = jsonutils.loads(catalog_header) except ValueError: raise webob.exc.HTTPInternalServerError( _('Invalid service catalog json.')) ctx = context.RequestContext(user_id, project_id, roles=roles, auth_token=auth_token, remote_address=remote_address, service_catalog=service_catalog) req.environ['manila.context'] = ctx return self.application class NoAuthMiddleware(base_wsgi.Middleware): """Return a fake token if one isn't specified.""" @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): if 'X-Auth-Token' not in req.headers: user_id = req.headers.get('X-Auth-User', 'admin') project_id = req.headers.get('X-Auth-Project-Id', 'admin') os_url = os.path.join(req.url, project_id) res = webob.Response() # NOTE(vish): This is expecting and returning Auth(1.1), whereas # keystone uses 2.0 auth. We should probably allow # 2.0 auth here as well. 
res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id) res.headers['X-Server-Management-Url'] = os_url res.content_type = 'text/plain' res.status = '204' return res token = req.headers['X-Auth-Token'] user_id, _sep, project_id = token.partition(':') project_id = project_id or user_id remote_address = getattr(req, 'remote_address', '127.0.0.1') if CONF.use_forwarded_for: remote_address = req.headers.get('X-Forwarded-For', remote_address) ctx = context.RequestContext(user_id, project_id, is_admin=True, remote_address=remote_address) req.environ['manila.context'] = ctx return self.application manila-2.0.0/manila/api/middleware/__init__.py0000664000567000056710000000000012701407107022340 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/api/middleware/fault.py0000664000567000056710000000540712701407107021734 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log import six import webob.dec import webob.exc from manila.api.openstack import wsgi from manila.i18n import _LE from manila.i18n import _LI from manila import utils from manila import wsgi as base_wsgi LOG = log.getLogger(__name__) class FaultWrapper(base_wsgi.Middleware): """Calls down the middleware stack, making exceptions into faults.""" _status_to_type = {} @staticmethod def status_to_type(status): if not FaultWrapper._status_to_type: for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError): FaultWrapper._status_to_type[clazz.code] = clazz return FaultWrapper._status_to_type.get( status, webob.exc.HTTPInternalServerError)() def _error(self, inner, req): LOG.exception(_LE("Caught error: %s"), six.text_type(inner)) safe = getattr(inner, 'safe', False) headers = getattr(inner, 'headers', None) status = getattr(inner, 'code', 500) if status is None: status = 500 msg_dict = dict(url=req.url, status=status) LOG.info(_LI("%(url)s returned with HTTP %(status)d"), msg_dict) outer = self.status_to_type(status) if headers: outer.headers = headers # NOTE(johannes): We leave the explanation empty here on # purpose. It could possibly have sensitive information # that should not be returned back to the user. 
See # bugs 868360 and 874472 # NOTE(eglynn): However, it would be over-conservative and # inconsistent with the EC2 API to hide every exception, # including those that are safe to expose, see bug 1021373 if safe: outer.explanation = '%s: %s' % (inner.__class__.__name__, six.text_type(inner)) return wsgi.Fault(outer) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): try: return req.get_response(self.application) except Exception as ex: return self._error(ex, req) manila-2.0.0/manila/api/middleware/sizelimit.py0000664000567000056710000000243212701407107022625 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Request Body limiting middleware. """ from oslo_log import log from oslo_middleware import sizelimit from manila.i18n import _LW LOG = log.getLogger(__name__) class RequestBodySizeLimiter(sizelimit.RequestBodySizeLimiter): def __init__(self, *args, **kwargs): LOG.warning(_LW('manila.api.sizelimit:RequestBodySizeLimiter and ' 'manila.api.middleware.sizelimit:' 'RequestBodySizeLimiter ' 'are deprecated. Please use ' 'oslo_middleware.sizelimit: ' 'RequestBodySizeLimiter instead.')) super(RequestBodySizeLimiter, self).__init__(*args, **kwargs) manila-2.0.0/manila/api/v2/0000775000567000056710000000000012701407265016460 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/api/v2/shares.py0000664000567000056710000001561512701407107020322 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
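# Illustrative example (request bodies are hypothetical): the v2 controller
# below routes share actions by API microversion. A POST to
# /v2/{project_id}/shares/{share_id}/action with the body
#
#     {"reset_status": {"status": "available"}}
#
# is handled by share_reset_status() at microversion 2.7 or later, while the
# same operation at 2.0-2.6 uses the legacy prefixed body
#
#     {"os-reset_status": {"status": "available"}}
#
# The migration_start/migration_complete/migration_cancel/
# migration_get_progress actions are experimental and only accepted from
# microversion 2.15 onwards.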
from manila.api.openstack import api_version_request as api_version from manila.api.openstack import wsgi from manila.api.v1 import share_manage from manila.api.v1 import share_unmanage from manila.api.v1 import shares from manila.api.views import shares as share_views from manila import share class ShareController(shares.ShareMixin, share_manage.ShareManageMixin, share_unmanage.ShareUnmanageMixin, wsgi.Controller, wsgi.AdminActionsMixin): """The Shares API v2 controller for the OpenStack API.""" resource_name = 'share' _view_builder_class = share_views.ViewBuilder def __init__(self): super(self.__class__, self).__init__() self.share_api = share.API() @wsgi.Controller.api_version("2.4") def create(self, req, body): return self._create(req, body) @wsgi.Controller.api_version("2.0", "2.3") # noqa def create(self, req, body): # pylint: disable=E0102 # Remove consistency group attributes body.get('share', {}).pop('consistency_group_id', None) share = self._create(req, body) return share @wsgi.Controller.api_version('2.0', '2.6') @wsgi.action('os-reset_status') def share_reset_status_legacy(self, req, id, body): return self._reset_status(req, id, body) @wsgi.Controller.api_version('2.7') @wsgi.action('reset_status') def share_reset_status(self, req, id, body): return self._reset_status(req, id, body) @wsgi.Controller.api_version('2.0', '2.6') @wsgi.action('os-force_delete') def share_force_delete_legacy(self, req, id, body): return self._force_delete(req, id, body) @wsgi.Controller.api_version('2.7') @wsgi.action('force_delete') def share_force_delete(self, req, id, body): return self._force_delete(req, id, body) @wsgi.Controller.api_version('2.5', '2.6', experimental=True) @wsgi.action("os-migrate_share") @wsgi.Controller.authorize("migration_start") def migrate_share_legacy(self, req, id, body): return self._migration_start(req, id, body) @wsgi.Controller.api_version('2.7', '2.14', experimental=True) @wsgi.action("migrate_share") @wsgi.Controller.authorize("migration_start") def migrate_share(self, req, id, body): return self._migration_start(req, id, body) @wsgi.Controller.api_version('2.15', experimental=True) @wsgi.action("migration_start") @wsgi.Controller.authorize def migration_start(self, req, id, body): return self._migration_start(req, id, body, check_notify=True) @wsgi.Controller.api_version('2.15', experimental=True) @wsgi.action("migration_complete") @wsgi.Controller.authorize def migration_complete(self, req, id, body): return self._migration_complete(req, id, body) @wsgi.Controller.api_version('2.15', experimental=True) @wsgi.action("migration_cancel") @wsgi.Controller.authorize def migration_cancel(self, req, id, body): return self._migration_cancel(req, id, body) @wsgi.Controller.api_version('2.15', experimental=True) @wsgi.action("migration_get_progress") @wsgi.Controller.authorize def migration_get_progress(self, req, id, body): return self._migration_get_progress(req, id, body) @wsgi.Controller.api_version('2.15', experimental=True) @wsgi.action("reset_task_state") @wsgi.Controller.authorize def reset_task_state(self, req, id, body): return self._reset_status(req, id, body, status_attr='task_state') @wsgi.Controller.api_version('2.0', '2.6') @wsgi.action('os-allow_access') def allow_access_legacy(self, req, id, body): """Add share access rule.""" return self._allow_access(req, id, body) @wsgi.Controller.api_version('2.7') @wsgi.action('allow_access') def allow_access(self, req, id, body): """Add share access rule.""" if req.api_version_request < 
api_version.APIVersionRequest("2.13"): return self._allow_access(req, id, body) else: return self._allow_access(req, id, body, enable_ceph=True) @wsgi.Controller.api_version('2.0', '2.6') @wsgi.action('os-deny_access') def deny_access_legacy(self, req, id, body): """Remove share access rule.""" return self._deny_access(req, id, body) @wsgi.Controller.api_version('2.7') @wsgi.action('deny_access') def deny_access(self, req, id, body): """Remove share access rule.""" return self._deny_access(req, id, body) @wsgi.Controller.api_version('2.0', '2.6') @wsgi.action('os-access_list') def access_list_legacy(self, req, id, body): """List share access rules.""" return self._access_list(req, id, body) @wsgi.Controller.api_version('2.7') @wsgi.action('access_list') def access_list(self, req, id, body): """List share access rules.""" return self._access_list(req, id, body) @wsgi.Controller.api_version('2.0', '2.6') @wsgi.action('os-extend') def extend_legacy(self, req, id, body): """Extend size of a share.""" return self._extend(req, id, body) @wsgi.Controller.api_version('2.7') @wsgi.action('extend') def extend(self, req, id, body): """Extend size of a share.""" return self._extend(req, id, body) @wsgi.Controller.api_version('2.0', '2.6') @wsgi.action('os-shrink') def shrink_legacy(self, req, id, body): """Shrink size of a share.""" return self._shrink(req, id, body) @wsgi.Controller.api_version('2.7') @wsgi.action('shrink') def shrink(self, req, id, body): """Shrink size of a share.""" return self._shrink(req, id, body) @wsgi.Controller.api_version('2.7', '2.7') def manage(self, req, body): body.get('share', {}).pop('is_public', None) detail = self._manage(req, body) return detail @wsgi.Controller.api_version("2.8") # noqa def manage(self, req, body): # pylint: disable=E0102 detail = self._manage(req, body) return detail @wsgi.Controller.api_version('2.7') @wsgi.action('unmanage') def unmanage(self, req, id, body=None): return self._unmanage(req, id, body) def create_resource(): return wsgi.Resource(ShareController()) manila-2.0.0/manila/api/v2/quota_sets.py0000664000567000056710000002217512701407107021223 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack LLC. # Copyright (c) 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from oslo_utils import strutils from six.moves.urllib import parse import webob from manila.api.openstack import wsgi from manila.api.views import quota_sets as quota_sets_views from manila import db from manila import exception from manila.i18n import _ from manila import quota QUOTAS = quota.QUOTAS LOG = log.getLogger(__name__) NON_QUOTA_KEYS = ('tenant_id', 'id', 'force') class QuotaSetsMixin(object): """The Quota Sets API controller common logic. Mixin class that should be inherited by Quota Sets API controllers, which are used for different API URLs and microversions. 
""" resource_name = "quota_set" _view_builder_class = quota_sets_views.ViewBuilder def _validate_quota_limit(self, limit, minimum, maximum, force_update): # NOTE: -1 is a flag value for unlimited if limit < -1: msg = _("Quota limit must be -1 or greater.") raise webob.exc.HTTPBadRequest(explanation=msg) if ((limit < minimum and not force_update) and (maximum != -1 or (maximum == -1 and limit != -1))): msg = _("Quota limit must be greater than %s.") % minimum raise webob.exc.HTTPBadRequest(explanation=msg) if maximum != -1 and limit > maximum: msg = _("Quota limit must be less than %s.") % maximum raise webob.exc.HTTPBadRequest(explanation=msg) def _get_quotas(self, context, id, user_id=None, usages=False): if user_id: values = QUOTAS.get_user_quotas(context, id, user_id, usages=usages) else: values = QUOTAS.get_project_quotas(context, id, usages=usages) if usages: return values return {k: v['limit'] for k, v in values.items()} @wsgi.Controller.authorize("show") def _show(self, req, id): context = req.environ['manila.context'] params = parse.parse_qs(req.environ.get('QUERY_STRING', '')) user_id = params.get('user_id', [None])[0] try: db.authorize_project_context(context, id) return self._view_builder.detail_list( self._get_quotas(context, id, user_id=user_id), id) except exception.NotAuthorized: raise webob.exc.HTTPForbidden() @wsgi.Controller.authorize('show') def _defaults(self, req, id): context = req.environ['manila.context'] return self._view_builder.detail_list(QUOTAS.get_defaults(context), id) @wsgi.Controller.authorize("update") def _update(self, req, id, body): context = req.environ['manila.context'] project_id = id bad_keys = [] force_update = False params = parse.parse_qs(req.environ.get('QUERY_STRING', '')) user_id = params.get('user_id', [None])[0] try: settable_quotas = QUOTAS.get_settable_quotas(context, project_id, user_id=user_id) except exception.NotAuthorized: raise webob.exc.HTTPForbidden() for key, value in body.get('quota_set', {}).items(): if (key not in QUOTAS and key not in NON_QUOTA_KEYS): bad_keys.append(key) continue if key == 'force': force_update = strutils.bool_from_string(value) elif key not in NON_QUOTA_KEYS and value: try: value = int(value) except (ValueError, TypeError): msg = _("Quota '%(value)s' for %(key)s should be " "integer.") % {'value': value, 'key': key} LOG.warning(msg) raise webob.exc.HTTPBadRequest(explanation=msg) LOG.debug("Force update quotas: %s.", force_update) if len(bad_keys) > 0: msg = _("Bad key(s) %s in quota_set.") % ",".join(bad_keys) raise webob.exc.HTTPBadRequest(explanation=msg) try: quotas = self._get_quotas(context, id, user_id=user_id, usages=True) except exception.NotAuthorized: raise webob.exc.HTTPForbidden() for key, value in body.get('quota_set', {}).items(): if key in NON_QUOTA_KEYS or (not value and value != 0): continue # validate whether already used and reserved exceeds the new # quota, this check will be ignored if admin want to force # update try: value = int(value) except (ValueError, TypeError): msg = _("Quota '%(value)s' for %(key)s should be " "integer.") % {'value': value, 'key': key} LOG.warning(msg) raise webob.exc.HTTPBadRequest(explanation=msg) if force_update is False and value >= 0: quota_value = quotas.get(key) if quota_value and quota_value['limit'] >= 0: quota_used = (quota_value['in_use'] + quota_value['reserved']) LOG.debug("Quota %(key)s used: %(quota_used)s, " "value: %(value)s.", {'key': key, 'quota_used': quota_used, 'value': value}) if quota_used > value: msg = (_("Quota value %(value)s for 
%(key)s are " "greater than already used and reserved " "%(quota_used)s.") % {'value': value, 'key': key, 'quota_used': quota_used}) raise webob.exc.HTTPBadRequest(explanation=msg) minimum = settable_quotas[key]['minimum'] maximum = settable_quotas[key]['maximum'] self._validate_quota_limit(value, minimum, maximum, force_update) try: db.quota_create(context, project_id, key, value, user_id=user_id) except exception.QuotaExists: db.quota_update(context, project_id, key, value, user_id=user_id) except exception.AdminRequired: raise webob.exc.HTTPForbidden() return self._view_builder.detail_list( self._get_quotas(context, id, user_id=user_id)) @wsgi.Controller.authorize("delete") def _delete(self, req, id): context = req.environ['manila.context'] params = parse.parse_qs(req.environ.get('QUERY_STRING', '')) user_id = params.get('user_id', [None])[0] try: db.authorize_project_context(context, id) if user_id: QUOTAS.destroy_all_by_project_and_user(context, id, user_id) else: QUOTAS.destroy_all_by_project(context, id) return webob.Response(status_int=202) except exception.NotAuthorized: raise webob.exc.HTTPForbidden() class QuotaSetsControllerLegacy(QuotaSetsMixin, wsgi.Controller): """Deprecated Quota Sets API controller. Used by legacy API v1 and v2 microversions from 2.0 to 2.6. Registered under deprecated API URL 'os-quota-sets'. """ @wsgi.Controller.api_version('1.0', '2.6') def show(self, req, id): return self._show(req, id) @wsgi.Controller.api_version('1.0', '2.6') def defaults(self, req, id): return self._defaults(req, id) @wsgi.Controller.api_version('1.0', '2.6') def update(self, req, id, body): return self._update(req, id, body) @wsgi.Controller.api_version('1.0', '2.6') def delete(self, req, id): return self._delete(req, id) class QuotaSetsController(QuotaSetsMixin, wsgi.Controller): """Quota Sets API controller. Used only by API v2 starting from microversion 2.7. Registered under API URL 'quota-sets'. """ @wsgi.Controller.api_version('2.7') def show(self, req, id): return self._show(req, id) @wsgi.Controller.api_version('2.7') def defaults(self, req, id): return self._defaults(req, id) @wsgi.Controller.api_version('2.7') def update(self, req, id, body): return self._update(req, id, body) @wsgi.Controller.api_version('2.7') def delete(self, req, id): return self._delete(req, id) def create_resource_legacy(): return wsgi.Resource(QuotaSetsControllerLegacy()) def create_resource(): return wsgi.Resource(QuotaSetsController()) manila-2.0.0/manila/api/v2/share_instance_export_locations.py0000664000567000056710000000526012701407107025472 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
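# Illustrative sketch (added for clarity; not part of the original tree):
# updating project quotas through the QuotaSetsController shown above, which
# is reachable under the 'quota-sets' URL starting with microversion 2.7.
# The endpoint, token and project id below are placeholder assumptions.
import json

import requests

QUOTA_HEADERS = {
    'X-Auth-Token': 'ADMIN_TOKEN',
    'Content-Type': 'application/json',
    'X-OpenStack-Manila-API-Version': '2.7',
}
quota_body = {'quota_set': {'shares': 50, 'gigabytes': 1000, 'force': True}}
resp = requests.put(
    'http://controller:8786/v2/PROJECT_ID/quota-sets/PROJECT_ID',
    headers=QUOTA_HEADERS, data=json.dumps(quota_body))
# A 400 is returned when a new limit is below current usage and 'force' is
# not set; 'force': True skips that check, as implemented in _update() above.
print(resp.status_code, resp.json())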
import six from webob import exc from manila.api.openstack import wsgi from manila.api.views import export_locations as export_locations_views from manila.db import api as db_api from manila import exception from manila.i18n import _ class ShareInstanceExportLocationController(wsgi.Controller): """The Share Instance Export Locations API controller.""" def __init__(self): self._view_builder_class = export_locations_views.ViewBuilder self.resource_name = 'share_instance_export_location' super(self.__class__, self).__init__() def _verify_share_instance(self, context, share_instance_id): try: db_api.share_instance_get(context, share_instance_id) except exception.NotFound: msg = _("Share instance '%s' not found.") % share_instance_id raise exc.HTTPNotFound(explanation=msg) @wsgi.Controller.api_version('2.9') @wsgi.Controller.authorize def index(self, req, share_instance_id): """Return a list of export locations for the share instance.""" context = req.environ['manila.context'] self._verify_share_instance(context, share_instance_id) export_locations = ( db_api.share_export_locations_get_by_share_instance_id( context, share_instance_id)) return self._view_builder.summary_list(req, export_locations) @wsgi.Controller.api_version('2.9') @wsgi.Controller.authorize def show(self, req, share_instance_id, export_location_uuid): """Return data about the requested export location.""" context = req.environ['manila.context'] self._verify_share_instance(context, share_instance_id) try: export_location = db_api.share_export_location_get_by_uuid( context, export_location_uuid) return self._view_builder.detail(req, export_location) except exception.ExportLocationNotFound as e: raise exc.HTTPNotFound(explanation=six.text_type(e)) def create_resource(): return wsgi.Resource(ShareInstanceExportLocationController()) manila-2.0.0/manila/api/v2/share_export_locations.py0000664000567000056710000000525612701407107023613 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
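# Illustrative sketch (added for clarity; not part of the original tree):
# fetching a single export location of a share instance through the
# ShareInstanceExportLocationController defined above.  Both routes require
# microversion 2.9 or later and are admin-only under the default policy;
# endpoint, token and ids are placeholder assumptions.
import requests

instance_headers = {
    'X-Auth-Token': 'ADMIN_TOKEN',
    'X-OpenStack-Manila-API-Version': '2.9',
}
location_url = ('http://controller:8786/v2/PROJECT_ID/share_instances/'
                'INSTANCE_ID/export_locations/EXPORT_LOCATION_UUID')
location = requests.get(location_url, headers=instance_headers).json()
print(location.get('export_location', {}).get('path'))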
from webob import exc from manila.api.openstack import wsgi from manila.api.views import export_locations as export_locations_views from manila.db import api as db_api from manila import exception from manila.i18n import _ class ShareExportLocationController(wsgi.Controller): """The Share Export Locations API controller.""" def __init__(self): self._view_builder_class = export_locations_views.ViewBuilder self.resource_name = 'share_export_location' super(self.__class__, self).__init__() def _verify_share(self, context, share_id): try: db_api.share_get(context, share_id) except exception.NotFound: msg = _("Share '%s' not found.") % share_id raise exc.HTTPNotFound(explanation=msg) @wsgi.Controller.api_version('2.9') @wsgi.Controller.authorize def index(self, req, share_id): """Return a list of export locations for share.""" context = req.environ['manila.context'] self._verify_share(context, share_id) export_locations = db_api.share_export_locations_get_by_share_id( context, share_id, include_admin_only=context.is_admin) return self._view_builder.summary_list(req, export_locations) @wsgi.Controller.api_version('2.9') @wsgi.Controller.authorize def show(self, req, share_id, export_location_uuid): """Return data about the requested export location.""" context = req.environ['manila.context'] self._verify_share(context, share_id) try: export_location = db_api.share_export_location_get_by_uuid( context, export_location_uuid) except exception.ExportLocationNotFound: msg = _("Export location '%s' not found.") % export_location_uuid raise exc.HTTPNotFound(explanation=msg) if export_location.is_admin_only and not context.is_admin: raise exc.HTTPForbidden() return self._view_builder.detail(req, export_location) def create_resource(): return wsgi.Resource(ShareExportLocationController()) manila-2.0.0/manila/api/v2/share_instances.py0000664000567000056710000000635012701407107022202 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
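# Illustrative sketch (added for clarity; not part of the original tree):
# listing the export locations of a share through the
# ShareExportLocationController above.  Admin-only locations are filtered
# out for non-admin callers, as done in index()/show(); endpoint, token and
# ids are placeholder assumptions.
import requests

share_headers = {
    'X-Auth-Token': 'USER_TOKEN',
    'X-OpenStack-Manila-API-Version': '2.9',
}
listing = requests.get(
    'http://controller:8786/v2/PROJECT_ID/shares/SHARE_ID/export_locations',
    headers=share_headers).json()
for location in listing.get('export_locations', []):
    print(location.get('id'), location.get('path'))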
from webob import exc from manila.api.openstack import wsgi from manila.api.views import share_instance as instance_view from manila import db from manila import exception from manila import share class ShareInstancesController(wsgi.Controller, wsgi.AdminActionsMixin): """The share instances API controller for the OpenStack API.""" resource_name = 'share_instance' _view_builder_class = instance_view.ViewBuilder def __init__(self): self.share_api = share.API() super(self.__class__, self).__init__() def _get(self, *args, **kwargs): return db.share_instance_get(*args, **kwargs) def _update(self, *args, **kwargs): db.share_instance_update(*args, **kwargs) def _delete(self, *args, **kwargs): return self.share_api.delete_instance(*args, **kwargs) @wsgi.Controller.api_version('2.3', '2.6') @wsgi.action('os-reset_status') def instance_reset_status_legacy(self, req, id, body): return self._reset_status(req, id, body) @wsgi.Controller.api_version('2.7') @wsgi.action('reset_status') def instance_reset_status(self, req, id, body): return self._reset_status(req, id, body) @wsgi.Controller.api_version('2.3', '2.6') @wsgi.action('os-force_delete') def instance_force_delete_legacy(self, req, id, body): return self._force_delete(req, id, body) @wsgi.Controller.api_version('2.7') @wsgi.action('force_delete') def instance_force_delete(self, req, id, body): return self._force_delete(req, id, body) @wsgi.Controller.api_version("2.3") @wsgi.Controller.authorize def index(self, req): context = req.environ['manila.context'] instances = db.share_instances_get_all(context) return self._view_builder.detail_list(req, instances) @wsgi.Controller.api_version("2.3") @wsgi.Controller.authorize def show(self, req, id): context = req.environ['manila.context'] try: instance = db.share_instance_get(context, id) except exception.NotFound: raise exc.HTTPNotFound() return self._view_builder.detail(req, instance) @wsgi.Controller.api_version("2.3") @wsgi.Controller.authorize('index') def get_share_instances(self, req, share_id): context = req.environ['manila.context'] try: share = self.share_api.get(context, share_id) except exception.NotFound: raise exc.HTTPNotFound() view = instance_view.ViewBuilder() return view.detail_list(req, share.instances) def create_resource(): return wsgi.Resource(ShareInstancesController()) manila-2.0.0/manila/api/v2/__init__.py0000664000567000056710000000000012701407107020552 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/api/v2/services.py0000664000567000056710000001012312701407107020645 0ustar jenkinsjenkins00000000000000# Copyright 2012 IBM Corp. # Copyright (c) 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob.exc from manila.api.openstack import wsgi from manila.api.views import services as services_views from manila import db from manila import utils class ServiceMixin(object): """The Services API controller common logic. Mixin class that should be inherited by Services API controllers, which are used for different API URLs and microversions. 
""" resource_name = "service" _view_builder_class = services_views.ViewBuilder @wsgi.Controller.authorize("index") def _index(self, req): """Return a list of all running services.""" context = req.environ['manila.context'] all_services = db.service_get_all(context) services = [] for service in all_services: service = { 'id': service['id'], 'binary': service['binary'], 'host': service['host'], 'zone': service['availability_zone']['name'], 'status': 'disabled' if service['disabled'] else 'enabled', 'state': 'up' if utils.service_is_up(service) else 'down', 'updated_at': service['updated_at'], } services.append(service) search_opts = [ 'host', 'binary', 'zone', 'state', 'status', ] for search_opt in search_opts: value = '' if search_opt in req.GET: value = req.GET[search_opt] services = [s for s in services if s[search_opt] == value] if len(services) == 0: break return self._view_builder.detail_list(services) @wsgi.Controller.authorize("update") def _update(self, req, id, body): """Enable/Disable scheduling for a service.""" context = req.environ['manila.context'] if id == "enable": data = {'disabled': False} elif id == "disable": data = {'disabled': True} else: raise webob.exc.HTTPNotFound("Unknown action '%s'" % id) try: data['host'] = body['host'] data['binary'] = body['binary'] except (TypeError, KeyError): raise webob.exc.HTTPBadRequest() svc = db.service_get_by_args(context, data['host'], data['binary']) db.service_update( context, svc['id'], {'disabled': data['disabled']}) return self._view_builder.summary(data) class ServiceControllerLegacy(ServiceMixin, wsgi.Controller): """Deprecated Services API controller. Used by legacy API v1 and v2 microversions from 2.0 to 2.6. Registered under deprecated API URL 'os-services'. """ @wsgi.Controller.api_version('1.0', '2.6') def index(self, req): return self._index(req) @wsgi.Controller.api_version('1.0', '2.6') def update(self, req, id, body): return self._update(req, id, body) class ServiceController(ServiceMixin, wsgi.Controller): """Services API controller. Used only by API v2 starting from microversion 2.7. Registered under API URL 'services'. """ @wsgi.Controller.api_version('2.7') def index(self, req): return self._index(req) @wsgi.Controller.api_version('2.7') def update(self, req, id, body): return self._update(req, id, body) def create_resource_legacy(): return wsgi.Resource(ServiceControllerLegacy()) def create_resource(): return wsgi.Resource(ServiceController()) manila-2.0.0/manila/api/v2/router.py0000664000567000056710000003257212701407107020356 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack LLC. # Copyright 2011 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright (c) 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ WSGI middleware for OpenStack Share API v2. 
""" from manila.api import extensions import manila.api.openstack from manila.api.v1 import limits from manila.api.v1 import scheduler_stats from manila.api.v1 import security_service from manila.api.v1 import share_manage from manila.api.v1 import share_metadata from manila.api.v1 import share_networks from manila.api.v1 import share_servers from manila.api.v1 import share_types_extra_specs from manila.api.v1 import share_unmanage from manila.api.v2 import availability_zones from manila.api.v2 import cgsnapshots from manila.api.v2 import consistency_groups from manila.api.v2 import quota_class_sets from manila.api.v2 import quota_sets from manila.api.v2 import services from manila.api.v2 import share_export_locations from manila.api.v2 import share_instance_export_locations from manila.api.v2 import share_instances from manila.api.v2 import share_replicas from manila.api.v2 import share_snapshots from manila.api.v2 import share_types from manila.api.v2 import shares from manila.api import versions class APIRouter(manila.api.openstack.APIRouter): """Route API requests. Routes requests on the OpenStack API to the appropriate controller and method. """ ExtensionManager = extensions.ExtensionManager def _setup_routes(self, mapper, ext_mgr): self.resources["versions"] = versions.create_resource() mapper.connect("versions", "/", controller=self.resources["versions"], action="index") mapper.redirect("", "/") self.resources["availability_zones_legacy"] = ( availability_zones.create_resource_legacy()) # TODO(vponomaryov): "os-availability-zone" is deprecated # since v2.7. Remove it when minimum API version becomes equal to # or greater than v2.7. mapper.resource("availability-zone", "os-availability-zone", controller=self.resources["availability_zones_legacy"]) self.resources["availability_zones"] = ( availability_zones.create_resource()) mapper.resource("availability-zone", "availability-zones", controller=self.resources["availability_zones"]) self.resources["services_legacy"] = services.create_resource_legacy() # TODO(vponomaryov): "os-services" is deprecated # since v2.7. Remove it when minimum API version becomes equal to # or greater than v2.7. mapper.resource("service", "os-services", controller=self.resources["services_legacy"]) self.resources["services"] = services.create_resource() mapper.resource("service", "services", controller=self.resources["services"]) self.resources["quota_sets_legacy"] = ( quota_sets.create_resource_legacy()) # TODO(vponomaryov): "os-quota-sets" is deprecated # since v2.7. Remove it when minimum API version becomes equal to # or greater than v2.7. mapper.resource("quota-set", "os-quota-sets", controller=self.resources["quota_sets_legacy"], member={"defaults": "GET"}) self.resources["quota_sets"] = quota_sets.create_resource() mapper.resource("quota-set", "quota-sets", controller=self.resources["quota_sets"], member={"defaults": "GET"}) self.resources["quota_class_sets_legacy"] = ( quota_class_sets.create_resource_legacy()) # TODO(vponomaryov): "os-quota-class-sets" is deprecated # since v2.7. Remove it when minimum API version becomes equal to # or greater than v2.7. 
mapper.resource("quota-class-set", "os-quota-class-sets", controller=self.resources["quota_class_sets_legacy"]) self.resources["quota_class_sets"] = quota_class_sets.create_resource() mapper.resource("quota-class-set", "quota-class-sets", controller=self.resources["quota_class_sets"]) self.resources["share_manage"] = share_manage.create_resource() # TODO(vponomaryov): "os-share-manage" is deprecated # since v2.7. Remove it when minimum API version becomes equal to # or greater than v2.7. mapper.resource("share_manage", "os-share-manage", controller=self.resources["share_manage"]) self.resources["share_unmanage"] = share_unmanage.create_resource() # TODO(vponomaryov): "os-share-unmanage" is deprecated # since v2.7. Remove it when minimum API version becomes equal to # or greater than v2.7. mapper.resource("share_unmanage", "os-share-unmanage", controller=self.resources["share_unmanage"], member={"unmanage": "POST"}) self.resources["shares"] = shares.create_resource() mapper.resource("share", "shares", controller=self.resources["shares"], collection={"detail": "GET"}, member={"action": "POST"}) mapper.connect("shares", "/{project_id}/shares/manage", controller=self.resources["shares"], action="manage", conditions={"method": ["POST"]}) self.resources["share_instances"] = share_instances.create_resource() mapper.resource("share_instance", "share_instances", controller=self.resources["share_instances"], collection={"detail": "GET"}, member={"action": "POST"}) self.resources["share_instance_export_locations"] = ( share_instance_export_locations.create_resource()) mapper.connect("share_instances", ("/{project_id}/share_instances/{share_instance_id}/" "export_locations"), controller=self.resources[ "share_instance_export_locations"], action="index", conditions={"method": ["GET"]}) mapper.connect("share_instances", ("/{project_id}/share_instances/{share_instance_id}/" "export_locations/{export_location_uuid}"), controller=self.resources[ "share_instance_export_locations"], action="show", conditions={"method": ["GET"]}) mapper.connect("share_instance", "/{project_id}/shares/{share_id}/instances", controller=self.resources["share_instances"], action="get_share_instances", conditions={"method": ["GET"]}) self.resources["share_export_locations"] = ( share_export_locations.create_resource()) mapper.connect("shares", "/{project_id}/shares/{share_id}/export_locations", controller=self.resources["share_export_locations"], action="index", conditions={"method": ["GET"]}) mapper.connect("shares", ("/{project_id}/shares/{share_id}/" "export_locations/{export_location_uuid}"), controller=self.resources["share_export_locations"], action="show", conditions={"method": ["GET"]}) self.resources["snapshots"] = share_snapshots.create_resource() mapper.resource("snapshot", "snapshots", controller=self.resources["snapshots"], collection={"detail": "GET"}, member={"action": "POST"}) mapper.connect("snapshots", "/{project_id}/snapshots/manage", controller=self.resources["snapshots"], action="manage", conditions={"method": ["POST"]}) self.resources["share_metadata"] = share_metadata.create_resource() share_metadata_controller = self.resources["share_metadata"] mapper.resource("share_metadata", "metadata", controller=share_metadata_controller, parent_resource=dict(member_name="share", collection_name="shares")) mapper.connect("metadata", "/{project_id}/shares/{share_id}/metadata", controller=share_metadata_controller, action="update_all", conditions={"method": ["PUT"]}) self.resources["limits"] = limits.create_resource() 
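# NOTE: illustrative summary added for clarity; it is not part of the
# original file.  The connect() calls above publish export locations as
# read-only sub-resources, roughly:
#     GET /{project_id}/shares/{share_id}/export_locations
#     GET /{project_id}/shares/{share_id}/export_locations/{export_location_uuid}
#     GET /{project_id}/share_instances/{share_instance_id}/export_locations
#     GET /{project_id}/share_instances/{share_instance_id}/export_locations/{export_location_uuid}
# Only GET is routed, so export locations cannot be created or modified
# through the API.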
mapper.resource("limit", "limits", controller=self.resources["limits"]) self.resources["security_services"] = ( security_service.create_resource()) mapper.resource("security-service", "security-services", controller=self.resources["security_services"], collection={"detail": "GET"}) self.resources["share_networks"] = share_networks.create_resource() mapper.resource(share_networks.RESOURCE_NAME, "share-networks", controller=self.resources["share_networks"], collection={"detail": "GET"}, member={"action": "POST"}) self.resources["share_servers"] = share_servers.create_resource() mapper.resource("share_server", "share-servers", controller=self.resources["share_servers"]) mapper.connect("details", "/{project_id}/share-servers/{id}/details", controller=self.resources["share_servers"], action="details", conditions={"method": ["GET"]}) self.resources["types"] = share_types.create_resource() mapper.resource("type", "types", controller=self.resources["types"], collection={"detail": "GET", "default": "GET"}, member={"action": "POST", "os-share-type-access": "GET", "share_type_access": "GET"}) self.resources["extra_specs"] = ( share_types_extra_specs.create_resource()) mapper.resource("extra_spec", "extra_specs", controller=self.resources["extra_specs"], parent_resource=dict(member_name="type", collection_name="types")) self.resources["scheduler_stats"] = scheduler_stats.create_resource() mapper.connect("pools", "/{project_id}/scheduler-stats/pools", controller=self.resources["scheduler_stats"], action="pools_index", conditions={"method": ["GET"]}) mapper.connect("pools", "/{project_id}/scheduler-stats/pools/detail", controller=self.resources["scheduler_stats"], action="pools_detail", conditions={"method": ["GET"]}) self.resources["consistency-groups"] = ( consistency_groups.create_resource()) mapper.resource("consistency-group", "consistency-groups", controller=self.resources["consistency-groups"], collection={"detail": "GET"}) mapper.connect("consistency-groups", "/{project_id}/consistency-groups/{id}/action", controller=self.resources["consistency-groups"], action="action", conditions={"action": ["POST"]}) self.resources["cgsnapshots"] = cgsnapshots.create_resource() mapper.resource("cgsnapshot", "cgsnapshots", controller=self.resources["cgsnapshots"], collection={"detail": "GET"}, member={"members": "GET", "action": "POST"}) self.resources['share-replicas'] = share_replicas.create_resource() mapper.resource("share-replica", "share-replicas", controller=self.resources['share-replicas'], collection={'detail': 'GET'}, member={'action': 'POST'}) manila-2.0.0/manila/api/v2/share_replicas.py0000664000567000056710000002141012701407112022003 0ustar jenkinsjenkins00000000000000# Copyright 2015 Goutham Pacha Ravi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
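# Illustrative, self-contained sketch (added for clarity; not part of the
# original tree) of how the routes library calls used in
# APIRouter._setup_routes() above resolve URLs to controller actions.  The
# mapper here is a bare routes.Mapper, so paths lack the '{project_id}'
# prefix added by the real API mapper.
import routes

demo_mapper = routes.Mapper()
demo_mapper.resource('share-replica', 'share-replicas',
                     collection={'detail': 'GET'}, member={'action': 'POST'})

# Resolves to the 'detail' collection action.
print(demo_mapper.match('/share-replicas/detail',
                        environ={'REQUEST_METHOD': 'GET'}))
# Resolves to the 'show' member action with id='fake-id'.
print(demo_mapper.match('/share-replicas/fake-id',
                        environ={'REQUEST_METHOD': 'GET'}))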
"""The Share Replication API.""" from oslo_log import log import six import webob from webob import exc from manila.api import common from manila.api.openstack import wsgi from manila.api.views import share_replicas as replication_view from manila.common import constants from manila import db from manila import exception from manila.i18n import _ from manila import share LOG = log.getLogger(__name__) MIN_SUPPORTED_API_VERSION = '2.11' class ShareReplicationController(wsgi.Controller, wsgi.AdminActionsMixin): """The Share Replication API controller for the OpenStack API.""" resource_name = 'share_replica' _view_builder_class = replication_view.ReplicationViewBuilder def __init__(self): super(ShareReplicationController, self).__init__() self.share_api = share.API() def _update(self, *args, **kwargs): db.share_replica_update(*args, **kwargs) def _get(self, *args, **kwargs): return db.share_replica_get(*args, **kwargs) def _delete(self, context, resource, force=True): try: self.share_api.delete_share_replica(context, resource, force=True) except exception.ReplicationException as e: raise exc.HTTPBadRequest(explanation=six.text_type(e)) @wsgi.Controller.api_version(MIN_SUPPORTED_API_VERSION, experimental=True) def index(self, req): """Return a summary list of replicas.""" return self._get_replicas(req) @wsgi.Controller.api_version(MIN_SUPPORTED_API_VERSION, experimental=True) def detail(self, req): """Returns a detailed list of replicas.""" return self._get_replicas(req, is_detail=True) @wsgi.Controller.authorize('get_all') def _get_replicas(self, req, is_detail=False): """Returns list of replicas.""" context = req.environ['manila.context'] share_id = req.params.get('share_id') if share_id: try: replicas = db.share_replicas_get_all_by_share( context, share_id) except exception.NotFound: msg = _("Share with share ID %s not found.") % share_id raise exc.HTTPNotFound(explanation=msg) else: replicas = db.share_replicas_get_all(context) limited_list = common.limited(replicas, req) if is_detail: replicas = self._view_builder.detail_list(req, limited_list) else: replicas = self._view_builder.summary_list(req, limited_list) return replicas @wsgi.Controller.api_version(MIN_SUPPORTED_API_VERSION, experimental=True) @wsgi.Controller.authorize def show(self, req, id): """Return data about the given replica.""" context = req.environ['manila.context'] try: replica = db.share_replica_get(context, id) except exception.ShareReplicaNotFound: msg = _("Replica %s not found.") % id raise exc.HTTPNotFound(explanation=msg) return self._view_builder.detail(req, replica) @wsgi.Controller.api_version(MIN_SUPPORTED_API_VERSION, experimental=True) @wsgi.response(202) @wsgi.Controller.authorize def create(self, req, body): """Add a replica to an existing share.""" context = req.environ['manila.context'] if not self.is_valid_body(body, 'share_replica'): msg = _("Body does not contain 'share_replica' information.") raise exc.HTTPUnprocessableEntity(explanation=msg) share_id = body.get('share_replica').get('share_id') availability_zone = body.get('share_replica').get('availability_zone') share_network_id = body.get('share_replica').get('share_network_id') if not share_id: msg = _("Must provide Share ID to add replica.") raise exc.HTTPBadRequest(explanation=msg) try: share_ref = db.share_get(context, share_id) except exception.NotFound: msg = _("No share exists with ID %s.") raise exc.HTTPNotFound(explanation=msg % share_id) try: new_replica = self.share_api.create_share_replica( context, share_ref, 
availability_zone=availability_zone, share_network_id=share_network_id) except exception.AvailabilityZoneNotFound as e: raise exc.HTTPBadRequest(explanation=six.text_type(e)) except exception.ReplicationException as e: raise exc.HTTPBadRequest(explanation=six.text_type(e)) except exception.ShareBusyException as e: raise exc.HTTPBadRequest(explanation=six.text_type(e)) return self._view_builder.detail(req, new_replica) @wsgi.Controller.api_version(MIN_SUPPORTED_API_VERSION, experimental=True) @wsgi.Controller.authorize def delete(self, req, id): """Delete a replica.""" context = req.environ['manila.context'] try: replica = db.share_replica_get(context, id) except exception.ShareReplicaNotFound: msg = _("No replica exists with ID %s.") raise exc.HTTPNotFound(explanation=msg % id) try: self.share_api.delete_share_replica(context, replica) except exception.ReplicationException as e: raise exc.HTTPBadRequest(explanation=six.text_type(e)) return webob.Response(status_int=202) @wsgi.Controller.api_version(MIN_SUPPORTED_API_VERSION, experimental=True) @wsgi.action('promote') @wsgi.response(202) @wsgi.Controller.authorize def promote(self, req, id, body): """Promote a replica to active state.""" context = req.environ['manila.context'] try: replica = db.share_replica_get(context, id) except exception.ShareReplicaNotFound: msg = _("No replica exists with ID %s.") raise exc.HTTPNotFound(explanation=msg % id) replica_state = replica.get('replica_state') if replica_state == constants.REPLICA_STATE_ACTIVE: return webob.Response(status_int=200) try: replica = self.share_api.promote_share_replica(context, replica) except exception.ReplicationException as e: raise exc.HTTPBadRequest(explanation=six.text_type(e)) except exception.AdminRequired as e: raise exc.HTTPForbidden(explanation=six.text_type(e)) return self._view_builder.detail(req, replica) @wsgi.Controller.api_version(MIN_SUPPORTED_API_VERSION, experimental=True) @wsgi.action('reset_status') def reset_status(self, req, id, body): """Reset the 'status' attribute in the database.""" return self._reset_status(req, id, body) @wsgi.Controller.api_version(MIN_SUPPORTED_API_VERSION, experimental=True) @wsgi.action('force_delete') def force_delete(self, req, id, body): """Force deletion on the database, attempt on the backend.""" return self._force_delete(req, id, body) @wsgi.Controller.api_version(MIN_SUPPORTED_API_VERSION, experimental=True) @wsgi.action('reset_replica_state') @wsgi.Controller.authorize def reset_replica_state(self, req, id, body): """Reset the 'replica_state' attribute in the database.""" return self._reset_status(req, id, body, status_attr='replica_state') @wsgi.Controller.api_version(MIN_SUPPORTED_API_VERSION, experimental=True) @wsgi.action('resync') @wsgi.response(202) @wsgi.Controller.authorize def resync(self, req, id, body): """Attempt to update/sync the replica with its source.""" context = req.environ['manila.context'] try: replica = db.share_replica_get(context, id) except exception.ShareReplicaNotFound: msg = _("No replica exists with ID %s.") raise exc.HTTPNotFound(explanation=msg % id) replica_state = replica.get('replica_state') if replica_state == constants.REPLICA_STATE_ACTIVE: return webob.Response(status_int=200) try: self.share_api.update_share_replica(context, replica) except exception.InvalidHost as e: raise exc.HTTPBadRequest(explanation=six.text_type(e)) def create_resource(): return wsgi.Resource(ShareReplicationController()) 
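# Illustrative sketch (added for clarity; not part of the original tree):
# every handler in ShareReplicationController above is decorated with
# experimental=True, so callers must opt in with the experimental header in
# addition to requesting at least microversion 2.11.  Endpoint, token and
# ids are placeholder assumptions.
import json

import requests

replica_headers = {
    'X-Auth-Token': 'ADMIN_TOKEN',
    'Content-Type': 'application/json',
    'X-OpenStack-Manila-API-Version': '2.11',
    'X-OpenStack-Manila-API-Experimental': 'True',
}
promote_url = ('http://controller:8786/v2/PROJECT_ID/'
               'share-replicas/REPLICA_ID/action')
# Promote the replica to 'active'; the controller answers 202, or 200 if the
# replica is already active.
resp = requests.post(promote_url, headers=replica_headers,
                     data=json.dumps({'promote': None}))
print(resp.status_code)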
manila-2.0.0/manila/api/v2/quota_class_sets.py0000664000567000056710000000661112701407107022405 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack LLC. # Copyright (c) 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob from manila.api.openstack import wsgi from manila.api.views import quota_class_sets as quota_class_sets_views from manila import db from manila import exception from manila import quota QUOTAS = quota.QUOTAS class QuotaClassSetsMixin(object): """The Quota Class Sets API controller common logic. Mixin class that should be inherited by Quota Class Sets API controllers, which are used for different API URLs and microversions. """ resource_name = "quota_class_set" _view_builder_class = quota_class_sets_views.ViewBuilder @wsgi.Controller.authorize("show") def _show(self, req, id): context = req.environ['manila.context'] try: db.authorize_quota_class_context(context, id) except exception.NotAuthorized: raise webob.exc.HTTPForbidden() return self._view_builder.detail_list( QUOTAS.get_class_quotas(context, id), id) @wsgi.Controller.authorize("update") def _update(self, req, id, body): context = req.environ['manila.context'] quota_class = id for key in body.get(self.resource_name, {}).keys(): if key in QUOTAS: value = int(body[self.resource_name][key]) try: db.quota_class_update(context, quota_class, key, value) except exception.QuotaClassNotFound: db.quota_class_create(context, quota_class, key, value) except exception.AdminRequired: raise webob.exc.HTTPForbidden() return self._view_builder.detail_list( QUOTAS.get_class_quotas(context, quota_class)) class QuotaClassSetsControllerLegacy(QuotaClassSetsMixin, wsgi.Controller): """Deprecated Quota Class Sets API controller. Used by legacy API v1 and v2 microversions from 2.0 to 2.6. Registered under deprecated API URL 'os-quota-class-sets'. """ @wsgi.Controller.api_version('1.0', '2.6') def show(self, req, id): return self._show(req, id) @wsgi.Controller.api_version('1.0', '2.6') def update(self, req, id, body): return self._update(req, id, body) class QuotaClassSetsController(QuotaClassSetsMixin, wsgi.Controller): """Quota Class Sets API controller. Used only by API v2 starting from microversion 2.7. Registered under API URL 'quota-class-sets'. """ @wsgi.Controller.api_version('2.7') def show(self, req, id): return self._show(req, id) @wsgi.Controller.api_version('2.7') def update(self, req, id, body): return self._update(req, id, body) def create_resource_legacy(): return wsgi.Resource(QuotaClassSetsControllerLegacy()) def create_resource(): return wsgi.Resource(QuotaClassSetsController()) manila-2.0.0/manila/api/v2/share_types.py0000664000567000056710000002527412701407107021365 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # Copyright (c) 2014 NetApp, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The share type API controller module..""" from oslo_utils import strutils from oslo_utils import uuidutils import six import webob from webob import exc from manila.api.openstack import wsgi from manila.api.views import types as views_types from manila import exception from manila.i18n import _ from manila import rpc from manila.share import share_types class ShareTypesController(wsgi.Controller): """The share types API controller for the OpenStack API.""" resource_name = 'share_type' _view_builder_class = views_types.ViewBuilder def __getattr__(self, key): if key == 'os-share-type-access': return self.share_type_access return super(self.__class__, self).__getattr__(key) def _notify_share_type_error(self, context, method, payload): rpc.get_notifier('shareType').error(context, method, payload) def _check_body(self, body, action_name): if not self.is_valid_body(body, action_name): raise webob.exc.HTTPBadRequest() access = body[action_name] project = access.get('project') if not uuidutils.is_uuid_like(project): msg = _("Bad project format: " "project is not in proper format (%s)") % project raise webob.exc.HTTPBadRequest(explanation=msg) @wsgi.Controller.authorize def index(self, req): """Returns the list of share types.""" limited_types = self._get_share_types(req) req.cache_db_share_types(limited_types) return self._view_builder.index(req, limited_types) @wsgi.Controller.authorize def show(self, req, id): """Return a single share type item.""" context = req.environ['manila.context'] try: share_type = share_types.get_share_type(context, id) except exception.NotFound: msg = _("Share type not found.") raise exc.HTTPNotFound(explanation=msg) share_type['id'] = six.text_type(share_type['id']) req.cache_db_share_type(share_type) return self._view_builder.show(req, share_type) @wsgi.Controller.authorize def default(self, req): """Return default volume type.""" context = req.environ['manila.context'] try: share_type = share_types.get_default_share_type(context) except exception.NotFound: msg = _("Share type not found") raise exc.HTTPNotFound(explanation=msg) if not share_type: msg = _("Default share type not found") raise exc.HTTPNotFound(explanation=msg) share_type['id'] = six.text_type(share_type['id']) return self._view_builder.show(req, share_type) def _get_share_types(self, req): """Helper function that returns a list of type dicts.""" filters = {} context = req.environ['manila.context'] if context.is_admin: # Only admin has query access to all share types filters['is_public'] = self._parse_is_public( req.params.get('is_public')) else: filters['is_public'] = True limited_types = share_types.get_all_types( context, search_opts=filters).values() return list(limited_types) @staticmethod def _parse_is_public(is_public): """Parse is_public into something usable. 
* True: API should list public share types only * False: API should list private share types only * None: API should list both public and private share types """ if is_public is None: # preserve default value of showing only public types return True elif six.text_type(is_public).lower() == "all": return None else: try: return strutils.bool_from_string(is_public, strict=True) except ValueError: msg = _('Invalid is_public filter [%s]') % is_public raise exc.HTTPBadRequest(explanation=msg) @wsgi.action("create") @wsgi.Controller.authorize('create') def _create(self, req, body): """Creates a new share type.""" context = req.environ['manila.context'] if not self.is_valid_body(body, 'share_type') and \ not self.is_valid_body(body, 'volume_type'): raise webob.exc.HTTPBadRequest() elif self.is_valid_body(body, 'share_type'): share_type = body['share_type'] else: share_type = body['volume_type'] name = share_type.get('name') specs = share_type.get('extra_specs', {}) is_public = share_type.get( 'os-share-type-access:is_public', share_type.get('share_type_access:is_public', True), ) if name is None or name == "" or len(name) > 255: msg = _("Type name is not valid.") raise webob.exc.HTTPBadRequest(explanation=msg) try: required_extra_specs = ( share_types.get_valid_required_extra_specs(specs) ) except exception.InvalidExtraSpec as e: raise webob.exc.HTTPBadRequest(explanation=six.text_type(e)) try: share_types.create(context, name, specs, is_public) share_type = share_types.get_share_type_by_name(context, name) share_type['required_extra_specs'] = required_extra_specs req.cache_db_share_type(share_type) notifier_info = dict(share_types=share_type) rpc.get_notifier('shareType').info( context, 'share_type.create', notifier_info) except exception.ShareTypeExists as err: notifier_err = dict(share_types=share_type, error_message=six.text_type(err)) self._notify_share_type_error(context, 'share_type.create', notifier_err) raise webob.exc.HTTPConflict(explanation=six.text_type(err)) except exception.NotFound as err: notifier_err = dict(share_types=share_type, error_message=six.text_type(err)) self._notify_share_type_error(context, 'share_type.create', notifier_err) raise webob.exc.HTTPNotFound() return self._view_builder.show(req, share_type) @wsgi.action("delete") @wsgi.Controller.authorize('delete') def _delete(self, req, id): """Deletes an existing share type.""" context = req.environ['manila.context'] try: share_type = share_types.get_share_type(context, id) share_types.destroy(context, share_type['id']) notifier_info = dict(share_types=share_type) rpc.get_notifier('shareType').info( context, 'share_type.delete', notifier_info) except exception.ShareTypeInUse as err: notifier_err = dict(id=id, error_message=six.text_type(err)) self._notify_share_type_error(context, 'share_type.delete', notifier_err) msg = 'Target share type is still in use.' 
raise webob.exc.HTTPBadRequest(explanation=msg) except exception.NotFound as err: notifier_err = dict(id=id, error_message=six.text_type(err)) self._notify_share_type_error(context, 'share_type.delete', notifier_err) raise webob.exc.HTTPNotFound() return webob.Response(status_int=202) @wsgi.Controller.authorize('list_project_access') def share_type_access(self, req, id): context = req.environ['manila.context'] try: share_type = share_types.get_share_type( context, id, expected_fields=['projects']) except exception.ShareTypeNotFound: explanation = _("Share type %s not found.") % id raise webob.exc.HTTPNotFound(explanation=explanation) if share_type['is_public']: expl = _("Access list not available for public share types.") raise webob.exc.HTTPNotFound(explanation=expl) # TODO(vponomaryov): move to views. rval = [] for project_id in share_type['projects']: rval.append( {'share_type_id': share_type['id'], 'project_id': project_id} ) return {'share_type_access': rval} @wsgi.action('addProjectAccess') @wsgi.Controller.authorize('add_project_access') def _add_project_access(self, req, id, body): context = req.environ['manila.context'] self._check_body(body, 'addProjectAccess') project = body['addProjectAccess']['project'] self._verify_if_non_public_share_type(context, id) try: share_types.add_share_type_access(context, id, project) except exception.ShareTypeAccessExists as err: raise webob.exc.HTTPConflict(explanation=six.text_type(err)) return webob.Response(status_int=202) @wsgi.action('removeProjectAccess') @wsgi.Controller.authorize('remove_project_access') def _remove_project_access(self, req, id, body): context = req.environ['manila.context'] self._check_body(body, 'removeProjectAccess') project = body['removeProjectAccess']['project'] self._verify_if_non_public_share_type(context, id) try: share_types.remove_share_type_access(context, id, project) except exception.ShareTypeAccessNotFound as err: raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) return webob.Response(status_int=202) def _verify_if_non_public_share_type(self, context, share_type_id): try: share_type = share_types.get_share_type(context, share_type_id) if share_type['is_public']: msg = _("Type access modification is not applicable to " "public share type.") raise webob.exc.HTTPConflict(explanation=msg) except exception.ShareTypeNotFound as err: raise webob.exc.HTTPNotFound(explanation=six.text_type(err)) def create_resource(): return wsgi.Resource(ShareTypesController()) manila-2.0.0/manila/api/v2/consistency_groups.py0000664000567000056710000002205512701407107022771 0ustar jenkinsjenkins00000000000000# Copyright 2015 Alex Meade # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
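# Illustrative sketch (added for clarity; not part of the original tree):
# granting a project access to a private share type through the
# addProjectAccess action handled by ShareTypesController above.  The
# endpoint, token and ids are placeholder assumptions; the project value
# must be UUID-like or _check_body() rejects the request with a 400.
import json

import requests

type_headers = {
    'X-Auth-Token': 'ADMIN_TOKEN',
    'Content-Type': 'application/json',
    'X-OpenStack-Manila-API-Version': '2.7',
}
access_body = {'addProjectAccess':
               {'project': '00000000-0000-0000-0000-000000000000'}}
resp = requests.post(
    'http://controller:8786/v2/PROJECT_ID/types/SHARE_TYPE_ID/action',
    headers=type_headers, data=json.dumps(access_body))
print(resp.status_code)  # 202 on success, 409 if the access already exists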
"""The consistency groups API.""" from oslo_log import log from oslo_utils import uuidutils import six import webob from webob import exc from manila.api import common from manila.api.openstack import wsgi import manila.api.views.consistency_groups as cg_views import manila.consistency_group.api as cg_api from manila import db from manila import exception from manila.i18n import _ from manila.i18n import _LI from manila.share import share_types LOG = log.getLogger(__name__) class CGController(wsgi.Controller, wsgi.AdminActionsMixin): """The Consistency Groups API controller for the OpenStack API.""" resource_name = 'consistency_group' _view_builder_class = cg_views.CGViewBuilder resource_name = 'consistency_group' def __init__(self): super(CGController, self).__init__() self.cg_api = cg_api.API() @wsgi.Controller.api_version('2.4', experimental=True) @wsgi.Controller.authorize('get') def show(self, req, id): """Return data about the given CG.""" context = req.environ['manila.context'] try: cg = self.cg_api.get(context, id) except exception.NotFound: msg = _("Consistency group %s not found.") % id raise exc.HTTPNotFound(explanation=msg) return self._view_builder.detail(req, cg) @wsgi.Controller.api_version('2.4', experimental=True) @wsgi.Controller.authorize def delete(self, req, id): """Delete a CG.""" context = req.environ['manila.context'] LOG.info(_LI("Delete consistency group with id: %s"), id, context=context) try: cg = self.cg_api.get(context, id) except exception.NotFound: msg = _("Consistency group %s not found.") % id raise exc.HTTPNotFound(explanation=msg) try: self.cg_api.delete(context, cg) except exception.InvalidConsistencyGroup as e: raise exc.HTTPConflict(explanation=six.text_type(e)) return webob.Response(status_int=202) @wsgi.Controller.api_version('2.4', experimental=True) @wsgi.Controller.authorize('get_all') def index(self, req): """Returns a summary list of shares.""" return self._get_cgs(req, is_detail=False) @wsgi.Controller.api_version('2.4', experimental=True) @wsgi.Controller.authorize('get_all') def detail(self, req): """Returns a detailed list of shares.""" return self._get_cgs(req, is_detail=True) def _get_cgs(self, req, is_detail): """Returns a list of shares, transformed through view builder.""" context = req.environ['manila.context'] search_opts = {} search_opts.update(req.GET) # Remove keys that are not related to cg attrs search_opts.pop('limit', None) search_opts.pop('offset', None) cgs = self.cg_api.get_all( context, detailed=is_detail, search_opts=search_opts) limited_list = common.limited(cgs, req) if is_detail: cgs = self._view_builder.detail_list(req, limited_list) else: cgs = self._view_builder.summary_list(req, limited_list) return cgs @wsgi.Controller.api_version('2.4', experimental=True) @wsgi.Controller.authorize def update(self, req, id, body): """Update a share.""" context = req.environ['manila.context'] if not self.is_valid_body(body, 'consistency_group'): msg = _("'consistency_group' is missing from the request body.") raise exc.HTTPBadRequest(explanation=msg) cg_data = body['consistency_group'] valid_update_keys = { 'name', 'description', } invalid_fields = set(cg_data.keys()) - valid_update_keys if invalid_fields: msg = _("The fields %s are invalid or not allowed to be updated.") raise exc.HTTPBadRequest(explanation=msg % invalid_fields) try: cg = self.cg_api.get(context, id) except exception.NotFound: msg = _("Consistency group %s not found.") % id raise exc.HTTPNotFound(explanation=msg) cg = self.cg_api.update(context, cg, cg_data) 
return self._view_builder.detail(req, cg) @wsgi.Controller.api_version('2.4', experimental=True) @wsgi.response(202) @wsgi.Controller.authorize def create(self, req, body): """Creates a new share.""" context = req.environ['manila.context'] if not self.is_valid_body(body, 'consistency_group'): msg = _("'consistency_group' is missing from the request body.") raise exc.HTTPBadRequest(explanation=msg) cg = body['consistency_group'] valid_fields = {'name', 'description', 'share_types', 'source_cgsnapshot_id', 'share_network_id'} invalid_fields = set(cg.keys()) - valid_fields if invalid_fields: msg = _("The fields %s are invalid.") % invalid_fields raise exc.HTTPBadRequest(explanation=msg) if 'share_types' in cg and 'source_cgsnapshot_id' in cg: msg = _("Cannot supply both 'share_types' and " "'source_cgsnapshot_id' attributes.") raise exc.HTTPBadRequest(explanation=msg) if not cg.get('share_types') and 'source_cgsnapshot_id' not in cg: default_share_type = share_types.get_default_share_type() if default_share_type: cg['share_types'] = [default_share_type['id']] else: msg = _("Must specify at least one share type as a default " "share type has not been configured.") raise exc.HTTPBadRequest(explanation=msg) kwargs = {} if 'name' in cg: kwargs['name'] = cg.get('name') if 'description' in cg: kwargs['description'] = cg.get('description') _share_types = cg.get('share_types') if _share_types: if not all([uuidutils.is_uuid_like(st) for st in _share_types]): msg = _("The 'share_types' attribute must be a list of uuids") raise exc.HTTPBadRequest(explanation=msg) kwargs['share_type_ids'] = _share_types if 'source_cgsnapshot_id' in cg: source_cgsnapshot_id = cg.get('source_cgsnapshot_id') if not uuidutils.is_uuid_like(source_cgsnapshot_id): msg = _("The 'source_cgsnapshot_id' attribute must be a uuid.") raise exc.HTTPBadRequest(explanation=six.text_type(msg)) kwargs['source_cgsnapshot_id'] = source_cgsnapshot_id if 'share_network_id' in cg: share_network_id = cg.get('share_network_id') if not uuidutils.is_uuid_like(share_network_id): msg = _("The 'share_network_id' attribute must be a uuid.") raise exc.HTTPBadRequest(explanation=six.text_type(msg)) kwargs['share_network_id'] = share_network_id try: new_cg = self.cg_api.create(context, **kwargs) except exception.InvalidCGSnapshot as e: raise exc.HTTPConflict(explanation=six.text_type(e)) except (exception.CGSnapshotNotFound, exception.InvalidInput) as e: raise exc.HTTPBadRequest(explanation=six.text_type(e)) return self._view_builder.detail(req, dict(new_cg.items())) def _update(self, *args, **kwargs): db.consistency_group_update(*args, **kwargs) def _get(self, *args, **kwargs): return self.cg_api.get(*args, **kwargs) def _delete(self, context, resource, force=True): db.consistency_group_destroy(context.elevated(), resource['id']) @wsgi.Controller.api_version('2.4', '2.6', experimental=True) @wsgi.action('os-reset_status') def cg_reset_status_legacy(self, req, id, body): return self._reset_status(req, id, body) @wsgi.Controller.api_version('2.7', experimental=True) @wsgi.action('reset_status') def cg_reset_status(self, req, id, body): return self._reset_status(req, id, body) @wsgi.Controller.api_version('2.4', '2.6', experimental=True) @wsgi.action('os-force_delete') def cg_force_delete_legacy(self, req, id, body): return self._force_delete(req, id, body) @wsgi.Controller.api_version('2.7', experimental=True) @wsgi.action('force_delete') def cg_force_delete(self, req, id, body): return self._force_delete(req, id, body) def create_resource(): return 
manila-2.0.0/manila/api/v2/availability_zones.py0000664000567000056710000000433612701407107022723 0ustar jenkinsjenkins00000000000000
# Copyright (c) 2013 OpenStack Foundation
# Copyright (c) 2015 Mirantis inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from manila.api.openstack import wsgi
from manila.api.views import availability_zones as availability_zones_views
from manila import db


class AvailabilityZoneMixin(object):
    """The Availability Zone API controller common logic.

    Mixin class that should be inherited by Availability Zone API
    controllers, which are used for different API URLs and microversions.
    """

    resource_name = "availability_zone"
    _view_builder_class = availability_zones_views.ViewBuilder

    @wsgi.Controller.authorize("index")
    def _index(self, req):
        """Describe all known availability zones."""
        views = db.availability_zone_get_all(req.environ['manila.context'])
        return self._view_builder.detail_list(views)


class AvailabilityZoneControllerLegacy(AvailabilityZoneMixin,
                                       wsgi.Controller):
    """Deprecated Availability Zone API controller.

    Used by legacy API v1 and v2 microversions from 2.0 to 2.6.
    Registered under deprecated API URL 'os-availability-zone'.
    """

    @wsgi.Controller.api_version('1.0', '2.6')
    def index(self, req):
        return self._index(req)


class AvailabilityZoneController(AvailabilityZoneMixin, wsgi.Controller):
    """Availability Zone API controller.

    Used only by API v2 starting from microversion 2.7.
    Registered under API URL 'availability-zones'.
    """

    @wsgi.Controller.api_version('2.7')
    def index(self, req):
        return self._index(req)


def create_resource_legacy():
    return wsgi.Resource(AvailabilityZoneControllerLegacy())


def create_resource():
    return wsgi.Resource(AvailabilityZoneController())
manila-2.0.0/manila/api/v2/cgsnapshots.py0000664000567000056710000002040312701407107021360 0ustar jenkinsjenkins00000000000000
# Copyright 2015 Alex Meade
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
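# NOTE(editor): The CGSnapshotController defined in this module accepts a
# 'cgsnapshot' body with a required 'consistency_group_id' plus optional
# 'name' and 'description', and it is exposed as an experimental API starting
# with microversion 2.4. The snippet below is a minimal client-side sketch of
# such a create request, not part of this module. It assumes a reachable
# manila v2 endpoint and a valid Keystone token; MANILA_ENDPOINT, TOKEN,
# PROJECT_ID and the CG UUID are placeholders, the '/cgsnapshots' path is
# assumed from the v2 router conventions, and the 'requests' package is used
# instead of python-manilaclient.
#
#     import json
#
#     import requests
#
#     MANILA_ENDPOINT = 'http://controller:8786/v2/<PROJECT_ID>'  # placeholder
#     TOKEN = '<keystone-token>'                                  # placeholder
#
#     headers = {
#         'X-Auth-Token': TOKEN,
#         'Content-Type': 'application/json',
#         # Experimental CG APIs need an explicit microversion >= 2.4 and the
#         # experimental flag, otherwise the request is rejected.
#         'X-OpenStack-Manila-API-Version': '2.4',
#         'X-OpenStack-Manila-API-Experimental': 'True',
#     }
#
#     body = {
#         'cgsnapshot': {
#             'consistency_group_id': '<cg-uuid>',  # required, must be a UUID
#             'name': 'my-cg-snapshot',             # optional
#             'description': 'example snapshot',    # optional
#         }
#     }
#
#     resp = requests.post(MANILA_ENDPOINT + '/cgsnapshots',
#                          headers=headers, data=json.dumps(body))
#     print(resp.status_code)  # 202 on success, see @wsgi.response(202) below
#     print(resp.json())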
"""The consistency groups snapshot API.""" from oslo_log import log from oslo_utils import uuidutils import six import webob from webob import exc from manila.api import common from manila.api.openstack import wsgi import manila.api.views.cgsnapshots as cg_views import manila.consistency_group.api as cg_api from manila import db from manila import exception from manila.i18n import _ from manila.i18n import _LI LOG = log.getLogger(__name__) class CGSnapshotController(wsgi.Controller, wsgi.AdminActionsMixin): """The Consistency Group Snapshots API controller for the OpenStack API.""" resource_name = 'cgsnapshot' _view_builder_class = cg_views.CGSnapshotViewBuilder def __init__(self): super(CGSnapshotController, self).__init__() self.cg_api = cg_api.API() @wsgi.Controller.api_version('2.4', experimental=True) @wsgi.Controller.authorize('get_cgsnapshot') def show(self, req, id): """Return data about the given cgsnapshot.""" context = req.environ['manila.context'] try: cg = self.cg_api.get_cgsnapshot(context, id) except exception.NotFound: msg = _("Consistency group snapshot %s not found.") % id raise exc.HTTPNotFound(explanation=msg) return self._view_builder.detail(req, cg) @wsgi.Controller.api_version('2.4', experimental=True) @wsgi.Controller.authorize def delete(self, req, id): """Delete a cgsnapshot.""" context = req.environ['manila.context'] LOG.info(_LI("Delete consistency group snapshot with id: %s"), id, context=context) try: snap = self.cg_api.get_cgsnapshot(context, id) except exception.NotFound: msg = _("Consistency group snapshot %s not found.") % id raise exc.HTTPNotFound(explanation=msg) try: self.cg_api.delete_cgsnapshot(context, snap) except exception.InvalidCGSnapshot as e: raise exc.HTTPConflict(explanation=six.text_type(e)) return webob.Response(status_int=202) @wsgi.Controller.api_version('2.4', experimental=True) @wsgi.Controller.authorize('get_all') def index(self, req): """Returns a summary list of cgsnapshots.""" return self._get_cgs(req, is_detail=False) @wsgi.Controller.api_version('2.4', experimental=True) @wsgi.Controller.authorize('get_all') def detail(self, req): """Returns a detailed list of cgsnapshots.""" return self._get_cgs(req, is_detail=True) def _get_cgs(self, req, is_detail): """Returns a list of cgsnapshots.""" context = req.environ['manila.context'] search_opts = {} search_opts.update(req.GET) # Remove keys that are not related to cg attrs search_opts.pop('limit', None) search_opts.pop('offset', None) snaps = self.cg_api.get_all_cgsnapshots( context, detailed=is_detail, search_opts=search_opts) limited_list = common.limited(snaps, req) if is_detail: snaps = self._view_builder.detail_list(req, limited_list) else: snaps = self._view_builder.summary_list(req, limited_list) return snaps @wsgi.Controller.api_version('2.4', experimental=True) @wsgi.Controller.authorize def update(self, req, id, body): """Update a cgsnapshot.""" context = req.environ['manila.context'] if not self.is_valid_body(body, 'cgsnapshot'): msg = _("'cgsnapshot' is missing from the request body") raise exc.HTTPBadRequest(explanation=msg) cg_data = body['cgsnapshot'] valid_update_keys = { 'name', 'description', } invalid_fields = set(cg_data.keys()) - valid_update_keys if invalid_fields: msg = _("The fields %s are invalid or not allowed to be updated.") raise exc.HTTPBadRequest(explanation=msg % invalid_fields) try: cg = self.cg_api.get_cgsnapshot(context, id) except exception.NotFound: msg = _("Consistency group snapshot %s not found.") % id raise exc.HTTPNotFound(explanation=msg) cg = 
self.cg_api.update_cgsnapshot(context, cg, cg_data) return self._view_builder.detail(req, cg) @wsgi.Controller.api_version('2.4', experimental=True) @wsgi.response(202) @wsgi.Controller.authorize def create(self, req, body): """Creates a new cgsnapshot.""" context = req.environ['manila.context'] if not self.is_valid_body(body, 'cgsnapshot'): msg = _("'cgsnapshot' is missing from the request body") raise exc.HTTPBadRequest(explanation=msg) cgsnapshot = body.get('cgsnapshot') if not cgsnapshot.get('consistency_group_id'): msg = _("Must supply 'consistency_group_id' attribute.") raise exc.HTTPBadRequest(explanation=msg) consistency_group_id = cgsnapshot.get('consistency_group_id') if (consistency_group_id and not uuidutils.is_uuid_like(consistency_group_id)): msg = _("The 'consistency_group_id' attribute must be a uuid.") raise exc.HTTPBadRequest(explanation=six.text_type(msg)) kwargs = {"consistency_group_id": consistency_group_id} if 'name' in cgsnapshot: kwargs['name'] = cgsnapshot.get('name') if 'description' in cgsnapshot: kwargs['description'] = cgsnapshot.get('description') try: new_snapshot = self.cg_api.create_cgsnapshot(context, **kwargs) except exception.ConsistencyGroupNotFound as e: raise exc.HTTPBadRequest(explanation=six.text_type(e)) except exception.InvalidConsistencyGroup as e: raise exc.HTTPConflict(explanation=six.text_type(e)) return self._view_builder.detail(req, dict(new_snapshot.items())) @wsgi.Controller.api_version('2.4', experimental=True) @wsgi.Controller.authorize('get_cgsnapshot') def members(self, req, id): """Returns a list of cgsnapshot members.""" context = req.environ['manila.context'] search_opts = {} search_opts.update(req.GET) # Remove keys that are not related to cg attrs search_opts.pop('limit', None) search_opts.pop('offset', None) snaps = self.cg_api.get_all_cgsnapshot_members(context, id) limited_list = common.limited(snaps, req) snaps = self._view_builder.member_list(req, limited_list) return snaps def _update(self, *args, **kwargs): db.cgsnapshot_update(*args, **kwargs) def _get(self, *args, **kwargs): return self.cg_api.get_cgsnapshot(*args, **kwargs) def _delete(self, context, resource, force=True): db.cgsnapshot_destroy(context.elevated(), resource['id']) @wsgi.Controller.api_version('2.4', '2.6', experimental=True) @wsgi.action('os-reset_status') def cgsnapshot_reset_status_legacy(self, req, id, body): return self._reset_status(req, id, body) @wsgi.Controller.api_version('2.7', experimental=True) @wsgi.action('reset_status') def cgsnapshot_reset_status(self, req, id, body): return self._reset_status(req, id, body) @wsgi.Controller.api_version('2.4', '2.6', experimental=True) @wsgi.action('os-force_delete') def cgsnapshot_force_delete_legacy(self, req, id, body): return self._force_delete(req, id, body) @wsgi.Controller.api_version('2.7', experimental=True) @wsgi.action('force_delete') def cgsnapshot_force_delete(self, req, id, body): return self._force_delete(req, id, body) def create_resource(): return wsgi.Resource(CGSnapshotController()) manila-2.0.0/manila/api/v2/share_snapshots.py0000664000567000056710000001523412701407107022236 0ustar jenkinsjenkins00000000000000# Copyright 2013 NetApp # Copyright 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The share snapshots api.""" from oslo_log import log import six import webob from webob import exc from manila.api.openstack import wsgi from manila.api.v1 import share_snapshots from manila.api.views import share_snapshots as snapshot_views from manila.common import constants from manila import exception from manila.i18n import _, _LI from manila import share LOG = log.getLogger(__name__) class ShareSnapshotsController(share_snapshots.ShareSnapshotMixin, wsgi.Controller, wsgi.AdminActionsMixin): """The Share Snapshots API V2 controller for the OpenStack API.""" resource_name = 'share_snapshot' _view_builder_class = snapshot_views.ViewBuilder def __init__(self): super(ShareSnapshotsController, self).__init__() self.share_api = share.API() @wsgi.Controller.authorize('unmanage_snapshot') def _unmanage(self, req, id, body=None): """Unmanage a share snapshot.""" context = req.environ['manila.context'] LOG.info(_LI("Unmanage share snapshot with id: %s."), id) try: snapshot = self.share_api.get_snapshot(context, id) share = self.share_api.get(context, snapshot['share_id']) if share.get('share_server_id'): msg = _("Operation 'unmanage_snapshot' is not supported for " "snapshots of shares that are created with share" " servers (created with share-networks).") raise exc.HTTPForbidden(explanation=msg) elif snapshot['status'] in constants.TRANSITIONAL_STATUSES: msg = _("Snapshot with transitional state cannot be " "unmanaged. Snapshot '%(s_id)s' is in '%(state)s' " "state.") % {'state': snapshot['status'], 's_id': snapshot['id']} raise exc.HTTPForbidden(explanation=msg) self.share_api.unmanage_snapshot(context, snapshot, share['host']) except (exception.ShareSnapshotNotFound, exception.ShareNotFound) as e: raise exc.HTTPNotFound(explanation=six.text_type(e)) return webob.Response(status_int=202) @wsgi.Controller.authorize('manage_snapshot') def _manage(self, req, body): """Instruct Manila to manage an existing snapshot. Required HTTP Body: { "snapshot": { "share_id": , "provider_location": } } Optional elements in 'snapshot' are: name A name for the new snapshot. description A description for the new snapshot. driver_options Driver specific dicts for the existing snapshot. """ context = req.environ['manila.context'] snapshot_data = self._validate_manage_parameters(context, body) # NOTE(vponomaryov): compatibility actions are required between API and # DB layers for 'name' and 'description' API params that are # represented in DB as 'display_name' and 'display_description' # appropriately. 
name = snapshot_data.get('display_name', snapshot_data.get('name')) description = snapshot_data.get( 'display_description', snapshot_data.get('description')) snapshot = { 'share_id': snapshot_data['share_id'], 'provider_location': snapshot_data['provider_location'], 'display_name': name, 'display_description': description, } driver_options = snapshot_data.get('driver_options', {}) try: snapshot_ref = self.share_api.manage_snapshot(context, snapshot, driver_options) except (exception.ShareNotFound, exception.ShareSnapshotNotFound) as e: raise exc.HTTPNotFound(explanation=six.text_type(e)) except exception.ManageInvalidShareSnapshot as e: raise exc.HTTPConflict(explanation=six.text_type(e)) return self._view_builder.detail(req, snapshot_ref) def _validate_manage_parameters(self, context, body): if not (body and self.is_valid_body(body, 'snapshot')): msg = _("Snapshot entity not found in request body.") raise exc.HTTPUnprocessableEntity(explanation=msg) required_parameters = ('share_id', 'provider_location') data = body['snapshot'] for parameter in required_parameters: if parameter not in data: msg = _("Required parameter %s not found.") % parameter raise exc.HTTPUnprocessableEntity(explanation=msg) if not data.get(parameter): msg = _("Required parameter %s is empty.") % parameter raise exc.HTTPUnprocessableEntity(explanation=msg) return data @wsgi.Controller.api_version('2.0', '2.6') @wsgi.action('os-reset_status') def snapshot_reset_status_legacy(self, req, id, body): return self._reset_status(req, id, body) @wsgi.Controller.api_version('2.7') @wsgi.action('reset_status') def snapshot_reset_status(self, req, id, body): return self._reset_status(req, id, body) @wsgi.Controller.api_version('2.0', '2.6') @wsgi.action('os-force_delete') def snapshot_force_delete_legacy(self, req, id, body): return self._force_delete(req, id, body) @wsgi.Controller.api_version('2.7') @wsgi.action('force_delete') def snapshot_force_delete(self, req, id, body): return self._force_delete(req, id, body) @wsgi.Controller.api_version('2.12') @wsgi.response(202) def manage(self, req, body): return self._manage(req, body) @wsgi.Controller.api_version('2.12') @wsgi.action('unmanage') def unmanage(self, req, id, body=None): return self._unmanage(req, id, body) def create_resource(): return wsgi.Resource(ShareSnapshotsController()) manila-2.0.0/manila/api/sizelimit.py0000664000567000056710000000241112701407107020505 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack, LLC. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from oslo_middleware import sizelimit from manila.i18n import _LW LOG = log.getLogger(__name__) class RequestBodySizeLimiter(sizelimit.RequestBodySizeLimiter): def __init__(self, *args, **kwargs): LOG.warning(_LW('manila.api.sizelimit:RequestBodySizeLimiter and ' 'manila.api.middleware.sizelimit:' 'RequestBodySizeLimiter ' 'are deprecated. 
Please use ' 'oslo_middleware.sizelimit: ' 'RequestBodySizeLimiter instead.')) super(RequestBodySizeLimiter, self).__init__(*args, **kwargs) manila-2.0.0/manila/api/v1/0000775000567000056710000000000012701407265016457 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/api/v1/shares.py0000664000567000056710000005676212701407107020331 0ustar jenkinsjenkins00000000000000# Copyright 2013 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The shares api.""" import ast import re import string from oslo_log import log from oslo_utils import strutils from oslo_utils import uuidutils import six import webob from webob import exc from manila.api import common from manila.api.openstack import wsgi from manila.api.views import shares as share_views from manila import db from manila import exception from manila.i18n import _ from manila.i18n import _LI from manila import share from manila.share import share_types LOG = log.getLogger(__name__) class ShareMixin(object): """Mixin class for Share API Controllers.""" def _update(self, *args, **kwargs): db.share_update(*args, **kwargs) def _get(self, *args, **kwargs): return self.share_api.get(*args, **kwargs) def _delete(self, *args, **kwargs): return self.share_api.delete(*args, **kwargs) def _migrate(self, *args, **kwargs): return self.share_api.migrate_share(*args, **kwargs) def show(self, req, id): """Return data about the given share.""" context = req.environ['manila.context'] try: share = self.share_api.get(context, id) except exception.NotFound: raise exc.HTTPNotFound() return self._view_builder.detail(req, share) def delete(self, req, id): """Delete a share.""" context = req.environ['manila.context'] LOG.info(_LI("Delete share with id: %s"), id, context=context) try: share = self.share_api.get(context, id) # NOTE(ameade): If the share is in a consistency group, we require # it's id be specified as a param. 
if share.get('consistency_group_id'): consistency_group_id = req.params.get('consistency_group_id') if (share.get('consistency_group_id') and not consistency_group_id): msg = _("Must provide 'consistency_group_id' as a request " "parameter when deleting a share in a consistency " "group.") raise exc.HTTPBadRequest(explanation=msg) elif consistency_group_id != share.get('consistency_group_id'): msg = _("The specified 'consistency_group_id' does not " "match the consistency group id of the share.") raise exc.HTTPBadRequest(explanation=msg) self.share_api.delete(context, share) except exception.NotFound: raise exc.HTTPNotFound() except exception.InvalidShare as e: raise exc.HTTPForbidden(explanation=six.text_type(e)) except exception.Conflict as e: raise exc.HTTPConflict(explanation=six.text_type(e)) return webob.Response(status_int=202) def _migration_start(self, req, id, body, check_notify=False): """Migrate a share to the specified host.""" context = req.environ['manila.context'] try: share = self.share_api.get(context, id) except exception.NotFound: msg = _("Share %s not found.") % id raise exc.HTTPNotFound(explanation=msg) params = body.get('migration_start', body.get('migrate_share', body.get('os-migrate_share'))) try: host = params['host'] except KeyError: raise exc.HTTPBadRequest(explanation=_("Must specify 'host'.")) force_host_copy = params.get('force_host_copy', False) try: force_host_copy = strutils.bool_from_string(force_host_copy, strict=True) except ValueError: msg = _("Invalid value %s for 'force_host_copy'. " "Expecting a boolean.") % force_host_copy raise exc.HTTPBadRequest(explanation=msg) if check_notify: notify = params.get('notify', True) try: notify = strutils.bool_from_string(notify, strict=True) except ValueError: msg = _("Invalid value %s for 'notify'. 
" "Expecting a boolean.") % notify raise exc.HTTPBadRequest(explanation=msg) else: # NOTE(ganso): default notify value is True notify = True try: self.share_api.migration_start(context, share, host, force_host_copy, notify) except exception.Conflict as e: raise exc.HTTPConflict(explanation=six.text_type(e)) return webob.Response(status_int=202) def _migration_complete(self, req, id, body): """Invokes 2nd phase of share migration.""" context = req.environ['manila.context'] try: share = self.share_api.get(context, id) except exception.NotFound: msg = _("Share %s not found.") % id raise exc.HTTPNotFound(explanation=msg) self.share_api.migration_complete(context, share) return webob.Response(status_int=202) def _migration_cancel(self, req, id, body): """Attempts to cancel share migration.""" context = req.environ['manila.context'] try: share = self.share_api.get(context, id) except exception.NotFound: msg = _("Share %s not found.") % id raise exc.HTTPNotFound(explanation=msg) self.share_api.migration_cancel(context, share) return webob.Response(status_int=202) def _migration_get_progress(self, req, id, body): """Retrieve share migration progress for a given share.""" context = req.environ['manila.context'] try: share = self.share_api.get(context, id) except exception.NotFound: msg = _("Share %s not found.") % id raise exc.HTTPNotFound(explanation=msg) result = self.share_api.migration_get_progress(context, share) return self._view_builder.migration_get_progress(result) def index(self, req): """Returns a summary list of shares.""" return self._get_shares(req, is_detail=False) def detail(self, req): """Returns a detailed list of shares.""" return self._get_shares(req, is_detail=True) def _get_shares(self, req, is_detail): """Returns a list of shares, transformed through view builder.""" context = req.environ['manila.context'] search_opts = {} search_opts.update(req.GET) # Remove keys that are not related to share attrs search_opts.pop('limit', None) search_opts.pop('offset', None) sort_key = search_opts.pop('sort_key', 'created_at') sort_dir = search_opts.pop('sort_dir', 'desc') # Deserialize dicts if 'metadata' in search_opts: search_opts['metadata'] = ast.literal_eval(search_opts['metadata']) if 'extra_specs' in search_opts: search_opts['extra_specs'] = ast.literal_eval( search_opts['extra_specs']) # NOTE(vponomaryov): Manila stores in DB key 'display_name', but # allows to use both keys 'name' and 'display_name'. It is leftover # from Cinder v1 and v2 APIs. if 'name' in search_opts: search_opts['display_name'] = search_opts.pop('name') if sort_key == 'name': sort_key = 'display_name' common.remove_invalid_options( context, search_opts, self._get_share_search_options()) shares = self.share_api.get_all( context, search_opts=search_opts, sort_key=sort_key, sort_dir=sort_dir) limited_list = common.limited(shares, req) if is_detail: shares = self._view_builder.detail_list(req, limited_list) else: shares = self._view_builder.summary_list(req, limited_list) return shares def _get_share_search_options(self): """Return share search options allowed by non-admin.""" # NOTE(vponomaryov): share_server_id depends on policy, allow search # by it for non-admins in case policy changed. # Also allow search by extra_specs in case policy # for it allows non-admin access. 
return ( 'display_name', 'status', 'share_server_id', 'volume_type_id', 'share_type_id', 'snapshot_id', 'host', 'share_network_id', 'is_public', 'metadata', 'extra_specs', 'sort_key', 'sort_dir', 'consistency_group_id', 'cgsnapshot_id' ) def update(self, req, id, body): """Update a share.""" context = req.environ['manila.context'] if not body or 'share' not in body: raise exc.HTTPUnprocessableEntity() share_data = body['share'] valid_update_keys = ( 'display_name', 'display_description', 'is_public', ) update_dict = {key: share_data[key] for key in valid_update_keys if key in share_data} try: share = self.share_api.get(context, id) except exception.NotFound: raise exc.HTTPNotFound() share = self.share_api.update(context, share, update_dict) share.update(update_dict) return self._view_builder.detail(req, share) def create(self, req, body): # Remove consistency group attributes body.get('share', {}).pop('consistency_group_id', None) share = self._create(req, body) return share def _create(self, req, body): """Creates a new share.""" context = req.environ['manila.context'] if not self.is_valid_body(body, 'share'): raise exc.HTTPUnprocessableEntity() share = body['share'] # NOTE(rushiagr): Manila API allows 'name' instead of 'display_name'. if share.get('name'): share['display_name'] = share.get('name') del share['name'] # NOTE(rushiagr): Manila API allows 'description' instead of # 'display_description'. if share.get('description'): share['display_description'] = share.get('description') del share['description'] size = share['size'] share_proto = share['share_proto'].upper() msg = (_LI("Create %(share_proto)s share of %(size)s GB") % {'share_proto': share_proto, 'size': size}) LOG.info(msg, context=context) availability_zone = share.get('availability_zone') if availability_zone: try: db.availability_zone_get(context, availability_zone) except exception.AvailabilityZoneNotFound as e: raise exc.HTTPNotFound(explanation=six.text_type(e)) kwargs = { 'availability_zone': availability_zone, 'metadata': share.get('metadata'), 'is_public': share.get('is_public', False), 'consistency_group_id': share.get('consistency_group_id') } snapshot_id = share.get('snapshot_id') if snapshot_id: snapshot = self.share_api.get_snapshot(context, snapshot_id) else: snapshot = None kwargs['snapshot_id'] = snapshot_id share_network_id = share.get('share_network_id') if snapshot: # Need to check that share_network_id from snapshot's # parents share equals to share_network_id from args. # If share_network_id is empty than update it with # share_network_id of parent share. 
parent_share = self.share_api.get(context, snapshot['share_id']) parent_share_net_id = parent_share.instance['share_network_id'] if share_network_id: if share_network_id != parent_share_net_id: msg = "Share network ID should be the same as snapshot's" \ " parent share's or empty" raise exc.HTTPBadRequest(explanation=msg) elif parent_share_net_id: share_network_id = parent_share_net_id if share_network_id: try: self.share_api.get_share_network( context, share_network_id) except exception.ShareNetworkNotFound as e: raise exc.HTTPNotFound(explanation=six.text_type(e)) kwargs['share_network_id'] = share_network_id display_name = share.get('display_name') display_description = share.get('display_description') if 'share_type' in share and 'volume_type' in share: msg = 'Cannot specify both share_type and volume_type' raise exc.HTTPBadRequest(explanation=msg) req_share_type = share.get('share_type', share.get('volume_type')) share_type = None if req_share_type: try: if not uuidutils.is_uuid_like(req_share_type): share_type = share_types.get_share_type_by_name( context, req_share_type) else: share_type = share_types.get_share_type( context, req_share_type) except exception.ShareTypeNotFound: msg = _("Share type not found.") raise exc.HTTPNotFound(explanation=msg) elif not snapshot: def_share_type = share_types.get_default_share_type() if def_share_type: share_type = def_share_type # Only use in create share feature. Create share from snapshot # and create share with consistency group features not # need this check. if (not share_network_id and not snapshot and not share.get('consistency_group_id') and share_type and share_type.get('extra_specs') and (strutils.bool_from_string(share_type.get('extra_specs'). get('driver_handles_share_servers')))): msg = _('Share network must be set when the ' 'driver_handles_share_servers is true.') raise exc.HTTPBadRequest(explanation=msg) if share_type: kwargs['share_type'] = share_type new_share = self.share_api.create(context, share_proto, size, display_name, display_description, **kwargs) return self._view_builder.detail(req, new_share) @staticmethod def _validate_common_name(access): """Validate common name passed by user. 'access' is used as the certificate's CN (common name) to which access is allowed or denied by the backend. The standard allows for just about any string in the common name. The meaning of a string depends on its interpretation and is limited to 64 characters. """ if len(access) == 0 or len(access) > 64: exc_str = _('Invalid CN (common name). Must be 1-64 chars long') raise webob.exc.HTTPBadRequest(explanation=exc_str) @staticmethod def _validate_username(access): valid_username_re = '[\w\.\-_\`;\'\{\}\[\]\\\\]{4,32}$' username = access if not re.match(valid_username_re, username): exc_str = ('Invalid user or group name. 
Must be 4-32 characters ' 'and consist of alphanumeric characters and ' 'special characters ]{.-_\'`;}[\\') raise webob.exc.HTTPBadRequest(explanation=exc_str) @staticmethod def _validate_ip_range(ip_range): ip_range = ip_range.split('/') exc_str = ('Supported ip format examples:\n' '\t10.0.0.2, 10.0.0.0/24') if len(ip_range) > 2: raise webob.exc.HTTPBadRequest(explanation=exc_str) if len(ip_range) == 2: try: prefix = int(ip_range[1]) if prefix < 0 or prefix > 32: raise ValueError() except ValueError: msg = 'IP prefix should be in range from 0 to 32' raise webob.exc.HTTPBadRequest(explanation=msg) ip_range = ip_range[0].split('.') if len(ip_range) != 4: raise webob.exc.HTTPBadRequest(explanation=exc_str) for item in ip_range: try: if 0 <= int(item) <= 255: continue raise ValueError() except ValueError: raise webob.exc.HTTPBadRequest(explanation=exc_str) @staticmethod def _validate_cephx_id(cephx_id): if not cephx_id: raise webob.exc.HTTPBadRequest(explanation=_( 'Ceph IDs may not be empty')) # This restriction may be lifted in Ceph in the future: # http://tracker.ceph.com/issues/14626 if not set(cephx_id) <= set(string.printable): raise webob.exc.HTTPBadRequest(explanation=_( 'Ceph IDs must consist of ASCII printable characters')) # Periods are technically permitted, but we restrict them here # to avoid confusion where users are unsure whether they should # include the "client." prefix: otherwise they could accidentally # create "client.client.foobar". if '.' in cephx_id: raise webob.exc.HTTPBadRequest(explanation=_( 'Ceph IDs may not contain periods')) def _allow_access(self, req, id, body, enable_ceph=False): """Add share access rule.""" context = req.environ['manila.context'] access_data = body.get('allow_access', body.get('os-allow_access')) share = self.share_api.get(context, id) access_type = access_data['access_type'] access_to = access_data['access_to'] if access_type == 'ip': self._validate_ip_range(access_to) elif access_type == 'user': self._validate_username(access_to) elif access_type == 'cert': self._validate_common_name(access_to.strip()) elif access_type == "cephx" and enable_ceph: self._validate_cephx_id(access_to) else: if enable_ceph: exc_str = _("Only 'ip', 'user', 'cert' or 'cephx' access " "types are supported.") else: exc_str = _("Only 'ip', 'user' or 'cert' access types " "are supported.") raise webob.exc.HTTPBadRequest(explanation=exc_str) try: access = self.share_api.allow_access( context, share, access_type, access_to, access_data.get('access_level')) except exception.ShareAccessExists as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) return {'access': access} def _deny_access(self, req, id, body): """Remove share access rule.""" context = req.environ['manila.context'] access_id = body.get( 'deny_access', body.get('os-deny_access'))['access_id'] try: access = self.share_api.access_get(context, access_id) if access.share_id != id: raise exception.NotFound() share = self.share_api.get(context, id) except exception.NotFound as error: raise webob.exc.HTTPNotFound(explanation=six.text_type(error)) self.share_api.deny_access(context, share, access) return webob.Response(status_int=202) def _access_list(self, req, id, body): """list share access rules.""" context = req.environ['manila.context'] share = self.share_api.get(context, id) access_list = self.share_api.access_get_all(context, share) return {'access_list': access_list} def _extend(self, req, id, body): """Extend size of a share.""" context = req.environ['manila.context'] share, size = 
self._get_valid_resize_parameters( context, id, body, 'os-extend') try: self.share_api.extend(context, share, size) except (exception.InvalidInput, exception.InvalidShare) as e: raise webob.exc.HTTPBadRequest(explanation=six.text_type(e)) except exception.ShareSizeExceedsAvailableQuota as e: raise webob.exc.HTTPForbidden(explanation=six.text_type(e)) return webob.Response(status_int=202) def _shrink(self, req, id, body): """Shrink size of a share.""" context = req.environ['manila.context'] share, size = self._get_valid_resize_parameters( context, id, body, 'os-shrink') try: self.share_api.shrink(context, share, size) except (exception.InvalidInput, exception.InvalidShare) as e: raise webob.exc.HTTPBadRequest(explanation=six.text_type(e)) return webob.Response(status_int=202) def _get_valid_resize_parameters(self, context, id, body, action): try: share = self.share_api.get(context, id) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=six.text_type(e)) try: size = int(body.get(action, body.get(action.split('os-')[-1]))['new_size']) except (KeyError, ValueError, TypeError): msg = _("New share size must be specified as an integer.") raise webob.exc.HTTPBadRequest(explanation=msg) return share, size class ShareController(wsgi.Controller, ShareMixin, wsgi.AdminActionsMixin): """The Shares API v1 controller for the OpenStack API.""" resource_name = 'share' _view_builder_class = share_views.ViewBuilder def __init__(self): super(self.__class__, self).__init__() self.share_api = share.API() @wsgi.action('os-reset_status') def share_reset_status(self, req, id, body): """Reset status of a share.""" return self._reset_status(req, id, body) @wsgi.action('os-force_delete') def share_force_delete(self, req, id, body): """Delete a share, bypassing the check for status.""" return self._force_delete(req, id, body) @wsgi.action('os-allow_access') def allow_access(self, req, id, body): """Add share access rule.""" return self._allow_access(req, id, body) @wsgi.action('os-deny_access') def deny_access(self, req, id, body): """Remove share access rule.""" return self._deny_access(req, id, body) @wsgi.action('os-access_list') def access_list(self, req, id, body): """List share access rules.""" return self._access_list(req, id, body) @wsgi.action('os-extend') def extend(self, req, id, body): """Extend size of a share.""" return self._extend(req, id, body) @wsgi.action('os-shrink') def shrink(self, req, id, body): """Shrink size of a share.""" return self._shrink(req, id, body) def create_resource(): return wsgi.Resource(ShareController()) manila-2.0.0/manila/api/v1/limits.py0000664000567000056710000003350612701407107020334 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Module dedicated functions/classes dealing with rate limiting requests. 
""" import collections import copy import math import re import time from oslo_serialization import jsonutils from oslo_utils import importutils from six.moves import http_client import webob.dec import webob.exc from manila.api.openstack import wsgi from manila.api.views import limits as limits_views from manila.i18n import _ from manila import quota from manila import wsgi as base_wsgi QUOTAS = quota.QUOTAS # Convenience constants for the limits dictionary passed to Limiter(). PER_SECOND = 1 PER_MINUTE = 60 PER_HOUR = 60 * 60 PER_DAY = 60 * 60 * 24 class LimitsController(wsgi.Controller): """Controller for accessing limits in the OpenStack API.""" def index(self, req): """Return all global and rate limit information.""" context = req.environ['manila.context'] quotas = QUOTAS.get_project_quotas(context, context.project_id, usages=True) abs_limits = {'in_use': {}, 'limit': {}} for k, v in quotas.items(): abs_limits['limit'][k] = v['limit'] abs_limits['in_use'][k] = v['in_use'] rate_limits = req.environ.get("manila.limits", []) builder = self._get_view_builder(req) return builder.build(rate_limits, abs_limits) def _get_view_builder(self, req): return limits_views.ViewBuilder() def create_resource(): return wsgi.Resource(LimitsController()) class Limit(object): """Stores information about a limit for HTTP requests.""" UNITS = { 1: "SECOND", 60: "MINUTE", 60 * 60: "HOUR", 60 * 60 * 24: "DAY", } UNIT_MAP = {v: k for k, v in UNITS.items()} def __init__(self, verb, uri, regex, value, unit): """Initialize a new `Limit`. @param verb: HTTP verb (POST, PUT, etc.) @param uri: Human-readable URI @param regex: Regular expression format for this limit @param value: Integer number of requests which can be made @param unit: Unit of measure for the value parameter """ self.verb = verb self.uri = uri self.regex = regex self.value = int(value) self.unit = unit self.unit_string = self.display_unit().lower() self.remaining = int(value) if value <= 0: raise ValueError("Limit value must be > 0") self.last_request = None self.next_request = None self.water_level = 0 self.capacity = self.unit self.request_value = float(self.capacity) / float(self.value) msg = (_("Only %(value)s %(verb)s request(s) can be " "made to %(uri)s every %(unit_string)s.") % {'value': self.value, 'verb': self.verb, 'uri': self.uri, 'unit_string': self.unit_string}) self.error_message = msg def __call__(self, verb, url): """Represents a call to this limit from a relevant request. @param verb: string http verb (POST, GET, etc.) @param url: string URL """ if self.verb != verb or not re.match(self.regex, url): return now = self._get_time() if self.last_request is None: self.last_request = now leak_value = now - self.last_request self.water_level -= leak_value self.water_level = max(self.water_level, 0) self.water_level += self.request_value difference = self.water_level - self.capacity self.last_request = now if difference > 0: self.water_level -= self.request_value self.next_request = now + difference return difference cap = self.capacity water = self.water_level val = self.value self.remaining = math.floor(((cap - water) / cap) * val) self.next_request = now def _get_time(self): """Retrieve the current time. 
Broken out for testability.""" return time.time() def display_unit(self): """Display the string name of the unit.""" return self.UNITS.get(self.unit, "UNKNOWN") def display(self): """Return a useful representation of this class.""" return { "verb": self.verb, "URI": self.uri, "regex": self.regex, "value": self.value, "remaining": int(self.remaining), "unit": self.display_unit(), "resetTime": int(self.next_request or self._get_time()), } # "Limit" format is a dictionary with the HTTP verb, human-readable URI, # a regular-expression to match, value and unit of measure (PER_DAY, etc.) DEFAULT_LIMITS = [ Limit("POST", "*", ".*", 10, PER_MINUTE), Limit("POST", "*/servers", "^/servers", 50, PER_DAY), Limit("PUT", "*", ".*", 10, PER_MINUTE), Limit("GET", "*changes-since*", ".*changes-since.*", 3, PER_MINUTE), Limit("DELETE", "*", ".*", 100, PER_MINUTE), ] class RateLimitingMiddleware(base_wsgi.Middleware): """Rate-limits requests passing through this middleware. All limit information is stored in memory for this implementation. """ def __init__(self, application, limits=None, limiter=None, **kwargs): """Initialize new `RateLimitingMiddleware`. `RateLimitingMiddleware` wraps the given WSGI application and sets up the given limits. @param application: WSGI application to wrap @param limits: String describing limits @param limiter: String identifying class for representing limits Other parameters are passed to the constructor for the limiter. """ base_wsgi.Middleware.__init__(self, application) # Select the limiter class if limiter is None: limiter = Limiter else: limiter = importutils.import_class(limiter) # Parse the limits, if any are provided if limits is not None: limits = limiter.parse_limits(limits) self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): """Represents a single call through this middleware. We should record the request if we have a limit relevant to it. If no limit is relevant to the request, ignore it. If the request should be rate limited, return a fault telling the user they are over the limit and need to retry later. """ verb = req.method url = req.url context = req.environ.get("manila.context") if context: username = context.user_id else: username = None delay, error = self._limiter.check_for_delay(verb, url, username) if delay: msg = _("This request was rate-limited.") retry = time.time() + delay return wsgi.OverLimitFault(msg, error, retry) req.environ["manila.limits"] = self._limiter.get_limits(username) return self.application class Limiter(object): """Rate-limit checking class which handles limits in memory.""" def __init__(self, limits, **kwargs): """Initialize the new `Limiter`. @param limits: List of `Limit` objects """ self.limits = copy.deepcopy(limits) self.levels = collections.defaultdict(lambda: copy.deepcopy(limits)) # Pick up any per-user limit information for key, value in kwargs.items(): if key.startswith('user:'): username = key[5:] self.levels[username] = self.parse_limits(value) def get_limits(self, username=None): """Return the limits for a given user.""" return [limit.display() for limit in self.levels[username]] def check_for_delay(self, verb, url, username=None): """Check the given verb/user/user triplet for limit. 
@return: Tuple of delay (in seconds) and error message (or None, None) """ delays = [] for limit in self.levels[username]: delay = limit(verb, url) if delay: delays.append((delay, limit.error_message)) if delays: delays.sort() return delays[0] return None, None # Note: This method gets called before the class is instantiated, # so this must be either a static method or a class method. It is # used to develop a list of limits to feed to the constructor. We # put this in the class so that subclasses can override the # default limit parsing. @staticmethod def parse_limits(limits): """Convert a string into a list of Limit instances. This implementation expects a semicolon-separated sequence of parenthesized groups, where each group contains a comma-separated sequence consisting of HTTP method, user-readable URI, a URI reg-exp, an integer number of requests which can be made, and a unit of measure. Valid values for the latter are "SECOND", "MINUTE", "HOUR", and "DAY". @return: List of Limit instances. """ # Handle empty limit strings limits = limits.strip() if not limits: return [] # Split up the limits by semicolon result = [] for group in limits.split(';'): group = group.strip() if group[:1] != '(' or group[-1:] != ')': raise ValueError("Limit rules must be surrounded by " "parentheses") group = group[1:-1] # Extract the Limit arguments args = [a.strip() for a in group.split(',')] if len(args) != 5: raise ValueError("Limit rules must contain the following " "arguments: verb, uri, regex, value, unit") # Pull out the arguments verb, uri, regex, value, unit = args # Upper-case the verb verb = verb.upper() # Convert value--raises ValueError if it's not integer value = int(value) # Convert unit unit = unit.upper() if unit not in Limit.UNIT_MAP: raise ValueError("Invalid units specified") unit = Limit.UNIT_MAP[unit] # Build a limit result.append(Limit(verb, uri, regex, value, unit)) return result class WsgiLimiter(object): """Rate-limit checking from a WSGI application. Uses an in-memory `Limiter`. To use, POST ``/`` with JSON data such as:: { "verb" : GET, "path" : "/servers" } and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds header containing the number of seconds to wait before the action would succeed. """ def __init__(self, limits=None): """Initialize the new `WsgiLimiter`. @param limits: List of `Limit` objects """ self._limiter = Limiter(limits or DEFAULT_LIMITS) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, request): """Handles a call to this application. Returns 204 if the request is acceptable to the limiter, else a 403 is returned with a relevant header indicating when the request *will* succeed. """ if request.method != "POST": raise webob.exc.HTTPMethodNotAllowed() try: info = dict(jsonutils.loads(request.body)) except ValueError: raise webob.exc.HTTPBadRequest() username = request.path_info_pop() verb = info.get("verb") path = info.get("path") delay, error = self._limiter.check_for_delay(verb, path, username) if delay: headers = {"X-Wait-Seconds": "%.2f" % delay} return webob.exc.HTTPForbidden(headers=headers, explanation=error) else: return webob.exc.HTTPNoContent() class WsgiLimiterProxy(object): """Rate-limit requests based on answers from a remote source.""" def __init__(self, limiter_address): """Initialize the new `WsgiLimiterProxy`. 
@param limiter_address: IP/port combination of where to request limit """ self.limiter_address = limiter_address def check_for_delay(self, verb, path, username=None): body = jsonutils.dumps({"verb": verb, "path": path}) headers = {"Content-Type": "application/json"} conn = http_client.HTTPConnection(self.limiter_address) if username: conn.request("POST", "/%s" % (username), body, headers) else: conn.request("POST", "/", body, headers) resp = conn.getresponse() if 200 >= resp.status < 300: return None, None return resp.getheader("X-Wait-Seconds"), resp.read() or None # Note: This method gets called before the class is instantiated, # so this must be either a static method or a class method. It is # used to develop a list of limits to feed to the constructor. # This implementation returns an empty list, since all limit # decisions are made by a remote server. @staticmethod def parse_limits(limits): """Ignore a limits string. This simply doesn't apply for the limit proxy. @return: Empty list. """ return [] manila-2.0.0/manila/api/v1/share_manage.py0000664000567000056710000001214112701407112021431 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import six from webob import exc from manila.api.openstack import wsgi from manila.api.views import shares as share_views from manila import exception from manila.i18n import _ from manila import share from manila.share import share_types from manila.share import utils as share_utils from manila import utils class ShareManageMixin(object): @wsgi.Controller.authorize('manage') def _manage(self, req, body): context = req.environ['manila.context'] share_data = self._validate_manage_parameters(context, body) # NOTE(vponomaryov): compatibility actions are required between API and # DB layers for 'name' and 'description' API params that are # represented in DB as 'display_name' and 'display_description' # appropriately. 
name = share_data.get('display_name', share_data.get('name')) description = share_data.get( 'display_description', share_data.get('description')) share = { 'host': share_data['service_host'], 'export_location': share_data['export_path'], 'share_proto': share_data['protocol'].upper(), 'share_type_id': share_data['share_type_id'], 'display_name': name, 'display_description': description, } if share_data.get('is_public') is not None: share['is_public'] = share_data['is_public'] driver_options = share_data.get('driver_options', {}) try: share_ref = self.share_api.manage(context, share, driver_options) except exception.PolicyNotAuthorized as e: raise exc.HTTPForbidden(explanation=six.text_type(e)) except exception.ManilaException as e: raise exc.HTTPConflict(explanation=six.text_type(e)) return self._view_builder.detail(req, share_ref) def _validate_manage_parameters(self, context, body): if not (body and self.is_valid_body(body, 'share')): msg = _("Share entity not found in request body") raise exc.HTTPUnprocessableEntity(explanation=msg) required_parameters = ('export_path', 'service_host', 'protocol') data = body['share'] for parameter in required_parameters: if parameter not in data: msg = _("Required parameter %s not found") % parameter raise exc.HTTPUnprocessableEntity(explanation=msg) if not data.get(parameter): msg = _("Required parameter %s is empty") % parameter raise exc.HTTPUnprocessableEntity(explanation=msg) if not share_utils.extract_host(data['service_host'], 'pool'): msg = _("service_host parameter should contain pool.") raise exc.HTTPBadRequest(explanation=msg) try: utils.validate_service_host( context, share_utils.extract_host(data['service_host'])) except exception.ServiceNotFound as e: raise exc.HTTPNotFound(explanation=six.text_type(e)) except exception.PolicyNotAuthorized as e: raise exc.HTTPForbidden(explanation=six.text_type(e)) except exception.AdminRequired as e: raise exc.HTTPForbidden(explanation=six.text_type(e)) except exception.ServiceIsDown as e: raise exc.HTTPBadRequest(explanation=six.text_type(e)) data['share_type_id'] = self._get_share_type_id( context, data.get('share_type')) return data @staticmethod def _get_share_type_id(context, share_type): try: stype = share_types.get_share_type_by_name_or_id(context, share_type) return stype['id'] except exception.ShareTypeNotFound as e: raise exc.HTTPNotFound(explanation=six.text_type(e)) class ShareManageController(ShareManageMixin, wsgi.Controller): """Allows existing share to be 'managed' by Manila.""" resource_name = "share" _view_builder_class = share_views.ViewBuilder def __init__(self, *args, **kwargs): super(self.__class__, self).__init__(*args, **kwargs) self.share_api = share.API() @wsgi.Controller.api_version('1.0', '2.6') def create(self, req, body): """Legacy method for 'manage share' operation. Should be removed when minimum API version becomes equal to or greater than v2.7 """ body.get('share', {}).pop('is_public', None) return self._manage(req, body) def create_resource(): return wsgi.Resource(ShareManageController()) manila-2.0.0/manila/api/v1/share_unmanage.py0000664000567000056710000000605612701407107022010 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log import six import webob from webob import exc from manila.api.openstack import wsgi from manila.common import constants from manila import exception from manila.i18n import _, _LI from manila import share LOG = log.getLogger(__name__) class ShareUnmanageMixin(object): @wsgi.Controller.authorize("unmanage") def _unmanage(self, req, id, body=None): """Unmanage a share.""" context = req.environ['manila.context'] LOG.info(_LI("Unmanage share with id: %s"), id, context=context) try: share = self.share_api.get(context, id) if share['instance'].get('share_server_id'): msg = _("Operation 'unmanage' is not supported for shares " "that are created on top of share servers " "(created with share-networks).") raise exc.HTTPForbidden(explanation=msg) elif share['status'] in constants.TRANSITIONAL_STATUSES: msg = _("Share with transitional state can not be unmanaged. " "Share '%(s_id)s' is in '%(state)s' state.") % dict( state=share['status'], s_id=share['id']) raise exc.HTTPForbidden(explanation=msg) snapshots = self.share_api.db.share_snapshot_get_all_for_share( context, id) if snapshots: msg = _("Share '%(s_id)s' can not be unmanaged because it has " "'%(amount)s' dependent snapshot(s).") % { 's_id': id, 'amount': len(snapshots)} raise exc.HTTPForbidden(explanation=msg) self.share_api.unmanage(context, share) except exception.NotFound as e: raise exc.HTTPNotFound(explanation=six.text_type(e)) except (exception.InvalidShare, exception.PolicyNotAuthorized) as e: raise exc.HTTPForbidden(explanation=six.text_type(e)) return webob.Response(status_int=202) class ShareUnmanageController(ShareUnmanageMixin, wsgi.Controller): """The Unmanage API controller for the OpenStack API.""" resource_name = "share" def __init__(self, *args, **kwargs): super(self.__class__, self).__init__(*args, **kwargs) self.share_api = share.API() @wsgi.Controller.api_version('1.0', '2.6') def unmanage(self, req, id): return self._unmanage(req, id) def create_resource(): return wsgi.Resource(ShareUnmanageController()) manila-2.0.0/manila/api/v1/share_servers.py0000664000567000056710000001113512701407107021700 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
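# NOTE(editor): The ShareUnmanageMixin above rejects an unmanage call in three
# cases: the share is exported through a share server, the share is in a
# transitional state, or the share still has dependent snapshots. The helper
# below is a simplified, standalone restatement of that decision logic for
# illustration only; it is not part of manila's API, and the
# TRANSITIONAL_STATUSES tuple is an assumption written out here rather than
# imported from manila.common.constants.
#
#     TRANSITIONAL_STATUSES = (
#         'creating', 'deleting', 'migrating', 'extending', 'shrinking',
#     )
#
#
#     def can_unmanage(share, snapshot_count):
#         """Return (allowed, reason) for an unmanage request.
#
#         'share' is a dict-like object with 'status' and an 'instance' dict
#         carrying 'share_server_id', mirroring the fields the controller
#         above reads.
#         """
#         if share['instance'].get('share_server_id'):
#             return False, 'share is exported through a share server'
#         if share['status'] in TRANSITIONAL_STATUSES:
#             return False, 'share is in a transitional state'
#         if snapshot_count:
#             return False, ('share still has %d dependent snapshot(s)'
#                            % snapshot_count)
#         return True, None
#
#
#     # A share created without a share network, in 'available' state and
#     # with no snapshots, may be unmanaged:
#     print(can_unmanage({'status': 'available', 'instance': {}}, 0))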
from oslo_log import log import six import webob from webob import exc from manila.api.openstack import wsgi from manila.api.views import share_servers as share_servers_views from manila.common import constants from manila.db import api as db_api from manila import exception from manila.i18n import _ from manila import share LOG = log.getLogger(__name__) class ShareServerController(wsgi.Controller): """The Share Server API controller for the OpenStack API.""" def __init__(self): self.share_api = share.API() self._view_builder_class = share_servers_views.ViewBuilder self.resource_name = 'share_server' super(ShareServerController, self).__init__() @wsgi.Controller.authorize def index(self, req): """Returns a list of share servers.""" context = req.environ['manila.context'] search_opts = {} search_opts.update(req.GET) share_servers = db_api.share_server_get_all(context) for s in share_servers: s.project_id = s.share_network['project_id'] if s.share_network['name']: s.share_network_name = s.share_network['name'] else: s.share_network_name = s.share_network_id if search_opts: for k, v in search_opts.items(): share_servers = [s for s in share_servers if (hasattr(s, k) and s[k] == v or k == 'share_network' and v in [s.share_network['name'], s.share_network['id']])] return self._view_builder.build_share_servers(share_servers) @wsgi.Controller.authorize def show(self, req, id): """Return data about the requested share server.""" context = req.environ['manila.context'] try: server = db_api.share_server_get(context, id) server.project_id = server.share_network["project_id"] if server.share_network['name']: server.share_network_name = server.share_network['name'] else: server.share_network_name = server.share_network_id except exception.ShareServerNotFound as e: raise exc.HTTPNotFound(explanation=six.text_type(e)) return self._view_builder.build_share_server(server) @wsgi.Controller.authorize def details(self, req, id): """Return details for requested share server.""" context = req.environ['manila.context'] try: share_server = db_api.share_server_get(context, id) except exception.ShareServerNotFound as e: raise exc.HTTPNotFound(explanation=six.text_type(e)) return self._view_builder.build_share_server_details( share_server['backend_details']) @wsgi.Controller.authorize def delete(self, req, id): """Delete specified share server.""" context = req.environ['manila.context'] try: share_server = db_api.share_server_get(context, id) except exception.ShareServerNotFound as e: raise exc.HTTPNotFound(explanation=six.text_type(e)) allowed_statuses = [constants.STATUS_ERROR, constants.STATUS_ACTIVE] if share_server['status'] not in allowed_statuses: data = { 'status': share_server['status'], 'allowed_statuses': allowed_statuses, } msg = _("Share server's actual status is %(status)s, allowed " "statuses for deletion are %(allowed_statuses)s.") % (data) raise exc.HTTPForbidden(explanation=msg) LOG.debug("Deleting share server with id: %s.", id) try: self.share_api.delete_share_server(context, share_server) except exception.ShareServerInUse as e: raise exc.HTTPConflict(explanation=six.text_type(e)) return webob.Response(status_int=202) def create_resource(): return wsgi.Resource(ShareServerController()) manila-2.0.0/manila/api/v1/share_networks.py0000664000567000056710000003527712701407107022100 0ustar jenkinsjenkins00000000000000# Copyright 2014 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The shares api.""" from oslo_db import exception as db_exception from oslo_log import log from oslo_utils import timeutils import six import webob from webob import exc from manila.api import common from manila.api.openstack import wsgi from manila.api.views import share_networks as share_networks_views from manila.db import api as db_api from manila import exception from manila.i18n import _ from manila.i18n import _LE from manila.i18n import _LW from manila import policy from manila import quota from manila.share import rpcapi as share_rpcapi RESOURCE_NAME = 'share_network' RESOURCES_NAME = 'share_networks' LOG = log.getLogger(__name__) QUOTAS = quota.QUOTAS class ShareNetworkController(wsgi.Controller): """The Share Network API controller for the OpenStack API.""" _view_builder_class = share_networks_views.ViewBuilder def __init__(self): super(ShareNetworkController, self).__init__() self.share_rpcapi = share_rpcapi.ShareAPI() def show(self, req, id): """Return data about the requested network info.""" context = req.environ['manila.context'] policy.check_policy(context, RESOURCE_NAME, 'show') try: share_network = db_api.share_network_get(context, id) except exception.ShareNetworkNotFound as e: raise exc.HTTPNotFound(explanation=six.text_type(e)) return self._view_builder.build_share_network(share_network) def delete(self, req, id): """Delete specified share network.""" context = req.environ['manila.context'] policy.check_policy(context, RESOURCE_NAME, 'delete') try: share_network = db_api.share_network_get(context, id) except exception.ShareNetworkNotFound as e: raise exc.HTTPNotFound(explanation=six.text_type(e)) share_instances = ( db_api.share_instances_get_all_by_share_network(context, id) ) if share_instances: msg = _("Can not delete share network %(id)s, it has " "%(len)s share(s).") % {'id': id, 'len': len(share_instances)} LOG.error(msg) raise exc.HTTPConflict(explanation=msg) # NOTE(ameade): Do not allow deletion of share network used by CG cg_count = db_api.count_consistency_groups_in_share_network(context, id) if cg_count: msg = _("Can not delete share network %(id)s, it has %(len)s " "consistency group(s).") % {'id': id, 'len': cg_count} LOG.error(msg) raise exc.HTTPConflict(explanation=msg) for share_server in share_network['share_servers']: self.share_rpcapi.delete_share_server(context, share_server) db_api.share_network_delete(context, id) try: reservations = QUOTAS.reserve( context, project_id=share_network['project_id'], share_networks=-1, user_id=share_network['user_id']) except Exception: LOG.exception(_LE("Failed to update usages deleting " "share-network.")) else: QUOTAS.commit(context, reservations, project_id=share_network['project_id'], user_id=share_network['user_id']) return webob.Response(status_int=202) def _get_share_networks(self, req, is_detail=True): """Returns a list of share networks.""" context = req.environ['manila.context'] search_opts = {} search_opts.update(req.GET) if ('all_tenants' in search_opts or ('project_id' in search_opts and search_opts['project_id'] != context.project_id)): policy.check_policy(context, RESOURCE_NAME, 
'get_all_share_networks') if 'security_service_id' in search_opts: networks = db_api.share_network_get_all_by_security_service( context, search_opts['security_service_id']) elif ('project_id' in search_opts and search_opts['project_id'] != context.project_id): networks = db_api.share_network_get_all_by_project( context, search_opts['project_id']) elif 'all_tenants' in search_opts: networks = db_api.share_network_get_all(context) else: networks = db_api.share_network_get_all_by_project( context, context.project_id) date_parsing_error_msg = '''%s is not in yyyy-mm-dd format.''' if 'created_since' in search_opts: try: created_since = timeutils.parse_strtime( search_opts['created_since'], fmt="%Y-%m-%d") except ValueError: msg = date_parsing_error_msg % search_opts['created_since'] raise exc.HTTPBadRequest(explanation=msg) networks = [network for network in networks if network['created_at'] >= created_since] if 'created_before' in search_opts: try: created_before = timeutils.parse_strtime( search_opts['created_before'], fmt="%Y-%m-%d") except ValueError: msg = date_parsing_error_msg % search_opts['created_before'] raise exc.HTTPBadRequest(explanation=msg) networks = [network for network in networks if network['created_at'] <= created_before] opts_to_remove = [ 'all_tenants', 'created_since', 'created_before', 'limit', 'offset', 'security_service_id', ] for opt in opts_to_remove: search_opts.pop(opt, None) if search_opts: for key, value in search_opts.items(): if key in ['ip_version', 'segmentation_id']: value = int(value) networks = [network for network in networks if network[key] == value] limited_list = common.limited(networks, req) return self._view_builder.build_share_networks(limited_list, is_detail) def index(self, req): """Returns a summary list of share networks.""" policy.check_policy(req.environ['manila.context'], RESOURCE_NAME, 'index') return self._get_share_networks(req, is_detail=False) def detail(self, req): """Returns a detailed list of share networks.""" policy.check_policy(req.environ['manila.context'], RESOURCE_NAME, 'detail') return self._get_share_networks(req) @staticmethod def _verify_no_mutually_exclusive_data(share_network, update_data=None): update_data = update_data or dict() neutron_net_id = ( share_network.get('neutron_net_id') or update_data.get('neutron_net_id')) neutron_subnet_id = ( share_network.get('neutron_subnet_id') or update_data.get('neutron_subnet_id')) nova_net_id = ( share_network.get('nova_net_id') or update_data.get('nova_net_id')) if nova_net_id and (neutron_net_id or neutron_subnet_id): msg = _("Neutron net data and Nova net data are mutually " "exclusive. Only one of these are allowed at a time.") raise exc.HTTPBadRequest(explanation=msg) def update(self, req, id, body): """Update specified share network.""" context = req.environ['manila.context'] policy.check_policy(context, RESOURCE_NAME, 'update') if not body or RESOURCE_NAME not in body: raise exc.HTTPUnprocessableEntity() try: share_network = db_api.share_network_get(context, id) except exception.ShareNetworkNotFound as e: raise exc.HTTPNotFound(explanation=six.text_type(e)) update_values = body[RESOURCE_NAME] self._verify_no_mutually_exclusive_data(share_network, update_values) if share_network['share_servers']: for value in update_values: if value not in ['name', 'description']: msg = _("Cannot update share network %s. It is used by " "share servers. 
Only 'name' and 'description' " "fields are available for update")\ % share_network['id'] raise exc.HTTPForbidden(explanation=msg) try: share_network = db_api.share_network_update(context, id, update_values) except db_exception.DBError: msg = "Could not save supplied data due to database error" raise exc.HTTPBadRequest(explanation=msg) return self._view_builder.build_share_network(share_network) def create(self, req, body): """Creates a new share network.""" context = req.environ['manila.context'] policy.check_policy(context, RESOURCE_NAME, 'create') if not body or RESOURCE_NAME not in body: raise exc.HTTPUnprocessableEntity() values = body[RESOURCE_NAME] values['project_id'] = context.project_id values['user_id'] = context.user_id self._verify_no_mutually_exclusive_data(values) try: reservations = QUOTAS.reserve(context, share_networks=1) except exception.OverQuota as e: overs = e.kwargs['overs'] usages = e.kwargs['usages'] quotas = e.kwargs['quotas'] def _consumed(name): return (usages[name]['reserved'] + usages[name]['in_use']) if 'share_networks' in overs: LOG.warning(_LW("Quota exceeded for %(s_pid)s, " "tried to create " "share-network (%(d_consumed)d of %(d_quota)d " "already consumed)."), { 's_pid': context.project_id, 'd_consumed': _consumed('share_networks'), 'd_quota': quotas['share_networks']}) raise exception.ShareNetworksLimitExceeded( allowed=quotas['share_networks']) else: try: share_network = db_api.share_network_create(context, values) except db_exception.DBError: msg = "Could not save supplied data due to database error" raise exc.HTTPBadRequest(explanation=msg) QUOTAS.commit(context, reservations) return self._view_builder.build_share_network(share_network) def action(self, req, id, body): _actions = { 'add_security_service': self._add_security_service, 'remove_security_service': self._remove_security_service } for action, data in body.items(): try: return _actions[action](req, id, data) except KeyError: msg = _("Share networks does not have %s action") % action raise exc.HTTPBadRequest(explanation=msg) def _add_security_service(self, req, id, data): """Associate share network with a given security service.""" context = req.environ['manila.context'] policy.check_policy(context, RESOURCE_NAME, 'add_security_service') share_network = db_api.share_network_get(context, id) if share_network['share_servers']: msg = _("Cannot add security services. Share network is used.") raise exc.HTTPForbidden(explanation=msg) security_service = db_api.security_service_get( context, data['security_service_id']) for attached_service in share_network['security_services']: if attached_service['type'] == security_service['type']: msg = _("Cannot add security service to share network. 
" "Security service with '%(ss_type)s' type already " "added to '%(sn_id)s' share network") % { 'ss_type': security_service['type'], 'sn_id': share_network['id']} raise exc.HTTPConflict(explanation=msg) try: share_network = db_api.share_network_add_security_service( context, id, data['security_service_id']) except KeyError: msg = "Malformed request body" raise exc.HTTPBadRequest(explanation=msg) except exception.NotFound as e: raise exc.HTTPNotFound(explanation=six.text_type(e)) except exception.ShareNetworkSecurityServiceAssociationError as e: raise exc.HTTPBadRequest(explanation=six.text_type(e)) return self._view_builder.build_share_network(share_network) def _remove_security_service(self, req, id, data): """Dissociate share network from a given security service.""" context = req.environ['manila.context'] policy.check_policy(context, RESOURCE_NAME, 'remove_security_service') share_network = db_api.share_network_get(context, id) if share_network['share_servers']: msg = _("Cannot remove security services. Share network is used.") raise exc.HTTPForbidden(explanation=msg) try: share_network = db_api.share_network_remove_security_service( context, id, data['security_service_id']) except KeyError: msg = "Malformed request body" raise exc.HTTPBadRequest(explanation=msg) except exception.NotFound as e: raise exc.HTTPNotFound(explanation=six.text_type(e)) except exception.ShareNetworkSecurityServiceDissociationError as e: raise exc.HTTPBadRequest(explanation=six.text_type(e)) return self._view_builder.build_share_network(share_network) def create_resource(): return wsgi.Resource(ShareNetworkController()) manila-2.0.0/manila/api/v1/__init__.py0000664000567000056710000000000012701407107020551 0ustar jenkinsjenkins00000000000000manila-2.0.0/manila/api/v1/share_metadata.py0000664000567000056710000001260012701407107021765 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import webob from webob import exc from manila.api.openstack import wsgi from manila import exception from manila.i18n import _ from manila import share class ShareMetadataController(object): """The share metadata API controller for the OpenStack API.""" def __init__(self): self.share_api = share.API() super(ShareMetadataController, self).__init__() def _get_metadata(self, context, share_id): try: share = self.share_api.get(context, share_id) meta = self.share_api.get_share_metadata(context, share) except exception.NotFound: msg = _('share does not exist') raise exc.HTTPNotFound(explanation=msg) return meta def index(self, req, share_id): """Returns the list of metadata for a given share.""" context = req.environ['manila.context'] return {'metadata': self._get_metadata(context, share_id)} def create(self, req, share_id, body): try: metadata = body['metadata'] except (KeyError, TypeError): msg = _("Malformed request body") raise exc.HTTPBadRequest(explanation=msg) context = req.environ['manila.context'] new_metadata = self._update_share_metadata(context, share_id, metadata, delete=False) return {'metadata': new_metadata} def update(self, req, share_id, id, body): try: meta_item = body['meta'] except (TypeError, KeyError): expl = _('Malformed request body') raise exc.HTTPBadRequest(explanation=expl) if id not in meta_item: expl = _('Request body and URI mismatch') raise exc.HTTPBadRequest(explanation=expl) if len(meta_item) > 1: expl = _('Request body contains too many items') raise exc.HTTPBadRequest(explanation=expl) context = req.environ['manila.context'] self._update_share_metadata(context, share_id, meta_item, delete=False) return {'meta': meta_item} def update_all(self, req, share_id, body): try: metadata = body['metadata'] except (TypeError, KeyError): expl = _('Malformed request body') raise exc.HTTPBadRequest(explanation=expl) context = req.environ['manila.context'] new_metadata = self._update_share_metadata(context, share_id, metadata, delete=True) return {'metadata': new_metadata} def _update_share_metadata(self, context, share_id, metadata, delete=False): try: share = self.share_api.get(context, share_id) return self.share_api.update_share_metadata(context, share, metadata, delete) except exception.NotFound: msg = _('share does not exist') raise exc.HTTPNotFound(explanation=msg) except (ValueError, AttributeError): msg = _("Malformed request body") raise exc.HTTPBadRequest(explanation=msg) except exception.InvalidShareMetadata as error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.InvalidShareMetadataSize as error: raise exc.HTTPBadRequest(explanation=error.msg) def show(self, req, share_id, id): """Return a single metadata item.""" context = req.environ['manila.context'] data = self._get_metadata(context, share_id) try: return {'meta': {id: data[id]}} except KeyError: msg = _("Metadata item was not found") raise exc.HTTPNotFound(explanation=msg) def delete(self, req, share_id, id): """Deletes an existing metadata.""" context = req.environ['manila.context'] metadata = self._get_metadata(context, share_id) if id not in metadata: msg = _("Metadata item was not found") raise exc.HTTPNotFound(explanation=msg) try: share = self.share_api.get(context, share_id) self.share_api.delete_share_metadata(context, share, id) except exception.NotFound: msg = _('share does not exist') raise exc.HTTPNotFound(explanation=msg) return webob.Response(status_int=200) def create_resource(): return wsgi.Resource(ShareMetadataController()) 
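A short sketch of the request shapes ShareMetadataController above accepts, written as plain dictionaries; the URLs follow the metadata routes registered in router.py below, and the keys and values are placeholders only.

# POST /{project_id}/shares/{share_id}/metadata -> adds/updates the given keys
create_body = {"metadata": {"project": "apps", "tier": "gold"}}

# PUT /{project_id}/shares/{share_id}/metadata -> replaces the whole mapping
update_all_body = {"metadata": {"tier": "silver"}}

# PUT /{project_id}/shares/{share_id}/metadata/tier -> updates a single item;
# the one key inside "meta" must match the key in the URL, otherwise the
# controller answers "Request body and URI mismatch" (HTTP 400).
update_item_body = {"meta": {"tier": "bronze"}}

# GET and DELETE on .../metadata/<key> read and remove a single item.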
manila-2.0.0/manila/api/v1/router.py0000664000567000056710000001631712701407107020354 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack LLC. # Copyright 2011 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ WSGI middleware for OpenStack Share API v1. """ from manila.api import extensions import manila.api.openstack from manila.api.v1 import limits from manila.api.v1 import scheduler_stats from manila.api.v1 import security_service from manila.api.v1 import share_manage from manila.api.v1 import share_metadata from manila.api.v1 import share_networks from manila.api.v1 import share_servers from manila.api.v1 import share_snapshots from manila.api.v1 import share_types_extra_specs from manila.api.v1 import share_unmanage from manila.api.v1 import shares from manila.api.v2 import availability_zones from manila.api.v2 import quota_class_sets from manila.api.v2 import quota_sets from manila.api.v2 import services from manila.api.v2 import share_types from manila.api import versions class APIRouter(manila.api.openstack.APIRouter): """Route API requests. Routes requests on the OpenStack API to the appropriate controller and method. 
""" ExtensionManager = extensions.ExtensionManager def _setup_routes(self, mapper, ext_mgr): self.resources['versions'] = versions.create_resource() mapper.connect("versions", "/", controller=self.resources['versions'], action='index') mapper.redirect("", "/") self.resources["availability_zones"] = ( availability_zones.create_resource_legacy()) mapper.resource("availability-zone", "os-availability-zone", controller=self.resources["availability_zones"]) self.resources["services"] = services.create_resource_legacy() mapper.resource("service", "os-services", controller=self.resources["services"]) self.resources["quota_sets"] = quota_sets.create_resource_legacy() mapper.resource("quota-set", "os-quota-sets", controller=self.resources["quota_sets"], member={'defaults': 'GET'}) self.resources["quota_class_sets"] = ( quota_class_sets.create_resource_legacy()) mapper.resource("quota-class-set", "os-quota-class-sets", controller=self.resources["quota_class_sets"]) self.resources["share_manage"] = share_manage.create_resource() mapper.resource("share_manage", "os-share-manage", controller=self.resources["share_manage"]) self.resources["share_unmanage"] = share_unmanage.create_resource() mapper.resource("share_unmanage", "os-share-unmanage", controller=self.resources["share_unmanage"], member={'unmanage': 'POST'}) self.resources['shares'] = shares.create_resource() mapper.resource("share", "shares", controller=self.resources['shares'], collection={'detail': 'GET'}, member={'action': 'POST'}) self.resources['snapshots'] = share_snapshots.create_resource() mapper.resource("snapshot", "snapshots", controller=self.resources['snapshots'], collection={'detail': 'GET'}, member={'action': 'POST'}) self.resources['share_metadata'] = share_metadata.create_resource() share_metadata_controller = self.resources['share_metadata'] mapper.resource("share_metadata", "metadata", controller=share_metadata_controller, parent_resource=dict(member_name='share', collection_name='shares')) mapper.connect("metadata", "/{project_id}/shares/{share_id}/metadata", controller=share_metadata_controller, action='update_all', conditions={"method": ['PUT']}) self.resources['limits'] = limits.create_resource() mapper.resource("limit", "limits", controller=self.resources['limits']) self.resources["security_services"] = \ security_service.create_resource() mapper.resource("security-service", "security-services", controller=self.resources['security_services'], collection={'detail': 'GET'}) self.resources['share_networks'] = share_networks.create_resource() mapper.resource(share_networks.RESOURCE_NAME, 'share-networks', controller=self.resources['share_networks'], collection={'detail': 'GET'}, member={'action': 'POST'}) self.resources['share_servers'] = share_servers.create_resource() mapper.resource('share_server', 'share-servers', controller=self.resources['share_servers']) mapper.connect('details', '/{project_id}/share-servers/{id}/details', controller=self.resources['share_servers'], action='details', conditions={"method": ['GET']}) self.resources['types'] = share_types.create_resource() mapper.resource("type", "types", controller=self.resources['types'], collection={'detail': 'GET', 'default': 'GET'}, member={'action': 'POST', 'os-share-type-access': 'GET'}) self.resources['extra_specs'] = ( share_types_extra_specs.create_resource()) mapper.resource('extra_spec', 'extra_specs', controller=self.resources['extra_specs'], parent_resource=dict(member_name='type', collection_name='types')) self.resources['scheduler_stats'] = 
scheduler_stats.create_resource() mapper.connect('pools', '/{project_id}/scheduler-stats/pools', controller=self.resources['scheduler_stats'], action='pools_index', conditions={'method': ['GET']}) mapper.connect('pools', '/{project_id}/scheduler-stats/pools/detail', controller=self.resources['scheduler_stats'], action='pools_detail', conditions={'method': ['GET']}) manila-2.0.0/manila/api/v1/share_types_extra_specs.py0000664000567000056710000001424412701407107023757 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import six import webob from manila.api import common from manila.api.openstack import wsgi from manila import db from manila import exception from manila.i18n import _ from manila import rpc from manila.share import share_types class ShareTypeExtraSpecsController(wsgi.Controller): """The share type extra specs API controller for the OpenStack API.""" resource_name = 'share_types_extra_spec' def _get_extra_specs(self, context, type_id): extra_specs = db.share_type_extra_specs_get(context, type_id) specs_dict = {} for key, value in extra_specs.items(): specs_dict[key] = value return dict(extra_specs=specs_dict) def _check_type(self, context, type_id): try: share_types.get_share_type(context, type_id) except exception.NotFound as ex: raise webob.exc.HTTPNotFound(explanation=ex.msg) def _verify_extra_specs(self, extra_specs, verify_all_required=True): if verify_all_required: try: share_types.get_valid_required_extra_specs(extra_specs) except exception.InvalidExtraSpec as e: raise webob.exc.HTTPBadRequest(explanation=six.text_type(e)) def is_valid_string(v): return isinstance(v, six.string_types) and len(v) in range(1, 256) def is_valid_extra_spec(k, v): valid_extra_spec_key = is_valid_string(k) valid_type = is_valid_string(v) or isinstance(v, bool) valid_required_extra_spec = ( share_types.is_valid_required_extra_spec(k, v) in (None, True)) return (valid_extra_spec_key and valid_type and valid_required_extra_spec) for k, v in extra_specs.items(): if is_valid_string(k) and isinstance(v, dict): self._verify_extra_specs(v) elif not is_valid_extra_spec(k, v): expl = _('Invalid extra_spec: %(key)s: %(value)s') % { 'key': k, 'value': v } raise webob.exc.HTTPBadRequest(explanation=expl) @wsgi.Controller.authorize def index(self, req, type_id): """Returns the list of extra specs for a given share type.""" context = req.environ['manila.context'] self._check_type(context, type_id) return self._get_extra_specs(context, type_id) @wsgi.Controller.authorize def create(self, req, type_id, body=None): context = req.environ['manila.context'] if not self.is_valid_body(body, 'extra_specs'): raise webob.exc.HTTPBadRequest() self._check_type(context, type_id) specs = body['extra_specs'] self._verify_extra_specs(specs, False) self._check_key_names(specs.keys()) db.share_type_extra_specs_update_or_create(context, type_id, specs) notifier_info = dict(type_id=type_id, specs=specs) notifier = 
rpc.get_notifier('shareTypeExtraSpecs') notifier.info(context, 'share_type_extra_specs.create', notifier_info) return body @wsgi.Controller.authorize def update(self, req, type_id, id, body=None): context = req.environ['manila.context'] if not body: expl = _('Request body empty') raise webob.exc.HTTPBadRequest(explanation=expl) self._check_type(context, type_id) if id not in body: expl = _('Request body and URI mismatch') raise webob.exc.HTTPBadRequest(explanation=expl) if len(body) > 1: expl = _('Request body contains too many items') raise webob.exc.HTTPBadRequest(explanation=expl) self._verify_extra_specs(body, False) db.share_type_extra_specs_update_or_create(context, type_id, body) notifier_info = dict(type_id=type_id, id=id) notifier = rpc.get_notifier('shareTypeExtraSpecs') notifier.info(context, 'share_type_extra_specs.update', notifier_info) return body @wsgi.Controller.authorize def show(self, req, type_id, id): """Return a single extra spec item.""" context = req.environ['manila.context'] self._check_type(context, type_id) specs = self._get_extra_specs(context, type_id) if id in specs['extra_specs']: return {id: specs['extra_specs'][id]} else: raise webob.exc.HTTPNotFound() @wsgi.Controller.authorize def delete(self, req, type_id, id): """Deletes an existing extra spec.""" context = req.environ['manila.context'] self._check_type(context, type_id) if id in share_types.get_undeletable_extra_specs(): msg = _("Extra spec '%s' can't be deleted.") % id raise webob.exc.HTTPForbidden(explanation=msg) try: db.share_type_extra_specs_delete(context, type_id, id) except exception.ShareTypeExtraSpecsNotFound as error: raise webob.exc.HTTPNotFound(explanation=error.msg) notifier_info = dict(type_id=type_id, id=id) notifier = rpc.get_notifier('shareTypeExtraSpecs') notifier.info(context, 'share_type_extra_specs.delete', notifier_info) return webob.Response(status_int=202) def _check_key_names(self, keys): if not common.validate_key_names(keys): expl = _('Key names can only contain alphanumeric characters, ' 'underscores, periods, colons and hyphens.') raise webob.exc.HTTPBadRequest(explanation=expl) def create_resource(): return wsgi.Resource(ShareTypeExtraSpecsController()) manila-2.0.0/manila/api/v1/security_service.py0000664000567000056710000002032712701407107022417 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
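A sketch of extra-spec bodies that the create() handler above would accept or reject, based on its _verify_extra_specs() and _check_key_names() checks; the share type and key names used here are placeholders.

# POST /{project_id}/types/{type_id}/extra_specs

# Accepted: string keys/values of 1-255 characters, or boolean values;
# qualified (colon-prefixed) key names are allowed.
valid_body = {
    "extra_specs": {
        "driver_handles_share_servers": "False",
        "my_vendor:thin_provisioning": True,
    }
}

# Rejected with HTTP 400: empty string values, non string/bool value types,
# and key names using characters outside alphanumerics, underscores, periods,
# colons and hyphens.
invalid_bodies = [
    {"extra_specs": {"foo": ""}},
    {"extra_specs": {"foo": 123}},
    {"extra_specs": {"bad key!": "bar"}},
]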
"""The security service api.""" from oslo_log import log import webob from webob import exc from manila.api import common from manila.api.openstack import wsgi from manila.api.views import security_service as security_service_views from manila.common import constants from manila import db from manila import exception from manila.i18n import _ from manila.i18n import _LI from manila import policy RESOURCE_NAME = 'security_service' LOG = log.getLogger(__name__) class SecurityServiceController(wsgi.Controller): """The Shares API controller for the OpenStack API.""" _view_builder_class = security_service_views.ViewBuilder def show(self, req, id): """Return data about the given security service.""" context = req.environ['manila.context'] try: security_service = db.security_service_get(context, id) policy.check_policy(context, RESOURCE_NAME, 'show', security_service) except exception.NotFound: raise exc.HTTPNotFound() return self._view_builder.detail(req, security_service) def delete(self, req, id): """Delete a security service.""" context = req.environ['manila.context'] LOG.info(_LI("Delete security service with id: %s"), id, context=context) try: security_service = db.security_service_get(context, id) except exception.NotFound: raise exc.HTTPNotFound() share_nets = db.share_network_get_all_by_security_service( context, id) if share_nets: # Cannot delete security service # if it is assigned to share networks raise exc.HTTPForbidden() policy.check_policy(context, RESOURCE_NAME, 'delete', security_service) db.security_service_delete(context, id) return webob.Response(status_int=202) def index(self, req): """Returns a summary list of security services.""" policy.check_policy(req.environ['manila.context'], RESOURCE_NAME, 'index') return self._get_security_services(req, is_detail=False) def detail(self, req): """Returns a detailed list of security services.""" policy.check_policy(req.environ['manila.context'], RESOURCE_NAME, 'detail') return self._get_security_services(req, is_detail=True) def _get_security_services(self, req, is_detail): """Returns a transformed list of security services. The list gets transformed through view builder. """ context = req.environ['manila.context'] search_opts = {} search_opts.update(req.GET) # NOTE(vponomaryov): remove 'status' from search opts # since it was removed from security service model. 
search_opts.pop('status', None) if 'share_network_id' in search_opts: share_nw = db.share_network_get(context, search_opts['share_network_id']) security_services = share_nw['security_services'] del search_opts['share_network_id'] else: if 'all_tenants' in search_opts: policy.check_policy(context, RESOURCE_NAME, 'get_all_security_services') security_services = db.security_service_get_all(context) else: security_services = db.security_service_get_all_by_project( context, context.project_id) search_opts.pop('all_tenants', None) common.remove_invalid_options( context, search_opts, self._get_security_services_search_options()) if search_opts: results = [] not_found = object() for ss in security_services: if all(ss.get(opt, not_found) == value for opt, value in search_opts.items()): results.append(ss) security_services = results limited_list = common.limited(security_services, req) if is_detail: security_services = self._view_builder.detail_list( req, limited_list) for ss in security_services['security_services']: share_networks = db.share_network_get_all_by_security_service( context, ss['id']) ss['share_networks'] = [sn['id'] for sn in share_networks] else: security_services = self._view_builder.summary_list( req, limited_list) return security_services def _get_security_services_search_options(self): return ('name', 'id', 'type', 'user', 'server', 'dns_ip', 'domain', ) def _share_servers_dependent_on_sn_exist(self, context, security_service_id): share_networks = db.share_network_get_all_by_security_service( context, security_service_id) for sn in share_networks: if sn['share_servers']: return True return False def update(self, req, id, body): """Update a security service.""" context = req.environ['manila.context'] if not body or 'security_service' not in body: raise exc.HTTPUnprocessableEntity() security_service_data = body['security_service'] valid_update_keys = ( 'description', 'name' ) try: security_service = db.security_service_get(context, id) policy.check_policy(context, RESOURCE_NAME, 'update', security_service) except exception.NotFound: raise exc.HTTPNotFound() if self._share_servers_dependent_on_sn_exist(context, id): for item in security_service_data: if item not in valid_update_keys: msg = _("Cannot update security service %s. It is " "attached to share network with share server " "associated. Only 'name' and 'description' " "fields are available for update.") % id raise exc.HTTPForbidden(explanation=msg) policy.check_policy(context, RESOURCE_NAME, 'update', security_service) security_service = db.security_service_update( context, id, security_service_data) return self._view_builder.detail(req, security_service) def create(self, req, body): """Creates a new security service.""" context = req.environ['manila.context'] policy.check_policy(context, RESOURCE_NAME, 'create') if not self.is_valid_body(body, 'security_service'): raise exc.HTTPUnprocessableEntity() security_service_args = body['security_service'] security_srv_type = security_service_args.get('type') allowed_types = constants.SECURITY_SERVICES_ALLOWED_TYPES if security_srv_type not in allowed_types: raise exception.InvalidInput( reason=(_("Invalid type %(type)s specified for security " "service. 
Valid types are %(types)s") % {'type': security_srv_type, 'types': ','.join(allowed_types)})) security_service_args['project_id'] = context.project_id security_service = db.security_service_create( context, security_service_args) return self._view_builder.detail(req, security_service) def create_resource(): return wsgi.Resource(SecurityServiceController()) manila-2.0.0/manila/api/v1/scheduler_stats.py0000664000567000056710000000363012701407107022222 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.api.openstack import wsgi from manila.api.views import scheduler_stats as scheduler_stats_views from manila.scheduler import rpcapi class SchedulerStatsController(wsgi.Controller): """The Scheduler Stats API controller for the OpenStack API.""" resource_name = 'scheduler_stats:pools' def __init__(self): self.scheduler_api = rpcapi.SchedulerAPI() self._view_builder_class = scheduler_stats_views.ViewBuilder super(SchedulerStatsController, self).__init__() @wsgi.Controller.authorize('index') def pools_index(self, req): """Returns a list of storage pools known to the scheduler.""" return self._pools(req, action='index') @wsgi.Controller.authorize('detail') def pools_detail(self, req): """Returns a detailed list of storage pools known to the scheduler.""" return self._pools(req, action='detail') def _pools(self, req, action='index'): context = req.environ['manila.context'] search_opts = {} search_opts.update(req.GET) pools = self.scheduler_api.get_pools(context, filters=search_opts) detail = (action == 'detail') return self._view_builder.pools(pools, detail=detail) def create_resource(): return wsgi.Resource(SchedulerStatsController()) manila-2.0.0/manila/api/v1/share_snapshots.py0000664000567000056710000001641612701407107022240 0ustar jenkinsjenkins00000000000000# Copyright 2013 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""The share snapshots api.""" from oslo_log import log import webob from webob import exc from manila.api import common from manila.api.openstack import wsgi from manila.api.views import share_snapshots as snapshot_views from manila import db from manila import exception from manila.i18n import _, _LI from manila import share LOG = log.getLogger(__name__) class ShareSnapshotMixin(object): """Mixin class for Share Snapshot Controllers.""" def _update(self, *args, **kwargs): db.share_snapshot_update(*args, **kwargs) def _get(self, *args, **kwargs): return self.share_api.get_snapshot(*args, **kwargs) def _delete(self, *args, **kwargs): return self.share_api.delete_snapshot(*args, **kwargs) def show(self, req, id): """Return data about the given snapshot.""" context = req.environ['manila.context'] try: snapshot = self.share_api.get_snapshot(context, id) # Snapshot with no instances is filtered out. if(snapshot.get('status') is None): raise exc.HTTPNotFound() except exception.NotFound: raise exc.HTTPNotFound() return self._view_builder.detail(req, snapshot) def delete(self, req, id): """Delete a snapshot.""" context = req.environ['manila.context'] LOG.info(_LI("Delete snapshot with id: %s"), id, context=context) try: snapshot = self.share_api.get_snapshot(context, id) self.share_api.delete_snapshot(context, snapshot) except exception.NotFound: raise exc.HTTPNotFound() return webob.Response(status_int=202) def index(self, req): """Returns a summary list of snapshots.""" return self._get_snapshots(req, is_detail=False) def detail(self, req): """Returns a detailed list of snapshots.""" return self._get_snapshots(req, is_detail=True) def _get_snapshots(self, req, is_detail): """Returns a list of snapshots.""" context = req.environ['manila.context'] search_opts = {} search_opts.update(req.GET) # Remove keys that are not related to share attrs search_opts.pop('limit', None) search_opts.pop('offset', None) sort_key = search_opts.pop('sort_key', 'created_at') sort_dir = search_opts.pop('sort_dir', 'desc') # NOTE(vponomaryov): Manila stores in DB key 'display_name', but # allows to use both keys 'name' and 'display_name'. It is leftover # from Cinder v1 and v2 APIs. if 'name' in search_opts: search_opts['display_name'] = search_opts.pop('name') common.remove_invalid_options(context, search_opts, self._get_snapshots_search_options()) snapshots = self.share_api.get_all_snapshots( context, search_opts=search_opts, sort_key=sort_key, sort_dir=sort_dir, ) # Snapshots with no instances are filtered out. 
snapshots = list(filter(lambda x: x.get('status') is not None, snapshots)) limited_list = common.limited(snapshots, req) if is_detail: snapshots = self._view_builder.detail_list(req, limited_list) else: snapshots = self._view_builder.summary_list(req, limited_list) return snapshots def _get_snapshots_search_options(self): """Return share search options allowed by non-admin.""" return ('display_name', 'name', 'status', 'share_id', 'size') def update(self, req, id, body): """Update a snapshot.""" context = req.environ['manila.context'] if not body or 'snapshot' not in body: raise exc.HTTPUnprocessableEntity() snapshot_data = body['snapshot'] valid_update_keys = ( 'display_name', 'display_description', ) update_dict = {key: snapshot_data[key] for key in valid_update_keys if key in snapshot_data} try: snapshot = self.share_api.get_snapshot(context, id) except exception.NotFound: raise exc.HTTPNotFound() snapshot = self.share_api.snapshot_update(context, snapshot, update_dict) snapshot.update(update_dict) return self._view_builder.detail(req, snapshot) @wsgi.response(202) def create(self, req, body): """Creates a new snapshot.""" context = req.environ['manila.context'] if not self.is_valid_body(body, 'snapshot'): raise exc.HTTPUnprocessableEntity() snapshot = body['snapshot'] share_id = snapshot['share_id'] share = self.share_api.get(context, share_id) # Verify that share can be snapshotted if not share['snapshot_support']: msg = _("Snapshot cannot be created from share '%s', because " "share back end does not support it.") % share_id LOG.error(msg) raise exc.HTTPUnprocessableEntity(explanation=msg) LOG.info(_LI("Create snapshot from share %s"), share_id, context=context) # NOTE(rushiagr): v2 API allows name instead of display_name if 'name' in snapshot: snapshot['display_name'] = snapshot.get('name') del snapshot['name'] # NOTE(rushiagr): v2 API allows description instead of # display_description if 'description' in snapshot: snapshot['display_description'] = snapshot.get('description') del snapshot['description'] new_snapshot = self.share_api.create_snapshot( context, share, snapshot.get('display_name'), snapshot.get('display_description')) return self._view_builder.detail( req, dict(new_snapshot.items())) class ShareSnapshotsController(ShareSnapshotMixin, wsgi.Controller, wsgi.AdminActionsMixin): """The Share Snapshots API controller for the OpenStack API.""" resource_name = 'share_snapshot' _view_builder_class = snapshot_views.ViewBuilder def __init__(self): super(ShareSnapshotsController, self).__init__() self.share_api = share.API() @wsgi.action('os-reset_status') def snapshot_reset_status_legacy(self, req, id, body): return self._reset_status(req, id, body) @wsgi.action('os-force_delete') def snapshot_force_delete_legacy(self, req, id, body): return self._force_delete(req, id, body) def create_resource(): return wsgi.Resource(ShareSnapshotsController()) manila-2.0.0/manila/rpc.py0000664000567000056710000001051312701407107016511 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. __all__ = [ 'init', 'cleanup', 'set_defaults', 'add_extra_exmods', 'clear_extra_exmods', 'get_allowed_exmods', 'RequestContextSerializer', 'get_client', 'get_server', 'get_notifier', 'TRANSPORT_ALIASES', ] from oslo_config import cfg import oslo_messaging as messaging from oslo_serialization import jsonutils import manila.context import manila.exception CONF = cfg.CONF TRANSPORT = None NOTIFIER = None ALLOWED_EXMODS = [ manila.exception.__name__, ] EXTRA_EXMODS = [] # NOTE(flaper87): The manila.openstack.common.rpc entries are # for backwards compat with Havana rpc_backend configuration # values. The manila.rpc entries are for compat with Folsom values. TRANSPORT_ALIASES = { 'manila.openstack.common.rpc.impl_kombu': 'rabbit', 'manila.openstack.common.rpc.impl_qpid': 'qpid', 'manila.openstack.common.rpc.impl_zmq': 'zmq', 'manila.rpc.impl_kombu': 'rabbit', 'manila.rpc.impl_qpid': 'qpid', 'manila.rpc.impl_zmq': 'zmq', } def init(conf): global TRANSPORT, NOTIFIER exmods = get_allowed_exmods() TRANSPORT = messaging.get_transport(conf, allowed_remote_exmods=exmods, aliases=TRANSPORT_ALIASES) serializer = RequestContextSerializer(JsonPayloadSerializer()) NOTIFIER = messaging.Notifier(TRANSPORT, serializer=serializer) def initialized(): return None not in [TRANSPORT, NOTIFIER] def cleanup(): global TRANSPORT, NOTIFIER assert TRANSPORT is not None assert NOTIFIER is not None TRANSPORT.cleanup() TRANSPORT = NOTIFIER = None def set_defaults(control_exchange): messaging.set_transport_defaults(control_exchange) def add_extra_exmods(*args): EXTRA_EXMODS.extend(args) def clear_extra_exmods(): del EXTRA_EXMODS[:] def get_allowed_exmods(): return ALLOWED_EXMODS + EXTRA_EXMODS class JsonPayloadSerializer(messaging.NoOpSerializer): @staticmethod def serialize_entity(context, entity): return jsonutils.to_primitive(entity, convert_instances=True) class RequestContextSerializer(messaging.Serializer): def __init__(self, base): self._base = base def serialize_entity(self, context, entity): if not self._base: return entity return self._base.serialize_entity(context, entity) def deserialize_entity(self, context, entity): if not self._base: return entity return self._base.deserialize_entity(context, entity) def serialize_context(self, context): return context.to_dict() def deserialize_context(self, context): return manila.context.RequestContext.from_dict(context) def get_transport_url(url_str=None): return messaging.TransportURL.parse(CONF, url_str, TRANSPORT_ALIASES) def get_client(target, version_cap=None, serializer=None): assert TRANSPORT is not None serializer = RequestContextSerializer(serializer) return messaging.RPCClient(TRANSPORT, target, version_cap=version_cap, serializer=serializer) def get_server(target, endpoints, serializer=None): assert TRANSPORT is not None serializer = RequestContextSerializer(serializer) return messaging.get_rpc_server(TRANSPORT, target, endpoints, executor='eventlet', serializer=serializer) def get_notifier(service=None, host=None, publisher_id=None): assert NOTIFIER is not None if not publisher_id: publisher_id = "%s.%s" % (service, host or CONF.host) return NOTIFIER.prepare(publisher_id=publisher_id) manila-2.0.0/manila/consistency_group/0000775000567000056710000000000012701407265021135 5ustar jenkinsjenkins00000000000000manila-2.0.0/manila/consistency_group/__init__.py0000664000567000056710000000000012701407107023227 0ustar 
jenkinsjenkins00000000000000manila-2.0.0/manila/consistency_group/api.py0000664000567000056710000003223612701407107022261 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Alex Meade # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all requests relating to consistency groups. """ from oslo_config import cfg from oslo_log import log from oslo_utils import excutils from oslo_utils import strutils import six from manila.common import constants from manila.db import base from manila import exception from manila.i18n import _ from manila.scheduler import rpcapi as scheduler_rpcapi from manila import share from manila.share import rpcapi as share_rpcapi from manila.share import share_types CONF = cfg.CONF LOG = log.getLogger(__name__) class API(base.Base): """API for interacting with the share manager.""" def __init__(self, db_driver=None): self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() self.share_rpcapi = share_rpcapi.ShareAPI() self.share_api = share.API() super(API, self).__init__(db_driver) def create(self, context, name=None, description=None, share_type_ids=None, source_cgsnapshot_id=None, share_network_id=None): """Create new consistency group.""" cgsnapshot = None original_cg = None if source_cgsnapshot_id: cgsnapshot = self.db.cgsnapshot_get(context, source_cgsnapshot_id) if cgsnapshot['status'] != constants.STATUS_AVAILABLE: msg = (_("Consistency group snapshot status must be %s") % constants.STATUS_AVAILABLE) raise exception.InvalidCGSnapshot(reason=msg) original_cg = self.db.consistency_group_get(context, cgsnapshot[ 'consistency_group_id']) share_type_ids = [s['share_type_id'] for s in original_cg[ 'share_types']] # Get share_type_objects share_type_objects = [] driver_handles_share_servers = None for share_type_id in (share_type_ids or []): try: share_type_object = share_types.get_share_type( context, share_type_id) except exception.ShareTypeNotFound: msg = _("Share type with id %s could not be found") raise exception.InvalidInput(msg % share_type_id) share_type_objects.append(share_type_object) extra_specs = share_type_object.get('extra_specs') if extra_specs: share_type_handle_ss = strutils.bool_from_string( extra_specs.get( constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS)) if driver_handles_share_servers is None: driver_handles_share_servers = share_type_handle_ss elif not driver_handles_share_servers == share_type_handle_ss: # NOTE(ameade): if the share types have conflicting values # for driver_handles_share_servers then raise bad request msg = _("The specified share_types cannot have " "conflicting values for the " "driver_handles_share_servers extra spec.") raise exception.InvalidInput(reason=msg) if (not share_type_handle_ss) and share_network_id: msg = _("When using a share types with the " "driver_handles_share_servers extra spec as " "False, a share_network_id must not be provided.") raise exception.InvalidInput(reason=msg) try: if share_network_id: self.db.share_network_get(context, share_network_id) except 
exception.ShareNetworkNotFound: msg = _("The specified share network does not exist.") raise exception.InvalidInput(reason=msg) if (driver_handles_share_servers and not (source_cgsnapshot_id or share_network_id)): msg = _("When using a share type with the " "driver_handles_share_servers extra spec as " "True, a share_network_id must be provided.") raise exception.InvalidInput(reason=msg) options = { 'source_cgsnapshot_id': source_cgsnapshot_id, 'share_network_id': share_network_id, 'name': name, 'description': description, 'user_id': context.user_id, 'project_id': context.project_id, 'status': constants.STATUS_CREATING, 'share_types': share_type_ids } if original_cg: options['host'] = original_cg['host'] cg = self.db.consistency_group_create(context, options) try: if cgsnapshot: members = self.db.cgsnapshot_members_get_all( context, source_cgsnapshot_id) for member in members: share_type = share_types.get_share_type( context, member['share_type_id']) member['share'] = self.db.share_instance_get( context, member['share_instance_id'], with_share_data=True) self.share_api.create(context, member['share_proto'], member['size'], None, None, consistency_group_id=cg['id'], cgsnapshot_member=member, share_type=share_type, share_network_id=share_network_id) except Exception: with excutils.save_and_reraise_exception(): self.db.consistency_group_destroy(context.elevated(), cg['id']) request_spec = {'consistency_group_id': cg['id']} request_spec.update(options) request_spec['share_types'] = share_type_objects if cgsnapshot and original_cg: self.share_rpcapi.create_consistency_group( context, cg, original_cg['host']) else: self.scheduler_rpcapi.create_consistency_group( context, cg_id=cg['id'], request_spec=request_spec, filter_properties={}) return cg def delete(self, context, cg): """Delete consistency group.""" cg_id = cg['id'] if not cg['host']: self.db.consistency_group_destroy(context.elevated(), cg_id) return statuses = (constants.STATUS_AVAILABLE, constants.STATUS_ERROR) if not cg['status'] in statuses: msg = (_("Consistency group status must be one of %(statuses)s") % {"statuses": statuses}) raise exception.InvalidConsistencyGroup(reason=msg) # NOTE(ameade): check for cgsnapshots in the CG if self.db.count_cgsnapshots_in_consistency_group(context, cg_id): msg = (_("Cannot delete a consistency group with cgsnapshots")) raise exception.InvalidConsistencyGroup(reason=msg) # NOTE(ameade): check for shares in the CG if self.db.count_shares_in_consistency_group(context, cg_id): msg = (_("Cannot delete a consistency group with shares")) raise exception.InvalidConsistencyGroup(reason=msg) cg = self.db.consistency_group_update( context, cg_id, {'status': constants.STATUS_DELETING}) self.share_rpcapi.delete_consistency_group(context, cg) def update(self, context, cg, fields): return self.db.consistency_group_update(context, cg['id'], fields) def get(self, context, cg_id): return self.db.consistency_group_get(context, cg_id) def get_all(self, context, detailed=True, search_opts=None): if search_opts is None: search_opts = {} LOG.debug("Searching for consistency_groups by: %s", six.text_type(search_opts)) # Get filtered list of consistency_groups if context.is_admin and search_opts.get('all_tenants'): consistency_groups = self.db.consistency_group_get_all( context, detailed=detailed) else: consistency_groups = self.db.consistency_group_get_all_by_project( context, context.project_id, detailed=detailed) return consistency_groups def create_cgsnapshot(self, context, name=None, description=None, 
consistency_group_id=None): """Create new cgsnapshot.""" options = { 'consistency_group_id': consistency_group_id, 'name': name, 'description': description, 'user_id': context.user_id, 'project_id': context.project_id, 'status': constants.STATUS_CREATING, } cg = self.db.consistency_group_get(context, consistency_group_id) # Check status of CG, must be active if not cg['status'] == constants.STATUS_AVAILABLE: msg = (_("Consistency group status must be %s") % constants.STATUS_AVAILABLE) raise exception.InvalidConsistencyGroup(reason=msg) # Create members for every share in the CG shares = self.db.share_get_all_by_consistency_group_id( context, consistency_group_id) # Check status of all shares, they must be active in order to snap # the CG for s in shares: if not s['status'] == constants.STATUS_AVAILABLE: msg = (_("Share %(s)s in consistency group must have status " "of %(status)s in order to create a CG snapshot") % {"s": s['id'], "status": constants.STATUS_AVAILABLE}) raise exception.InvalidConsistencyGroup(reason=msg) snap = self.db.cgsnapshot_create(context, options) try: members = [] for s in shares: member_options = { 'cgsnapshot_id': snap['id'], 'user_id': context.user_id, 'project_id': context.project_id, 'status': constants.STATUS_CREATING, 'size': s['size'], 'share_proto': s['share_proto'], 'share_type_id': s['share_type_id'], 'share_id': s['id'], 'share_instance_id': s.instance['id'] } member = self.db.cgsnapshot_member_create(context, member_options) members.append(member) # Cast to share manager self.share_rpcapi.create_cgsnapshot(context, snap, cg['host']) except Exception: with excutils.save_and_reraise_exception(): # This will delete the snapshot and all of it's members self.db.cgsnapshot_destroy(context, snap['id']) return snap def delete_cgsnapshot(self, context, snap): """Delete consistency group snapshot.""" snap_id = snap['id'] cg = self.db.consistency_group_get(context, snap['consistency_group_id']) statuses = (constants.STATUS_AVAILABLE, constants.STATUS_ERROR) if not snap['status'] in statuses: msg = (_("Consistency group snapshot status must be one of" " %(statuses)s") % {"statuses": statuses}) raise exception.InvalidCGSnapshot(reason=msg) self.db.cgsnapshot_update(context, snap_id, {'status': constants.STATUS_DELETING}) # Cast to share manager self.share_rpcapi.delete_cgsnapshot(context, snap, cg['host']) def update_cgsnapshot(self, context, cg, fields): return self.db.cgsnapshot_update(context, cg['id'], fields) def get_cgsnapshot(self, context, snapshot_id): return self.db.cgsnapshot_get(context, snapshot_id) def get_all_cgsnapshots(self, context, detailed=True, search_opts=None): if search_opts is None: search_opts = {} LOG.debug("Searching for consistency group snapshots by: %s", six.text_type(search_opts)) # Get filtered list of consistency_groups if context.is_admin and search_opts.get('all_tenants'): cgsnapshots = self.db.cgsnapshot_get_all( context, detailed=detailed) else: cgsnapshots = self.db.cgsnapshot_get_all_by_project( context, context.project_id, detailed=detailed) return cgsnapshots def get_all_cgsnapshot_members(self, context, cgsnapshot_id): members = self.db.cgsnapshot_members_get_all(context, cgsnapshot_id) return members manila-2.0.0/doc/0000775000567000056710000000000012701407265014664 5ustar jenkinsjenkins00000000000000manila-2.0.0/doc/find_autodoc_modules.sh0000775000567000056710000000072512701407107021410 0ustar jenkinsjenkins00000000000000#!/bin/bash MANILA_DIR='manila/' # include trailing slash DOCS_DIR='source' modules='' for x in `find 
${MANILA_DIR} -name '*.py' | grep -v manila/tests`; do if [ `basename ${x} .py` == "__init__" ] ; then continue fi relative=manila.`echo ${x} | sed -e 's$^'${MANILA_DIR}'$$' -e 's/.py$//' -e 's$/$.$g'` modules="${modules} ${relative}" done for mod in ${modules} ; do if [ ! -f "${DOCS_DIR}/${mod}.rst" ]; then echo ${mod} fi done manila-2.0.0/doc/source/0000775000567000056710000000000012701407265016164 5ustar jenkinsjenkins00000000000000manila-2.0.0/doc/source/index.rst0000664000567000056710000000432612701407107020025 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2012 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Welcome to Manila's developer documentation! ============================================ Manila is an OpenStack project to provide "Shared Filesystems as a service". * **Component based architecture**: Quickly add new behaviors * **Highly available**: Scale to very serious workloads * **Fault-Tolerant**: Isolated processes avoid cascading failures * **Recoverable**: Failures should be easy to diagnose, debug, and rectify * **Open Standards**: Be a reference implementation for a community-driven API * **API Compatibility**: Manila strives to provide API compatibility with popular systems like Amazon EC2 This documentation is generated by the Sphinx toolkit and lives in the source tree. Additional draft and project documentation on Manila and other components of OpenStack can be found on the `OpenStack wiki`_. Cloud administrators, refer to `docs.openstack.org`_. .. _`OpenStack wiki`: http://wiki.openstack.org .. _`docs.openstack.org`: http://docs.openstack.org Developer Docs ============== .. toctree:: :maxdepth: 1 devref/index man/index api/autoindex Admin Docs ========== .. toctree:: :maxdepth: 1 adminref/index API Extensions ============== Go to http://api.openstack.org for information about Manila API extensions. Information =========== .. toctree:: :maxdepth: 1 glossary Outstanding Documentation Tasks =============================== .. todolist:: Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` manila-2.0.0/doc/source/glossary.rst0000664000567000056710000000207212701407107020555 0ustar jenkinsjenkins00000000000000======== Glossary ======== .. glossary:: Manila OpenStack project to provide "Shared Filesystems as a service". manila-api Service that provides a stable RESTful API. The service authenticates and routes requests throughout the Shared Filesystem service. :term:`python-manilaclient` can be used to interact with the API. python-manilaclient Command line interface to interact with :term:`Manila` via :term:`manila-api` and also a Python module to interact programmatically with :term:`Manila`. manila-scheduler Responsible for scheduling/routing requests to the appropriate :term:`manila-share` service. It does so by filtering the available back ends and picking the single most suitable one for each request. 
manila-share Responsible for managing Shared File Service devices, specifically the back-end devices. DHSS Acronym for 'driver handles share servers'. It distinguishes the two share driver modes: drivers that handle share servers themselves and drivers that do not. Each driver works in only one mode at a time and is required to support at least one of the two modes. manila-2.0.0/doc/source/_ga/0000775000567000056710000000000012701407265016712 5ustar jenkinsjenkins00000000000000manila-2.0.0/doc/source/_ga/layout.html0000664000567000056710000000105512701407107021111 0ustar jenkinsjenkins00000000000000{% extends "!layout.html" %} {% block footer %} {{ super() }} {% endblock %} manila-2.0.0/doc/source/man/0000775000567000056710000000000012701407265016737 5ustar jenkinsjenkins00000000000000manila-2.0.0/doc/source/man/index.rst0000664000567000056710000000030112701407107020575 0ustar jenkinsjenkins00000000000000Command-Line Utilities ====================== In this section you will find information on Manila's command-line utilities. Reference --------- .. toctree:: :maxdepth: 3 manila-manage manila-2.0.0/doc/source/man/manila-manage.rst0000664000567000056710000000526012701407107022156 0ustar jenkinsjenkins00000000000000============= manila-manage ============= ------------------------------------- control and manage shared filesystems ------------------------------------- :Author: openstack@lists.launchpad.net :Date: 2014-06-11 :Copyright: OpenStack LLC :Version: 2014.2 :Manual section: 1 :Manual group: shared filesystems SYNOPSIS ======== manila-manage <category> <action> [<args>] DESCRIPTION =========== manila-manage controls the shared filesystems service. More information about OpenStack Manila is at https://wiki.openstack.org/wiki/Manila OPTIONS ======= The standard pattern for executing a manila-manage command is: ``manila-manage <category> <command> [<args>]`` For example, to obtain a list of all hosts: ``manila-manage host list`` Run without arguments to see a list of available command categories: ``manila-manage`` Categories are shell, logs, service, db, host, version and config. Detailed descriptions are below. These sections describe the available categories and arguments for manila-manage. Manila Db ~~~~~~~~~ ``manila-manage db version`` Print the current database version. ``manila-manage db sync`` Sync the database up to the most recent version. This is the standard way to create the db as well. ``manila-manage db downgrade <version>`` Downgrade database to given version. ``manila-manage db stamp <version>`` Stamp database with given version. ``manila-manage db revision `` Generate new migration. Manila Logs ~~~~~~~~~~~ ``manila-manage logs errors`` Displays manila errors from log files. ``manila-manage logs syslog <number>`` Displays manila alerts from syslog. Manila Shell ~~~~~~~~~~~~ ``manila-manage shell bpython`` Starts a new bpython shell. ``manila-manage shell ipython`` Starts a new ipython shell. ``manila-manage shell python`` Starts a new python shell. ``manila-manage shell run`` Starts a new shell using python. ``manila-manage shell script <path/scriptname>`` Runs the named script from the specified path with flags set. Manila Host ~~~~~~~~~~~ ``manila-manage host list`` Returns list of running manila hosts. Manila Config ~~~~~~~~~~~~~ ``manila-manage config list`` Returns list of currently set config options and their values. Manila Service ~~~~~~~~~~~~~~ ``manila-manage service list`` Returns list of manila services. Manila Version ~~~~~~~~~~~~~~ ``manila-manage version list`` Returns list of versions.
FILES ===== The manila-manage.conf file contains configuration information in the form of python-gflags. BUGS ==== * Manila is sourced in Launchpad so you can view current bugs at `OpenStack Manila `__ manila-2.0.0/doc/source/conf.py0000664000567000056710000001722712701407107017467 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # # manila documentation build configuration file, created by # sphinx-quickstart on Sat May 1 15:17:47 2010. # # This file is execfile()d with the current directory set # to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import subprocess import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../')) sys.path.insert(0, os.path.abspath('../')) sys.path.insert(0, os.path.abspath('./')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. # They can be extensions coming with Sphinx (named 'sphinx.ext.*') # or your custom ones. extensions = ['sphinx.ext.autodoc', 'ext.manila_todo', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', 'oslosphinx', ] # autodoc generation is a bit aggressive and a nuisance # when doing heavy text edit cycles. Execute "export SPHINX_DEBUG=1" # in your terminal to disable if not os.getenv('SPHINX_DEBUG'): extensions += ['ext.manila_autodoc'] todo_include_todos = True # Add any paths that contain templates here, relative to this directory. # Changing the path so that the Hudson build output contains GA code # and the source docs do not contain the code so local, offline sphinx builds # are "clean." templates_path = [] if os.getenv('HUDSON_PUBLISH_DOCS'): templates_path = ['_ga', '_templates'] else: templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'manila' copyright = u'2010-present, OpenStack, LLC' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # from manila.version import version_info # The full version, including alpha/beta/rc tags. release = version_info.release_string() # The short X.Y version. version = version_info.version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. unused_docs = [ 'api_ext/rst_extension_template', 'installer', ] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = [] # The reST default role (used for this markup: `text`) to use # for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. 
#add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['manila.'] # -- Options for man page output ---------------------------------------------- # Grouping the document tree for man pages. # List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' man_pages = [ ('man/manila-manage', 'manila-manage', u'Cloud controller fabric', [u'OpenStack'], 1) ] # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' git_cmd = [ "git", "log", "--pretty=format:'%ad, commit %h'", "--date=local", "-n1" ] html_last_updated_fmt = subprocess.Popen( git_cmd, stdout=subprocess.PIPE).communicate()[0] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. 
htmlhelp_basename = 'maniladoc' # -- Options for LaTeX output ------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'Manila.tex', u'Manila Documentation', u'Anso Labs, LLC', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True manila-2.0.0/doc/source/images/rpc/flow1.svg [vector drawing of the rpc.call message flow; recoverable labels: a Topic Publisher on the Invoker (e.g. api) publishes to the control_exchange (type: topic) exchange with routing keys 'topic' and 'topic.host'; Topic Consumers deliver to the Worker (e.g. compute); the reply travels from a Direct Publisher through a msg_id (type: direct) exchange to a Direct Consumer back on the Invoker, all inside one RabbitMQ Node; raw SVG markup and intermediate directory entries omitted]
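The labels preserved above from flow1.svg describe the standard rpc.call topology used by OpenStack services such as manila: a blocking call is published to a topic exchange, and the reply is routed back over a per-call direct exchange. A minimal sketch of that pattern follows, assuming the oslo.messaging library; the topic name, method name, and arguments are illustrative only and are not manila's actual RPC interface.

# rpc.call sketch (assumed oslo.messaging usage; 'manila-share', 'get_share_status'
# and 'share_id' are hypothetical names used only for illustration).
from oslo_config import cfg
import oslo_messaging

transport = oslo_messaging.get_transport(cfg.CONF)    # e.g. a rabbit:// transport URL
target = oslo_messaging.Target(topic='manila-share')  # the "Topic Publisher" side
client = oslo_messaging.RPCClient(transport, target)


def call_worker(context, host, share_id):
    # The 'topic.host' routing key from the drawing: address one specific worker.
    cctxt = client.prepare(server=host)
    # Blocking call; the reply returns through the per-call direct (msg_id) exchange.
    return cctxt.call(context, 'get_share_status', share_id=share_id)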
manila-2.0.0/doc/source/images/rpc/state.png [binary PNG image; no text content is recoverable from the archive dump, so the raw data is omitted]
[a further RPC flow drawing follows whose file header is not recoverable from the dump; its surviving labels describe the rpc.cast variant: Topic Publisher on the Invoker (e.g. api), control_exchange (type: topic) with routing keys 'topic' and 'topic.host', Topic Consumers, Worker (e.g. compute), one RabbitMQ Node, rpc.cast(topic)]
manila-2.0.0/doc/source/images/rpc/rabt.svg [vector drawing of the full rpc.call topology on a RabbitMQ Node (single virtual host context); recoverable labels: Topic Publisher and Direct Consumer on the Invoker (e.g. api), control_exchange (type: topic) with keys 'topic' and 'topic.host', Topic Consumers and a Direct Publisher on the Worker (e.g. compute), and a msg_id (type: direct) exchange with key 'msg_id'; raw SVG markup omitted]
manila-2.0.0/doc/source/images/rpc/hds_network.jpg [binary JPEG image; no text content is recoverable from the archive dump, so the raw data is omitted]
7쮀?l/c"<@?( ( ( si[Hy ݹIC$OpH?@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@~9:sXnb[.{,<)P@P@P@P3u #ڗ۞C|C$RE` bG"ta؃Ђ$h*( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (; xfMb.Pl&<<.Q?@`?l?: ,%&G{Yd~ο,%&G{Yd~ο,%&G{Yd~ο,%&G{Yd~ο,%&G{Yd~ο,%&G{Yd~ο,%&G{Yd~οX(5j):dΉ:~",`E`b @< Aʼn=O'wOY"8|Oe3?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@E(iH_ Md|_QD?|B}K)&ট ٢ghHf^@3+/PA (OأBq߳"(*"F @q'c~d|Q ï;'W7Z|']_GW]hfyVVW3manila-2.0.0/doc/source/images/rpc/flow2.png0000664000567000056710000007367212701407107021766 0ustar jenkinsjenkins00000000000000PNG  IHDRWcsRGBgAMA a cHRMz&u0`:pQ< pHYs&?w#IDATx^ $E!CfoA@<("Ȣ0(=,.l31 #8 ( "*SdVFUeUETu]y9OƲj|PPPPPPPPPPPPPPPPPPPPPPPP`p ,v۩fϞ}Zkuc6 `fͺqbbҩ}+(((+ h=rbo1cpЋyq'.3o64l ?8 3>?%l-~~|xu{Ofzq(((c y|!/`eClF;X+ˆX, dfMܿWF c60~6pg*}xkfdۢQp@@@U9*oxv m݀GY:wqΜq-m3@@@3g/y٢ݿx^lmn)6xӥ8 h*~]ț?n<;6 ܳb-|ێfK] d̙kj_+v>_z /3w]6EV!T2o[[_7wW\3MiPJa^[džM45C`["MNmFܒݖoo]hGIPP 3"?&pU2^U̞=mHHADǵ)Ǖ|%l`«ޓ@.(0j 8@6 `M;P/`epD`q1@+6帒 `@u`q1@$`q8l`6p+[bK.B_>eWDw6K<;Ц`؁>r\ P Xe5b{>Q۬Yw޹6;Xtݍ6ڨX*n`ATz7lT7޸2lb酗<;AvM; } @ Š[3޶N*N9"+V&9sԦS#8XhQTZ%dMe˖Ew/tg}.Dh!f# =>D(xCPIͰOWl5 =>D(,3(:mk,a"gm"o@Woh!&l(Xvn`d*SNCB @& G |;X6fqP`XvPOA-XW0|l(XvZϞ`+5ڳE |i^m`siTΑأYRn!.ndV}_p@w0ox Ālˠ @ 0o9~vizɔbw(M`;NA>~i#y Mj`1j (Q?tC1 Q"# {{[+[AW`, _σ*|~jš{rקl,pѯ|-X6نp8{=-|t7cGTZ}ݏC0T!v8ݰ*HP<,uX #rm 0Hi*ES} VitLii=,<]y8*$}g6ƶX4}.Xi9vݔS|'6nU;@`w4zv  6]lٸ:`:;zQ) Kl0u+.UX%iS6_{hR3,X`'EءLg,s[ه5xVj ֑lj4UZyO|譥O-i >5ԒO VJE`>Nv@!r]/G寫y]\+SVOՌ,6DN,XF 2gDOڎcT 5gt`l`V`ʩ}3U.#f*^XEPe~MGbYlp/KZ >`dׇK< [_:]" l]\w;gyYl1yl)lhXucRk)  *{3ܾmU* .mvetF`#}#P  o,60@`a1ljeC4f-a @gm0|Ɇy_Y__Ua=XϏ8relv8߉-i މ(#ƪO]NX`bor;Ϻ*VXQ/̙SN 8bѢEQihM6)-[^u zCGߨ}*)\ݡGXk?y+vgT+Cl6/n[~<#?O[m3=度iS66v"O}vD`31 ;>u=;`؁l҆d6&NYD`g}WݾG֛= y}F.^Hw^*kƆz^5 9`}ē Mz+ !ύ!{ݒ73fhu=jm 1֡`N1u뢬ZNb\ډޯ둼Z7Thk]8QH$j `OVTl5~Xv /)nA \\sbM7?ˮX 2uescu[cWv*j==-|* ^}i+ՑM܂{s&ٲ`pb= XtMSN)f͚:xU' `XV, `Zu w/mU6JuȡizI|_]wÖS?:`&s-?ox+_9 4-GpL, ڄwv{n: ejaw(惮޹уtLinU_oDQ^_5-?-|[9²*d xfLquQl,.LaVVk;裋}{ŏ~MKt(:k;W\}]_LO lmY9&?un7Si xRZ㣎YJ ,|yj1;m:_u;SkJcmʧ!6>.2/7MDqmʍ\@FN̄]xw,?qvy9ldzK dI^u,H{&lD@A`؝X(4X] X]5A _@qܪ,~&Uy4 ;@PXo C w]3(~t?(N8UǪ;φn`XV)0[ 9}cfl_R׺[:=_wN5 8y= k9{a$?y[|[-Y5. -HK`ս7˲U.U9eY䶮 q-:\ Gݟ`XVAjۮ?_owvֳv߳[f~#i, ^6\VﶺI..mX$O!]+['[ [x;v$;IKZ~F\6`]x֬Pjos9f31,;j-iW2:;16&Mc,݊V۹#Jg+t g6ەIw[w>vu`;4l=j-r _ַg۴ϛ鷿t, ¶,Vhg ~Վ2q:ơ3[uuMOs4I6ѓ/6 qjIOwQp~s;NvVBu Ewu #29t v ^9`#ql{yu5,N??6Nôjqn*_~%}tܺ+?c!6Dޮue~[K^奁mMR3q`;؛okE.6+z;W}g:G, {ۺ6wtϷa=ٓ8 b|"1cF.vdIe][@VbӪ}86ŕFF&J`bwܾ/ ܟԧ7߼ws[v gS`o[o-.]W{zhh㪼ہ>;5fW݌>w:*>6t}0n~~T|h97Yy>bw.MnZ;׿6Eciv)lT&1nxcxWF{+/8*]e^9,ԁ`h`HiIoV݄} {&8뮻Z׼5nşgGyd_z.myyqt`_ [#f]M>[B\fH&%wp1wpM:Iu`6*@m6p0vHť`08䠃M]zqM&&JV3^[<.絢oiO<5.VewM9/n'ظT,+SB<*k]czΚv݇u}v `È߭8URe)Yzأr9:sTj{^Z?я[ne񖷼noۡlńoA,ASN| mM`؞V]dqf7Ty1]ݬdW`4~bw,ji:E`؎i'vbm`⁑Qn|[Zu+V(4_l%\2pXmFm`7Z1q`v~;wSq*]jbNj徏w1Vi3 ,>ZV3 c`-??}<i2gIfoWn۟r-a⧻gf`w~_nyI4ֺ[V{}?я ->/_PT>C5\Ӛ-,5՚_-|H`q¿wSC>lt3}KMȿk~gdK'm#n}®7tf# ft/v[-zscƦfaB(5X,˷*mlo`WU/V3^tgr$C'>Qlu!=_M^xÑG7Z\V]xӟ]vYV[{xxG_mV|K_j;ëM` 6on2`iy`Ѥˇ-O E[ [@:}mKj4$t7usbx zEakF9uM'`<a-[FcXΗ&)m=Z:lc^׬FH?m s=7`?ϊw-G>҂^7{ō7ؚL (E~tS}AD`}o+v;mfޝ-?uX]}n1񲚨hǝvx٘u`ΟJ1B:V5+sY8d}v!>؊r!Ů/fʕ+[}kl"cx=}_p*?|i]vݭ}㗶)ʩ:"׹گs۵_O۵_ ԔʢoS:uR2~,04- k`6b|MdZ |/6hbxn:[r@;Z6 `52:6LGfwON27*vi? 
MOj!7Dl}ۏ WL;QA F-iw4 @:fI+[WS׳4VVQY<5+_K n{ylt; FKE¾&=rɋݶ,mE'fo3`:viy:hV_vQIEF{ꢐݼC2wⷯ=-;8̍ύu[o-^W/]sU^VVbeV}of ifWu[3+2f{!ROG>kp((RTZT p-@Wf8.BoOgye SNFlT$QbwUGqr>9+M|x57UvBO}W<د녺oS,,;lE{f3gƝF`Ս8t" .lu"<#ٍMu'7`Bܐ558֚+/d ^ Bu{EOSVSN9xH O(:vZ˖-ku+^hQUXU:E^SX?~KwQHgPhm|G7?ܰp6lsvFlT$D6 t@OL|;GL$, d.jfb-7)JNU׬XM5]-RuוºunVd_6m/6[o?>;;wnZU)f~>ǭ 1U&cy i5]ֈ tnl8fVleyB>jRzXur:ֆ>sXո;Vڰe0)ϘtLdlWI?mMk9_\wKtI'x`5{Y)Q*žzVf$ֶ/(]}n<7tbDhS2k[+8 x ǜ*u-MdyҔN_WQ\g]O׳Q_>JdTMR nhHԲ9ro[D '|r曯 o^Ʀn]sܶk 2<lOy`&Nr`vmw `nbqد~n@ws7Io|cq}Cnp X߾Jt6VE[n&J6R+-IcqT;G"oݷ2/cݖ_x-) 2:ͷgfVa7Q ֻzm.== UV?(x`uYlf42`nߟ uS Xt~O8a.7u\<\o\lRnO&UeA0Vp&4QS]v#A㨕l~cٸ~\'25 ZgncnW]y{c]S\W`Ea7sT= 4r a57֕=5=96S+2NX=~Գ;I .ݒRU`$V0qT9yw6??:BtڧS0ky:}]YMj]>lpۉS^+_Jk)+~-=(~mwn|yrJCه)uh5~]_?k6lzl DIiTR'lt; FKEBWXBlQXQi`} yj_Z 뀳 >R^{)Y`m?]ϋ\4nWp@kb[dϫ9ն&X[sun8[ Q>Ѿ- k8'IPl"6X &(*Hֵp AuÞA6|بνVպoU]vokp*nU Vwviuk_Z+n,PPWU Ѯ%ڼ$Dj+dQbJԠ]Vk]` f{6;1لu`5A&qRv1JǻtT5Qԗ_k'ԧ*elu댓-l=?;J^6*`Җu 2,4е_E`mlٌ51P9k]٦b +n//|akYR?<ȖEQ_ᆱj&6ؠַ؛nmw AZ+Z{7^{m/}>WlF;HG]{Zcy]UohF|;M(P$Zw`fIlV&`R:jZ:LON4udPY-}Cn=\w`EJ7qcZ"׺I5לؽ]w^qnҪf0~'":GalK` LTomm&G`󵁑XE誺m&`עekhׅ؟MX{[ɚ`zs=*vw^zn?U]˺ھ<;ZGV3 ˶Tv酗T.ޠQ'|;M(kYl,Yv=m׭^"^Lbtl0[;TMU `b+UXiok.g} `5V٪m]Y@+zЋ^RVv1c8mW\}ݦz)l49 4 vr> ;y>~,"\=8j&}[u.Nq9裋]mXgDQU۠Wo^w->ymvs6s95|;M(kB8.J=l}[dAt>v[kVagQ?kt^wZ=uZ_lٲnÚIsλ4ŽM7%;Qxvp7WB))aS`؂lO(9^wuŖ믿J`8@aZ{v밺 enmM'yҾ7߼&W׿^̝95{~vk__r1Tw5Io}G݂rb"\NFYWO6>åNMBVllPޙZL t\7uccWz3ֈdKW@VݎMTvL{5״R\v~g5ɷ*`ϟl"uC9>4WݹM/^?);U|A]Sk]WnV@V/| {\p \WGث;j7 lZ1ȺkѮ- k`D|'`Bzg `o𗽬x/1~Ӌ\%X8)I&\vL{UW~{ M>Yp-6`"! 1 ;.`iMZFon*馛9._ܚj;5i-^vemӆy`Fl_ZݘΗֹ6ڨ5C0˺ASN| vhHblciG-e%aݠz;\xqq׷V2}+635U*iVmt\u39G AgU>+U w +l:uhע pW;-[ &}BFmCyM %m\D`_Yubu7I@v]7~Oo~X`떷{k2'U>E{]K`nD`Qqnɛ9s>N*e={[ D*2ΜnY^O?[O84/u?uMU/:4 >c\g`~O,t奁mMB0۝3K 4vaf![5<Κ՚xG|}nkwL `?϶Ƶv1nA`_M'yl .q>m lMQœ6U%o = `\N`sW]uUkv[^gZ `/B0/~>Gz6 2+<.J Mnʌն9e y.!S Fba:1c,[G]h X7qYkPsNum^}]mx`q콐7`N67xwxXY87am'a4[Mx FA>[o}&umOkNjcER;ݎ=BKLklq. cy4lQ\!Tk&rР튫+ g>Ph9q(*pXZ|WFoZvivQ]`t,5 ,&pZ؄+'â@;XM̤H⨣j-6E]T\q_nZgT0`X6C!"' ߼kEqWn*nFڟsF-6@%ՏRPu|`${Vk= 1C:3U,;Gw=*jYV阺+w?VaEzu[´lOB WNE`dN4jN;q{_~yk[ti{NfK/${L͙dyXkHYUn~b~}JouUzKc)w/`|Q*o,ls_Fmb#"߀O|-/~X'!؄+'âl6wy&`Rbm/vK̝;zYZ)7ڔ` |rlتqTE[ `n:n`X6`Um V0 fړa}' :0v, &dx۴ H ƥPFO`kK^V;m.X;[3'SPP`krlFS鸓Fb;GFnn;.^]uםUX6__̙36qE'&˖-۽SuZ?g4Nfp۩lm5XـN͝7w{{'C?v|p_4>˝``ky׊kmQo^xI60RDKx\qK_(S"1}ф^Ko`pnPAV GyvAk9. r)lQ[\yُ&J> ҅XA} ]k`O]sT\'| ,׺wڅ`8Ku $N` j0#@V [#$cepgt^x*?k)NAVٹo}ؾ `>Uÿ0ϪkuU~G>ƏVՃPVŮ>l&`~`ؑmHnևW]u /8GfIoM&dzk-2}n TemP; /mU`Ď۠ EkX#mZٷq5*.ku҅Xl۷ozƿײzu5xViUvl~`5  d +m/k]15 NP +bM*  2 |4ТzR??5>)x;Tf+lS:ǠJe+Ҕu!6k*]̵bGAu>CmοgrڏUꃿiaew; CgI6g*-Pr>XFw&zԤ|mIaQCȮe=бAaYuOv_M i 2gG=IOŽiT`k]2'aΠz;ԢeN5дb}K;ԶVUCiPH(E VW~i`)q>{`17y}ز/|t;~({MُC˗.kFjPaztY]~|,M>r/ ZwRWo3`bB`AߘAY;X ZUECMFۢcPU^NʉkʁPՏd???mwV^Н-(, 8$XlDmMLv=n,nXo`^?vZ U >M}*'a4[6 E-c%"a~4vzviҮZN" Eu~8 l *Ch?xCbC?~@<o?l?`ӪK&x^#@ ?.Fp.l\8~ +ˢ\-I_m4c[5 %5'F:f~:'x`˟:2wu{X{&h#P$v*1[`XcѶ^#l6&,_hCX4~7ܘ?35e͵? v_VkUVUׅαz`.V^~7eHit`؄|Q)J_"6PSIMh?HcN"zYuQ׻.ģ`~ZW? ҇il=аQjf1,kFlLmx{W~LVk5y9az+[/@?tB` (E; t`gRu@~-nz s{z6Yc˺c`;pO#ilT$PQ:"XcT4L>X6m&Ig `.g^1#c7=| g' ׉gج{ {Όq q:*N]e2rf]H3RX6y&U `Vl΁Nz =xgY)jȩ]STf^YI:̓W`b`ؑ&R>܍Clb~s+ 10w 3:ͨ2(* WA6\k` |Aq|^>lۋwnz~wL o6p j36(0dXc<.q8RC]`5qCn.l$,{0R Y2dCy @ pZ3ywu4R,Q[|H[.-ǁv6mҿ 6;suBJ(hk*}MS~GP̎#_H.^XlYԶ_|qmڳ:M~^{V7p3ΈJީQ:-Oq&>{כ=|ZZX)56 H)0{9w֥su|Ӡ n酗g[m>s7 oYN~…SJ哷*vQ7|tYW\}]6",P<`.6n֦Ȭ"8:ǠN\EJu҇cG혎oWޚ!tct]+gg]}6&Tcڴ . 
я QP(8 paw[1HU_ȭR4-s$ʣtSl׷%G4lYذˮҰ۱`1P*EU}ؕ"ԇ)lyX޺2PZֆݬ꒺l`%߄]AhP]Ɯ[ !$[ Sӵ+U?6 p elt loB6ߺ +>~2g #uk -16 btk?ln'm#֋z3尽]g`|]%Wt>Nq* g!dʢÏj Nu S12 %+SԺmCz ]`]&vެYn9s iPu||o};w-a&qH>eXyjMo$Q~e'n~7L!iL??S31TY{yfMh=?{beh  GZ+-;،+/k\""-'3)1GflE;met옮<:gxGX~2:ue}O)_Rs k7ۏ@gGyT}6U6FgѩI ,s&(M4%RS57XBiCgvX6U&ӧrp^t `5X{Lo'6Q`<6ɰh,J0Y8u 짳ډvkAup|կInQ fe}vnLԢ3*Iܰ;-{T6]콡rW/ ܦ )hdz fQ`A6ɰh,;AV`Թq퇓XgSe*sqֺǺ]m4'Ͷ=sַƆzaxn\DMΦ6a^mMe`݆6cߧ6Bg#ȹ+&\l•aXvHK?:ol8qA'󗪒f7F-YgYb7}ʹU+ʆTMtoTpv #ȹ+&\l•aXESsG<6X89} d2-k" #Uaڰ|׆ȾVm06C"@`oO=Ɂ, @lecam~7\ְpWl8N $Y}r 4 C`gP؄MMr2, /}:w6 -Yڃut* ' `OD`Iʺ{? fqPdl)w'8l 2:u<:#}$q>+E^Vllb#wjkα ,kYT\U?) ֵíRqf@3 ;p/kEz%K k@je4ialB y~fkww6ͱZ~~gX:P5.3lxzd*{^tj@l֡GR`>K 4elĵ^UKTuNUxS?i:N֕]^M^M`.3|PW^}ΟG`Xl 1nƃZW9MA `)6Z*(b"(0& , ZiFiU_k.F{QlT$`@), `@6F;YlT$`@)p*2<%¹)6 h5 3!f(0> 8rt. - QPPS=(Nl`X6FlT$D@@qo {u5l -`] 6Z* 6 h(3\яϷT.iE/&Q6 m=- Q _滢a(Р,r2e~lnhH*[wեNMBV@;@i*Y/ 96-gm-+&\yl•aX5 dΎ7em#ȹ+&\l•aX`lh ="l5&\9 `q\v\^BC4 ="l5&\9 `Xm#ȹ+&\l•aXǵa5' nlE]6` , `@6fQ`A6ɰh,ڰJa6F{D)I`6ɰh, 6 4llGtK9/:5 QZ6aC~Ghy)8 ;G(?Olw`.3|PW^3?ُ, 6 4ll#FKEXLDqmq%z{ 0w`(6Z* `Xla`,6Z* X )0s:޶nܣ(H 6 tclwFKEr'P PF''sl(6}`"! x 86 `MbRPPXLe>bRPPX>8MEpȇh 6 .- Q _&\ȷT.:86 flt[FKEBWz 4LyIl _`h6Z*@ %OX6_G'lnhH*[w)\)Slh5 &l"l•aъ ܗ":86 fltS FKE㓨lkة`qSs)6 kl[tK9#:5 QZ6a` ,R:6 4llE]6` 8 ;DQw]S6fQ`A6ɰh, 6 4llE]6` 8 ;MEpȇh 6 zD9w؄kMr2, ,6 ` GDsWM؄+'â,kÎ+Q|fuה zD9w؄kMr2, ,6 ` GDsWM؄+'â,kÎkS! klG4/:% Ql&\9 `Xmr)A^`{UOUA|P XǵaǕYQ3ꎺkhe b(N`Xl6`"aD`1Xǵaǵ 6ڋ`"! H`Xl6`"! H1f\ۖߍװG)uGa]RF}(c۽v6 jlFKEB@@`qqlhh(((e.6 h(((qm*C>D|mv1hH*0ኾ4SrHS(Sw6 m=- Q _滢a(Р,pj0&|mnhH*[wD|mvhHb(g3gm9lmvhHb(R`S7Al.hHX TB@QN<$6 - QPPSQɧl 5`] 6Z* 8ԜF`@ 6FlT$D@@el`lFKEBW W->%G4 1Q6;F(]AhPv4FR6`"! oQ`qzSpz)v >- Q _|.ŒbQ&v4FR6%`"aVL*2-w`qzSpz)v 4- kXOR iK`j zlvEKEB6_`K,ч%4Rp) ]4 M؄+'â, b6а zD9w؄kMr2, 6^ >`3(r pdX4`Xl6C"p  WNE`X׆T"@h$60<`3(r pdX4`Xl6C"p  WNE`X׆W^Ëz=ڧblE]6` , `@6F{D)I`6ɰh,ڰJrglG¥ozUU>?mnk6_e*`Xla`=a]W巶_9^MF[ SJt6^Qga=E3yw5^ ѮN}YK/C5}((0: spF@@HRzhglM=B@zJvW60j6 ((0,Nw6ω"B\#?XQsl PbP *Zt=lQv yrk\ P`$`qǴ~il`x6&Ѵm\P= ˂4>v r@UVSq{!og%#o\Š|reg`d=6 # ݊~j+eiTlT `OFouG6|9h>֥€X?_kܣmRz][ūAH%+zhgl*!U]TS;Ǣa[j+ >762,)_23.4J wa _ayv^av{q spFرs-kA,ł$,jc>~1x<}P1cǮaU2E t=p/{X(塏;Em-=O *@`lMo,j(U~da@kUBǂ/CJgk t ?+Շb[&M+)ڋ@5XZʻW;eׂؕ2\nVtp̬e%%;M7K$@is |~tR Ee_0M@^CVŁ5;F}xZ^f蛆KD3ކ<-~WfS"6Gvx6•QP9z8h `fl3msRգ/ z*Wt{6uXa6FL " @[XQsl ct(Yz >ۯway>`T93LKul͓G)fMLu8|sl`Tl)A@@9s؇pGŁ>el`x6S"@@5?%Çh `b8H>((((7pЋl yZkpZ+2F@@@[oJ8|60*6pu?eE@&ݭ a6н lrB[@@Q@ XUwª@ M:R`a7C;lgm݅!P0A_t '?Y>>9&+[]rBVT־cؿ?]4?JcW3ohƬYg{@l̘1M Kdռl*(چލ=t(eЧ_d^,UWS1u_9:麖Fy?je4q5&_5 v{p޺s ݰl`\m@_gF-'[}nq*~oY ܉Q4Y) '?xE#vUH~A_re+*i #5:a7 `@6:qoC&&TO@h`%(0{Ծگsѷ҄ gst;u$P!K^,UXh'k%å `>zX(=9ݩY)p-}bl>rE3gu5fq r (h60>/zr@>o|_Wi /-;ς;~9]ߘon `~ G=P;׺ۋ?K#g6k֬/Bt@34qq*'dO@P)Pϕ#*>\Tn:CW Au!u` EqrĸWxœrk'քQr {VE&gVrW/u2<^~9{DRbi]oAu/Gk;"?ֺI H ÞN;v!w l wa޼ ַ f>5ȞXom `AZ(Pu~[7a_pv'u$Y{u=<&FS`bb6b.U@,clso>G8g-j"h+R >6螀6/ZY4պ6QAm,-fX$@(, tjz0zUVČՃ]"GSmgϞ}Q#6 `g[̚xx?k4p.{*Njd0:[ClKP镶]/&ݧV\6l?z䍍-ln6Ef=s:akq^ D +GFva@.6pGO͞Vrkۄ}(i'`0,CO ,rփ&<ձ0O|529ul FO͝q+PS(ؼl79y+pZ᭷vMǝTh{;=`3V6 `y#^{[{u~"wxϻ٢S~3zukw=SwͲj_y(( * :-vߓlh `@v6`%SuG2zϞMS6LWA5PPPPPFu޴~WON.a&/k=? G3=AJsf2|PPPPPP` N­Bi=miXtLZBB+L^`*AYݢ~(n;qjoaܮAꤓ:V9PPPPPd Mĺ{X^Gi5T ([*u<:VIaewv F5A@@@@h\XEa=40w/~RFR ؘA,oyZDW=2Vb(((((zeQH[UUs= NFi@Ri>eZ:++#ۃqr* @^ vq:WA}WMQ(F+#k{:Yqm:f+ʤtK*ZG]ں!D|UNXi2_YX=>Wi.~:LU#0QPPPPW `}5ˢ|u  ,iװ|>U3UZ2{SY>v~̤Nu?Wj`rZ^~ުVki.u _QPPPPPT*U,<c?!G] ȧcS+'^VkyYe,Y>e[$Q_O!l[}?B\hGM&&E@@@@@U0@3X#ief@,i08a@Vآ&V)ӲX,[qP76.jݮ1 t@ZӪe윲n"M>`}.ezuQ=\~t8N].' 
ehv5u1FԕE"{ʫ@cN@[ǫ`} jϱ"޷p|o: q(((((+PUi죁:+P궚 J8+Ֆ rݏEЯ: ӖMskPPPPPPU[5VҺѵnWuޕ]4xentK;QU7]vV_X+lB\C@@@@@(PXժbl/j5*j]ҲTU*w6҄P\UUePz®nY]lvi/Fa];yY]@PPPPPLv[Yl7pXc(\ZzKmUe͇*ѪL++ge5mMTf@Դ@418!DZM[іQ xPPPPPPQ.d>뎪og0vXAct MYwU,bە߇[rVu:Oԅ״y*+}+:gj̮3YQ5u:_*CcUzv6tW:+WF:ڪ4`UnHxkn((((((*Gہa\((((((c@UW1E@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@1Pߎő>7IENDB`manila-2.0.0/doc/source/images/rpc/flow1.png0000664000567000056710000012002612701407107021747 0ustar jenkinsjenkins00000000000000PNG  IHDRYiásRGBgAMA a cHRMz&u0`:pQ< pHYs&?IDATx^ ?pEPWJHb"nqGçDTB *"ʧĨ QED%cHL&1Im=3=<̽oWw_SU P*@T P*@T P*@T P*@T P*@T P*@T g}N۷oFm3Q̶xn+[)G!{84T P* mm}馛}|?1r;Z5e @v7Q^=ˇy7h555QCe\< *@PdžnD1WKW>  <6o~;wf 7VzMxT P*@@eP-|166P zٹ|Դv5T P*@@;v\cF Zllllh+WG;v9ϝ ύ P*@Ȏ=6d?~`6666.ḩb鮭1;kJT MMѥ-ו!llllͷM4bf |T dCF6Gh6666.]RU&]Ґ-gWebӿoZ8+C[m:.6666 r;O|VUOl ^,2fMNj0NȓC1X.&7S| _ ŹeI&fT P*Z'4<h}Fw33~*+@e T P*V7^8e N"iRmQ`.yT T@۶>XtE$FKoY=@lllnnMuhYLړrdTfW?}4m0 L9[mFKjXT P*@,W @dmQ=aKT P hhM'mmQ=]u.%?T $@> CғњGT `llllDlϮ=OQ1eOZK.grDe*@@h磾O`Nx# jyE!F"ˠT  `u ( kdlvEl F@> `"md ,V8z`x4*@@F =2DyxjX^ u{(A@ 6666i :u9N9 Ϩ^DQ6;eP*P4r)DѱKߵ[=աCL뮻s#)_9/QF5L= N/`25휅8 %YW SF$}Pkk/]w\>ǏWs{E]䔿K.jΜ9Ny0uSܦ͜g5֩'` N 3Q*@Z+)Pf>B@K&+ ` X> U(@e66H ` ^lp)JoXa&&I,6,` l޻5uVmm 6@%ֹK |`vW>T dBD פ==<lkXl&z*I%`x$*@r(@"mKQũ`Ū\zj~΢k>;>v!euh%&G lb}|/-_#a(@"mKc'Z9`[n%!X Ib/e=8B)r8hօa>C9U&_Y"GM{QxoXlnXl;ĄG+)i3g{ `3FG\_LwNG9KYiC u A CJ,oll,Pk`y[=a/+y&b?nf7JVs`zX,(˶g/׏KB`c!OSS oL}77*E9/P AN{@lj/ +R᥽aONNXl̨O)2Eaq)@KYK7y x Xv(edzYhzQj^as"'Ӌm⽕j6{" `pX*@GD׎e$K8'B&$N,!L4<#9ݳiEVU"*ٸSz# ɉ‰qd\ d ` 97pl,@ R:N{B'b+ rK[!¶E(|}ӌ~YS T " m/ 66H `F&²(1+[ <6USF` Kا=\XP: .ix^,>TV!mvnցrecK Ј1kk7m#*eZQ&ʐ|?VQlJ#̱ $0Ɉ NW>y|',[x/ {Y7yX7 4'X*W9A@,ʇAl/^&pF{PyL$ vɉ`vǝv"U_W>װ?`5g&&2[[ "l(pP~0><sHX,; vQCd=@Գ>I! !NJLµ|靧1B\h.8[Bm20dX.~/VC&sL@_^?")OnMuܹsziV%c5l Xly`^X+}b?٬+Kk/_'Qs6b{YbЉ2n:_f5Gƽ^BlN |f9~^W_Z.L 0H%zt.%p}?|RZ`4hP)OϭVSΈ^Y̋PO,Ao"jVg_y]_{냊 Wypd?˟^K^foCHfyvR/y>\]|W(,-7So}OX)C`cMyx] JPڵkWuM7zl{U޽K eYVWГK%֧e,<GVB!M@yڰ}k2-+Ʒ*;h&c.s\,ݲu+6N!,:KSz_:tu/{7Ϩ,k3:6g!<6 J^Q `voB_%E߉Gɱfn VR|ئ7ooг> S]` -mn=hQ8W,#کS'u 7իWGLݾ8~5$IL]u&` 9E?,Ҍv9cccc`mlgYs|,l[ظDf]}!`kj@ݙ@Xޠ8#F7xC^5Gj|숑VYr,Ko2E4+<ƕ:GOP9Sc3ks=!K؄*!:ֆ ^Wm~{Yj˱#fh8fxxe^!,:d,s;ҋ/=Xc|lguf jKKZyd`Ye#|a2k^ g dR&.Z֞1uAqƈVhpFaÖ EA5 ` Q.eCYW6lO'xBׯ@` oT;w.,r|l4,_NMMMS'L㐠8`j6o|7~CB6M<[>Kyr2eNuvkypw&e(?ͺ0-028i9,h}G1z~59c&o=8;Z $` X,Ee <럸\uH6QQ9@Lo@10ȋQvC2E7)PjzPy emB|^f} ,Va"(,G?zմin<@Ϫ?uMX?~SuMx,˞8J0 pg{FB|@s9$m(6Ӌ[ 4weJXt%- 8f5!=>SG/H\pBˠ6c)t^NH]pZh0aѣ6lZjU]!=sLէOj)Y, ` 6a_`ˁOMϪYF_ޠyoxXMFJ%vvi'/aaE(gl_ji9ۗS%وfmƎ>裺qƵ{ҩUz\QD%`s֓t6nJքz,f*L!)komӠR]v)o7=ǥL S0zY_ kN%X?)i{+;6'NrYM(A:czV2i V~McA~G{,pPE<&@+chM>+p6]l6M+B,8_Bul^}ݞVZt:=k_׺_޹Ոd٢l{k`c ?|?8qBh&7L m*Ui}>W{fcmw rqb@(<+~V&C۱ iXl-RvK{G=?\}]۶׿ZmV\,XShNX l~(seHHr%؉[US$Ne#ٳgP_)Mjv*+xq;?\T|I]ɠDXg紟 6fVZGuO\+C3^a3Vj>GGqSj-(XLd{LBًLdzq,n֘{Kl{'ڴU+Z1;0v[O<PI,E6fgHu FDѮ(^BӜI[/azZ,,$N2iI1&b(Ij'OP͜;B"c`1h=uu oxٳ:KCM:U}駉>LV?O?.Rl (le=f[Y2 Nv{>)[`3i륇x4.vj,.e`ᕑL)3 與՜^F.Ky8n~XoȌwy%胔rP1Ly*q#ۥI]r%&q1B,X,f&~ bR(f ?k^Ń>H `+[l}bAUk5x| )clSk6h S4@Sk ԬԼhaRX3,Nt CSR͚+[miُ[sT@dW]oT2lX`1 6mxa]ӏ5vpCue)BZz>}ݻ:՛oQ%_y['pLM'zȞQ=.oGNGc+@@v4b,jf?26.2`Y6PȎ! p^s5j 6 v9 ;n8oJX?O>PW]uٳ3fn,B}MqG^x {I 7<Փ|؉nYróJasGʳ}69?֩&:L `M`v\xmLM,Ʊ9 8R+m'zrFS%mYaEK#;!wɘ+Iخ`-ZcȧB|g-,oHou6 @f><.k{c]|X'S$39* `fyϣv"9x6lvI9! 
p9?mzBWP R gV:&w}j;=&MZ}քZl^HJ]ClcKmaQB<^rG km7l!xTk{zD0Dy_]#bRW=.5%wXFtjTsϩoQ=%,}: /r `}% 7:l0Ld/\O>K%BI7ݦ `ǃYW h.Ab;|E8&62W`su9~2mSH\xM]#t11BI{uPA,p}ꩧu1qSW\qE}#lP t uh:I{EݫJ8!IB o~ҞY<[wʖ !~]wݥ0$,cilKՏVڴ|=Iױǝ:q&K-7U+-3 ^Z$OUM+9~vjY!ZR9x~ͬ?fEVj`]ղN=Ƿf`1#0f6w3br0h E~|W_uޝ[~s`TuI{@8kv;YDL{:PCnnԬY2q輪.eK,\PaJXĉ` ̩ |ro+&YXշ~[]ry-e= u 7WꤣVWn-z"\x^7*[D8`zi>a귖jr^(/x~/d+o_Wc`csqw眣~zjwb‹uy]?6xaO<(;h5iXz<_l /&r lNeoNBXmN@"+0o9H"#)a~+!dIʖr\lC9&dC sT'z`. ZWj/j,ׄ}/}%@I~!P`'( aHXJ `S٧  cõSl6 T  d`1k5& ʨ: 96hmT*`cK 1ʕ+ը#=//'NPC[MK:~D߫N;[>yd V`|Me,,a  P/ꕵ6@fT d@,`PBY]֞؄Jke,$-C1#>*`c^ucZW{3'ք׫/l-{ԀexPuܸq(V1v"v;o,6.ǡ%`'% d`-&0(o VJPl{Rs0m/ z'[Y1<:ýz=X@묣~{UWy XK8&f ɘAy޼y^3^r= ɢ v@dwM 41@ DK:hq\mu_y&lnze+ȏ0`^w9AXWgl.2KXQu`_"bҦFH0†ixⷻᄏٓĽvU=kR`Xjc`W ^I[Hx)ݧErA2> `V,Ac`1v/`++kڳ'BmsgKXV:D&:L `T6,KƸժ !~%/:u^a}hu\U.S/f̘+ 0+/y`A)\/sZ½D6U];Lu : lX,[-RE0rh=t=>o_o&cdXٰc1GP \Cu\"D&:L `T6,KƐ,wq5g!;ݛmsgϮ'N&r_x)`Lt]j ܧ5$:X'QPF%` _c[;"P8eo A`u]^X,V\.o̘1jСjN.`"zT\>`l"<6,K0F :먣:J=c>sjᄏ6hyI{_&zgJK<}-D,Bh"ZTL>׏X'K:2m׮/G1eOQ9j="K%Vkxbc?d{b %KZ,ւuI#>[ >'O5ar.ӳ uM`v?lzEkn׎dFMMMSӭ}IN & 6 #,c.Z#`[&kvt-UW d{s56hmVs;kޚ=&[nEmmCLX3&lz lzE <Յd%`Cy?s`a'% DFAmQ Ɣ! pj0%m2O VсN:Cm.%Wbvߠ5Yz"P=O#}xc|kBMu5^H[/Ν;7t"`ueOMj{Z\"φn赕]o K6m8CYaA,֩s< I\JuW2JgDlo'~gtJ~V<4uV؇5 n>NQ=PݻP.D1%go ]tS~9s>vs᭭@~d-lgd$FMRmh d(Hz?~Ho\,:F=gyt7zc`` 7n,`#|Pa2(oϽD"R##a$x'&kJ`wd$FMRmh \bL:В7vM7U^{4N_.אq~4G?<$XeKM!`-ZW`jP>lF=K'S{>%; 1t\zm"4rԩjžiҤIb9z)-:vTM Ξ=[U>1mOd;HH{ ` 삣TKmma)9d}hS`U,ŀF$@$BnMB{O~ /԰ ̘FpҩgK\yN%`Gb'% `Sr!rR l IpI(,vv:rHhѢRz衇=T] E%lw@: dVXlj;O=JXO?* >LX;s/Ǟc|~H%`jBDZ9QuWp+ !:q-c3KYi/&Fq ΪVTKM=ar]wۣ| kΟ?K;m4/r-jРAxj< ٝxkqݻW/cܔ' vE͙3ǩh0uS{0ԠO;UhW=E[|X~;C(„G7 ӳ{SRVJ%AXlf ALԱc|jƌva "ZtjQk2si4(=HQZ $bx_x,`+Ʒa/+y6p N 3Q+@%f .Tc۴i*+\GH.Xl6v V?Pe,[Z?~fr#QH `3҉b#:vUzt^&))!m!` Y-[%xXl߬9TK4aR=[n{52}.iˤC%`sh]?%X{<+æU6'r2&[yG\O\=oAKHm\C%` ɛihXCXթGG="VEd D/PI֐E>7'63<#9ՠ)WK%`Xl򖂦%I7H)X ե@<+$Q,'t,"ٳrb˕W uyh*!غK:O'Ym XlV ~av%_f! 
u<+[McOTMiއ[;/C6 x|+p>=+5&6p ` ~kyeWk& i:6Qq 6.?)Q$%W"hiDv%ʎeep!` a˵WvmX+∣쁕tE޻͐{ĥBǥ.-X,@mK~۶m} yy>6ɲjT[ܝ TP^玘gT>)l6V}s٩.Cs?^)/2.]9s8 a6Ե}GB`szE]̉ -ey3؟`k`{myfW^:"최cfdĒZ41/]wA 1k-t~_tHtqM'l&|1t`R(ס [XdQXg#} l=`kR;6U˼؄'ݢ7mC؆e;;%lΩeCg/vcer,f~۰eoX,n..f9ucP-ͲN;_ٯ^(H6#z߾aa ?,]gu`hhKCP=IpTlI}&W= s`/6;jO$[l Ap.+0[I󵽺c;ز]gMn ݐl uu ZkOkz!c 3ܳ(9r|Am/7kn󋜲˗')yUR>g-l\~R6%"'@ ?_(;?fg 5y3T:U̐S@]FGǬ ?`K^U RtGlkQ F*aQ@K&ZONvq ն4\sáܾ.לyJ` JQ n zI6/4 `!*/ZK.04엲Ug_AHC4\ԁh1;ZVe9:yclϘ҉TW4e˸]s*3#|MPm28xq,bs68$`iģ@oB' z,x`74J$,4$sOTlB~@3ʸF|WX 5={fDž}e ~G2UfD.!ɘYׄF\(KB]rm85˖I!eH'.0.,Vf/tRGݾ9I$L`^fX˯ I_]Ji)ZJ`LrQ6fXVū߄Dk8ZZ~%dva6K9fÝέs˭Ejau~E9] F=>7:(:Ko3%%cP+"\p#{d#P)@%:r"`g;$ű`\Tnrbx[t%`sfOz:Xbu\1Hok.@~${"'&Ƕqc>R&kdt=Fܟ C,67KJuZ` 032gaBSCŒT ` XF#$B[k<}78Yɤ`ՇP*X,F@a_ux%.x<<*X,F@#` ʤ`3uϲTn ` 9K$݁xŞL#ueM%֭O Xd@ `[,f11@YTZ>hnO?T&yYK=uz^$Z8@IMX4st#G}~_%ݡO?vX`;wD͙3'0~j뭷'eW577;tM"ϥa}3w ` Q,l[ظDf]Ϊj?_*Ph!`Kr xw}4 3X{q^Sw+0ͷU;찓S{mvmwoS>#Lw=(+A%F htqnd2`љG)26X4bsf}#7ܷ9N: vxlXR&'6zV*V xl@%`nlf"fPk~cNMw/W3$Ya%`i2 0WYGʳё~-y9),i1YtuL1ј \j:9ԒqˀϢma)9d}hS|@6wQq/}&q\sj~͸K6/\t`MR`ibP /%.yd:+!1CMϤ{Z ͨ^:q@cO0Y;Y=H$ʶL6U~qQ>c}<灜,F>\ruvꀲ$ïiX,*@^JUIxuV4`Ő4biR?㽖o1 ^-QmUvh]Q"yt%4CʳNj#(7^W.+k2K=L Ica_9>`O֖{qKϢ䥔]5^]WK-9-F.O04=koڍ 6RX{oԣ\x1 6_Au[!ߘSR=96/H.HƫC%`Mxq= ^Q3$_/h3Q,3x|À[,7vHzuʥR¤;ӝr2VQ9xA;#` oXۀ@WՄVs `즷Ƌ0»k$n[ףv'<X'[j5)'3Q`K%>ZDȃY,#$i$X" !MViV_ tۃtl}7@lS(@MϵCM&@ hh*xƷt\/"ꫯzU3g]ۭ[w5GAW `[_@p1pq,X6C4Yg^{5oWLSNU{.l>+U˸ΛqKϢ䥔]5^]WKJhg`Xu=|b\a;*][nXa fRby)eW$e$!` /qypX.=0&`b؃7!I'Kw}j*u:@NTeRjrɌTjL]WKqJe@9JfS 0}Yzʕ+= 6C,„;d&*y)XDs$*wF%^2/UY3_2Jչ&%p=蠃ŋ{epbJ(ǁ>qyQdK Թ8d&*y)XDs4Mkt8s^6<":L `T6=ȠqnC]:aʤwkA#`ix^n{`:u}<Л?[sN뙐Ȝ`LQLe3Pbʮ4:,KeHi0駟V .T\sѣ+\5g;73$c)L<_,Z!:L `T6؅{vS45Ël,4kQi<=묳fmƎ>裺yۯݺuWnӖ)֩ `4х.DTtmrj13`#1uE.Ԋ+26[mm 5m>sjܹꨣb5k5UmŸ]}WfNzCu#Xm`tav [N-L+]h'zЪf;k5 ^-YD=jwVV/_Rt7頃Ju#'uFf(Ѡ}KuenZL%!<$ڞra ,H0Bq6 /Zl0a|~3[o?n kՎ=ZuС=ղ9Ku"ا Ff(Ѡqz`aFNhY0NM,ߝ gXԘeԅ#Gag=20uOv;1/gaPIjX@ !~ǔ,b|,@kbq7|Svi%mll"MDu2Ψ F NS[<Zn6D'LQm߷袋7tQC 'ekN ><0_|(wwT{vʋaZ_S~8~66P `^_1V3g_Z>CJ ۩&jk#+@` l[}L؂XBSAFcgm{ɓ'do[#SO;ep jСNy)L]s"to7tF>|~3^O4D+WT/~;?p[5=j/=~{iso` n`f"'zސr1 _0PrĕXxQキj?MJ' W_}kVu]v'|RkظKKX~[ptd6786{BLK%L9sǏ~aQb<[n^L:T&y"ڟeMtZiYF8eol?` Q֞|VXlZcZO:HuzyW}wۭbx`o7… ՠAڣh"駟ƞx'|R͘1|#`26߷'K'` )5{ yT]!K\s52ط~[&@;{ァ>0ٳլYc.K- 5סɞyG;)PCc;Sc1[cShhvN1]w.zj/+TݺuSƍSΨqߘkV ) 0ŘvKH\pA Vy7|qEhzTK,=9PnB h$NmUuNKXz&"ƼV DOzիzƪV>s\.>uXXlKCm ({x^ u<*#bO?_(õ^(z`遭g+{`yu 7Uf!~)Z|~ k9XLGycbw_o,_sp>Zdzg }w|YfLsFl;L!Ee3fFKI٢# osuJSƍᑕ Dxb<|)o.6K(a|4郺`c Klj<}SCetnf_/-YX\=?ϥY{F>80A(!/"^2:ۭ O3¾])+v[+մcAasǩ<lrI=THm-7xL rcFv#첪ڇmõ^(z`遭g㱃=X,fuZUӞX٦1|x(|wG,^dL淀+f2Lflٻ ۙXۙ;4ɨ?ؾvWÖKM>Ǒ LxEZm6 n{ W\qECѬSsL,=@%s=Տ=2:V&x /k^577:,^]V,b -f6&/;EulM{`ќ? U28[8kL`0Űe~, $lk޸>lN |Zv{ pXz`xl7,bM^xA-[[FcFbxa%wNo&sXYFǜ * [wL t"\?BXn{Υ'ծE@?TVV6Ӌ+b3c;I^8`kki߻kߜ.+y!9'qj}`遥<sO5.IJp[l `NА -JmW[4̚:^D, IO$? %)tOʀVs.:>qi5oݶ>B}skghK߄hbtv=MiF='% 4znB6?G)iڕP▥qZ2dp l|XlF}[jNag)lg>Fcam G(cB죈j8>V{m{oo~*/#J'tTD: lqEt&͂ծcLضE$oYuZ]{e>,FkBME7 m4zc^g=:va)ST{n OcoyeBrwV#< >"D\`1YӱǝzO']ho:c8#0-}K.JqXLi̩v( uYF{TwrK 53ACpe{`Q@<*D5 %KI5V@cnO}Z/E慓& 8&hv]A G՞W=#ӽf7\P0!$f͊ev{ Hf~@_xD;oك%Nb Vxִ` +Mg7X؟~a?w`+٥vbNJV&?=Ys~Z34ﭔyCg־pӸae4es;i_]+C'`[Ծ}o pBi έ^y$NǏW[n:g!AŋYzꙇpS6gĉLĘ燂XXx\`ًὝ={5k1cׯ"%e=Qwbf' v 0'rǹXN䚑vx1% RM\,l[ظDL`1 )7,b? ! 
~RDʉ` )~P b,{+.\P=3 PxUWJ+c#9D;K0]OtO:نyF3&rI!xW A@ .kqgf9@+ٹ)!ftvmL /EB[b$D0Wj%DA7Frùx`˅d70|(țrӘgKm,!K,=yqԱ%/G=iU<2:AK4^1 0 5L%` IXaksTMY`a,6ٹoRzL6^JX\$Vbm+Ltxk"dr,}mLcr>oܲy RcӻE,=l<ݕSR bs9`L^vB,to[{cefG}v}`j!WQb} SnY\z`km?sT/qޠ3\።z;3xL픩37t->{챪I-K&[`m]tZ+a'qbX~*}4I8U ٕhC`ـل%"obs GtɜX %M}o V2$|юAWܟK,=9pɘ+Ucz~Ue=Sp^Y%whƛ]WmoW]Lb|]?c0K 3 lP Dl#eiQȋy[f16㶷 ڷu/QנZ`W6yU78O tKlP`oi-[2'yj;vl(y晪^J:dNgFe|9ڸ5XFyD%`T>6V6` --Sy(FXlLjHcRXV[mtRc~U 0/h;Hy=M\66dLgIl3s%tgX,{G` !fq&oWuةw=$O.aHWRƬY<;wnD͓%`uoܐ S+݇Cc3iFɢبˋרgb|l3֎x8u^;b`u]>j&Ϙ1A}rbKUsF3hYg@!?yRKrF!u`(z1>vq7V]wK$N:g)ƾXC m5'k2s!` R9,K%`+}A]!Cr bCwqGջwoή9uF}矌UYS|w dqw} kVJ&b6>#S}Ob3gz Om5IVÄ|X'CK%:*D%` S#FrJ?>'jOG8E5յl׼g ?['冩kpNUNxF}=&tС~xb$o;,Vnu_zKM%`ng"` بdoŋj|j.z=.:Z_kƮu"$q+m` n,td3S/]AVKmiC{C*8q-Ԥ̬zz2Y3/]7[nnP-*kj&?b9t饗M7ݴt!8ݰϣ':Y Xl 6.?)QYףYؔU0! [lYXMs`u˞y?X҆'^i3g{7H>}7cq<X$~_]v٥t c\n֩'`XFL)@- 2:ɵlpU%C{;`asakU۶m=\GzQGy橅 iӦyr K`r7N;+n^4p%:؂۷)9"عbʦ' sX~e6ئ6Šk{I%8K.D-X`t{~SOm5#/X'"R}_Wn)CN-2@Tۨa0eR5nȲ L`4Cf# a=0.CAn P/c6'.ƴBlp)yin-UW db u7~ f8מ={= m|IՍdD td9[-LlN)@c5V`RP&y„F:3hWO F H3 ]2R/9:<:u2^q<(Ko1ҠvS%w-^{ ϟ?_M:X#M4Il06W N}xIdtd994bZ~r6g*ht ~0} 7r[ hխE& ˱2BM/pjlݬ,o3KzmڴQ'|2ed;cK\gϘqlyV P:(@5h0cʱ /H.HƫCm} si)7=W*2Cih+PNJ]q%pXewv1,rΙ]㵋 `3n)YT ` BLa2Қ3%']p6P6@͸gQRʮ/H.HƫC%~F4o+Xs-3)'YW`^NuuN9 +@^ ( ʝ` ,Bl1:Hc`)wr,μf :Ru9N9 +@^ (CnHSX'3$39*@u*lMIʵX,֘6mm 6@uN21XG dQ,Ke`HUu{t*(#:GX'QPF*5Jeѓ666&G:`swIzBغσSx ` d `5P:ؔ]> Z: ~P sF=L0a&z-c&/m嫔:[vx)]ʽΩ6$g7kac P:YeYi<\!N&F/Giװ*Q60 P*v +cy'ּAmIaʼnn)=?tw'a'e|Mx8'*uR''`e ^*6 ]*Hȧ;%xZNOx-\͹s*{~cebxi!T7X/a_%| r̝uǐmm,)rpa_M;2D&v"ʅ}o!#яaP*PAj7\M `0@,XM煰RLXuugoːh9[xd v9AфvDo=&'8,d3IW@5+q02nլ<0dxQ y@`{zz`ۇKe``` `g DPcQS  ՜c QeWʰ!:SgT TP/A`|$ DEB 1`"5Z荠Gmmmm[ ,%RPwt4Zֶmq~\jjp#rOT P)9(jcxmmm5asFgc;)ed ъ%iU&]1<|t.y&0cԕֻ ء7nTo12~Z(cN~cBfI0;1Ca=rN86cHPWoy|f⑮5 s| 6IBRh]0#QNj,5a9;hf#n3Gō$J(׬ B5Ȣ79K Ģa% It9{$@&x@E{醲r^ԡ>2q<_<6aĊ l/}>(+M}ӡ Ûd,Va^Sl͡t0ZU)KhfJ'8 85!֮jY lOL^s"ۀx#]`v:c[KYI{ҩ>С=HPA] V+`2ZE%I|J]cx6OaS˛3, רa-;fWUv_2txzA{v",&J7ؠßuXUĝR\$gGuŎ =,;,E 3F}ze(f<8Dx-iFFN%F+I~KC&bpxH(`M[ۨT_$V }f>^|^J(5GnfWZ?M{fE7جH;E.Cx(@ `Qaإ=-"uOM;a|6l70m\q,F7AkB,SA y`f|['Vn80BM&aĵ< ޱc/ḩi1X| ++@~~]vd+Z< M"=S#foa`gͺӨ vʝK8Gi㧍YI[hwUNnP;ҩc yÌY{ 9aT9?wh⍓E3?&%%@mQ;l5ss6ӛ +6XIJy~}.1:[5+ǐ}Źoh2=4bsbxX&+_2y Y$jM1dB͘YWd\1qƝ:=rɧsL,'|B8hI8cH \s?cI1ܜ0Eډw]רQ f 7n@y[d{tÔ)y,n^?Tmvxd̕0l#7@=u#?2k:\-|1{| =T> 4gd JԞ v;V Gyrؖ@,uَ`˹͖lf8/V43Vǽ³ A`ޤaD #j˱7IvFzZWa΃ym^ml\Z!?S}zcD 2\кm.GƳ LaQdHoaʌ+ohŸ(˥T D@S 8E'.`, P*P'z6:UY*VsfcEwC[1ݧ {ъ^1GT P*@Q L;X!axLbn$?8TǢT P*@T P*@T P*@T P*@T P*@T P*@T P*@T P*@T P*@T P*@fĬXi\ẏSODT P*@@)%,uy֟eO ([9 ,>AL,K1d|rJ'\ʋݨT P*@FF@+jKj!ؕM]SΥ`fQOW~f+=ʍ. o, P*@T P0=Â7@(ҮJ+N~y ƒP,M^m 6]rKuQyT P*@ȼBvb]O$EpXxŻX),X~}P%@#1`d*@T P*@@%d Wp B~h,N~gy`_6 8Ts8=h@.QGW(:+?`ECeV5BrQ~pHnT P*@T +P `l(ȘN'߆#6!$^H? 3cL˘ScHMhϻ̺PNyhtuL5.Wwk?Zü[`%4Zݢ{1AT P*@T P_,\+J .M57˒ c?+k1pi?J1 jX_{WlzM-5?~!2|u2CŇ%T P*@TYs s< RBfz<()Pm' c6t1ߣXYmmv9WW>B)c'Ow:.۠\Q*@T P*@JKRUni9L`]ARH3+^a36 m ٨6hvg?5V4)w [ m% njT P*@T Psd9,V/i{* r". 
e{/XZ a~\",`yZG8f {рl5CT P*@(M$ڢ شZ[ !A;JwY:*E9rΑ۩T P*@ Txx[.&,03VF偅h8o$ X5k^.;78@2O /Pgc%5F@ J=b3]onkkyubM P*@T P(UZRԘ=gSpr8rk"O]}ʕe ǖ8Mk6u9' N@ל7lt̗-'e#p鶹J!p&py\[n nWv} Z_+e+ufzC p \kdӆDo2 3K`N|7ls8FuI{+7nSx^9[k~@#p5뮹~n>KDޥ%iUӿy˹m̻eV p~pL[$Nn p p@ΥYuT>bs屺)7xJ^K[Usuݪ{_* n w9nI LMU@U_"U&V'dZ`KBDdE 7ɀҎLiI'p@'peI+bg[mGs@% \'py ѼymZR*N˜@8f89 W+rfDlۦZuͤwv\xWp2U蜛ULĭ2 nl\~p0ǁ&˾yt+pGۛ}g?W̞<8@8@=j-Q:ArU?Շ[g p T}"CT^(z~  p@յ@1_[ p{@-zV1UL"ކ#pR%ai8 pRU'a zi.S8@*:^}z p A7˻i p7WΛ6c#ru 1ۙ p{muM6, p}K;-o~7Ьey(]y啟߼yM6u#MlܸBaEٲe˅e{݈e?VN @zg۫_պ|辞Y۽5eNc{ ԓߖ^#Cx6}fVk>}K'}ǚA-2(/N$\2"m"[ܿ7i]DD$:vׯ4?~y8Ν;|'Zrk׮g4z'4 "6rOiYN4+p"\d_\+%Ѭ_8vX+5 /Id[oȨDgff>&QEN&f;ԗZr8yԏOY^ "q"t C{qD٤?Y$n?Vx3g&Bڽ$xѦD/YEf9`ed5|aJ4<ۥitff{ed+++S!mq,,ѹ 6|5$i8?9+^ \L 5"p i$VH&Qϛm 5u"pC8)#3%ږihfƍ;33+t0i'DD,Ҭj3 pC8I*#He$|#- LӼn!mذᖫ&e鸼$m۶F"w.pMdJR4m8҈ pm.˽okv…|gɈLKRK'y_G;xky,p~G$Ft2G~kuLC:8 oSO=o^'"^{G-4'9 C qeQ%pRtΝߑ/p"k?pGuz;8oh2 \;KIKS9an@`lN*(IxE^D>O~Z%J"`"Pmּ뮻ɖlKǮ#O8(p|'}{ُ/p;ɶ*~տΗemn$d*iFC8)%B'5$tiN|}INEeEt/>*hNoX񿇼~NU>w_]W?틧ø(\q!p C^L8۔ (Ӵ Q#=&LY_"W*p?X+ UwwmfڿIh]Y}I9WnýkEnx厛>!T0q*]`dw{m& )P,$PYKfm3 *E| nq/I["0'&IM@8nLNRsܟ>ſ ?Kj^s5gu4 NrW|;Y6 ZT&oe]F>ڪv?ML.#?wDDeLC8Pꫯڇ>u}x%ͪ"pqA^w[n&&pZo>ڣVn|9:"HVd2Mee85]G  mтvK]F~|}wҮ~+eV#۶mDZ^2[ٴiol (p*T6zr F+оty.9m7q7odPXo\o/i.ݵk׳ҿ0{*pފȑ/V>2ҸmHPB?,GD佢Q9v@&Ñe˖;\YY^裏6_>i&붏  pU"cڟL3-IGT"/mIA86;;333ۿ.ٳڦS+|Y8ی*BMPjI?7`#A'E?!Y@Ñ㒈]c=6u9IA6I0= gY1d-fNѮpFʺ2@͛(͆RjRs{= ]tыGI`"Niis BF V"pMj\m:YF[v/!p=TvHmOU*J:{MqܹL[#G{KXS}D̀)Q.jd<4iδD4凊.7wkojeIKyt^~/ wC:{lNSqGDHF "K0^ܫ|i %.97rH~ 8bO+5,8@Oщ%AIK/ȔE#y"ocƍ?KsE{pߋDDbǢ pT \*;I*8ynZ}^t[lމV%W>o[},[G88Fi81Ras2VM Y[u(鲡&T'sx{ߗiYÅW`Qp \vC8@{INiڌUEyiщޢ׬}hBE8@`.ois(N'%h?~NnW$:"0U'$r$B*aÎ Ҳˋ2)(F%mwF88@C@*@88fd~^@18M+h=AU/IL#7-' F88^ $ -KWy!-B8@"oEɼ4d CN^͗VOV lKk2)VkW~J}8,,F^!pPNu ҈H/,cבZ|^dL[ѓ:WvlQ{ݞJLSe}9D8Pi\ؠͬPDOh//u:WHTL5Gnr p-^ɰSe5jהvg?/+g*zU|c+p*AI͇$ p p8mT4Gr"EVO~6XN4=lDC8  p pS#p[_~ |7{CC8mx)y%N<98@88@8@8@C8@8Bo} rȮ/8@8@ƙNn]E`[:Z0wח8#(E4.(`Q]Ku[K>!\X͸9'ry..nl!7n+=3\}N͵uwmo+v'!p#*޴~>(镈^Q7h{wuf*!v?>ќT~;"q~,>r躪F!p.VMs9#m }IXAY uULd~FT("p!pOv_Y$nNVąXK/яrG:9بX}YIeY[nL)y*&bWw&C8@5ڇc/ymjΛVH#p'oU|ZUrQ5g}e'e8õMWqb`6ֽ抁\4mO/Bt p7>ק^hom>_|8w$>TupYv?7ѷ{ K&nIB94}@%#y(UmjU~o6@%.%`=0m]Wn14mޔܳL{?g!p I{aLCk˵'8Oa+plN8K2_85B^@D$fT^!p!p@q-H!p7zH Vd62CR6(%&&ͨڔ*5!p_5nڌM2}%/\&ӉQ]G%ǹ #8C\jMYVݘ*unN.H3"}ߤG8CF=D`YĹ*SN ,WbޫĐY_$ \`4\s/P8@8.k0ebaJej U3F :u]}As w'7绻kG߿3_U/I+z!p-vFiIX%_gmfJeB hVtpe?3rkK7\>/V`5w GW%&Q7i: !pHwвYm/H['` ߤ>pKȽُf^Pm:#pZ'ͧ0ivg=0=5mЭMAETڤߛD$H/8@8?\޳\ 0NW4P .p \ tX'pF?\M/:^ZJKGф!p7뀤0^n 93d"knE~]n[Ծmf=1^moe+__ovN8@xy>CC\*.iK0_u@E|gZN7%))8i T7K?88nn2'TL9MsUf7ɼ38[ӑ&TCv 6%z@FμnM/j8#y p\ّؗQ!p]cΘݫyNw|g4ij?MK=d9"9N1 ppz8M#"zM!pgvlnv pЗKӈhTȀ!pK$n;%pZUp*s!p\q-Aj'HN襤ŞD8TN"nkŞg 8HEN8s78kځ=KG9NJC8SH  Bഘ ͨA !pd }K2 p!pC^@`(gn!p\Qqn8&#Ry!pK{ p0pc*!p\_4I{ B C8NqN8; C8|&i/ p0XKMQ2 8@8 K{s8@8I{Cːg( p\F!peH(-pg+#=} :#i/ pq'ه'#CǾt_s۫_N}7qGs.wi I{C8h{-Y&&BҒ8_"p.iiJfϱ&??Z}0M"u<[o ܑ2'p$m/,w&m#ozs)?ҝw4Y8;-&Gܴ)5Ko=i굛e~/} 8䤥TYri~"y{ byڟ9w'#{N8NxC8DZG&VtsS|'DN?=KGA8!q8‰O Y8^$E8!q8 騒s(!pC#pS!prHڋ!pC#S!p6sBw<8=7'ed@Zfڐ4 Z 3fg7~~מ"25.ud8!q8m*6o/$GG8!űMexMC^/BhNI-g>B\wHdE4q> 8|AhN4x=\+Y&!pAn*eM)ZEc/&GN8c!pm\nZ"p pMn`By ceEhB 5q|p؄:M%85Mj}m$pc/ѵ2=)g^U=T\)amho{8ܚ/`~6v8;cn"cZbIͮyzqY֗8%)C$uȴ<385UEl3vG'Ix;=w FT-є5^3sQysЭSX0f4%?Jcs}]G?xg=pxi}w.E!p5iuhSmNS;7kg?7>֍S+h.$#p]nx˭."nP~tQ \hQ^q+ѫLkȱ1;:Y+\c]tAC8n v/nJ?ZOG|نVY}mC b6`f`ќJf;%nzNj%Jq;n|9^Cw'|I~w>̍{K$`c-p˧N)Rg+ _DRJF&T=4ݧD⒍m٨8]?TTM]7 ,8[0 70ȼ<.pI&{ jXϻ>:&C*IQ~pi' =v3MxY~EX_ܠFZyMV\Ɋ ڞ~TtZ{j.i7d>[i+/yilBO/6 7QgC=HRG$1lG.t3& K8˻6Q9<5MoH4 kO'?hD'.  
_)/˽{6&hGM 9!\j 7Z4{sSFɛ]= \^ q֎1Vgf#֎#)'&vΗa$$=$ä"8}3FB{l4=_CWX~ŏN?\wMUݼm:Pň2.cCCAͺi$mGNE"JO}j2/&%M[KZ5Z 'fIzh]j%)6{.kIe_n˖󣽡h} @F"pClBuIMX>OBQ4tqY"նoM9% 5}jGAH \:ǹ8:Bk7W;6@/^ky5t}k.A4Ku¯Gg`;%pk"PRӈu칦sPkz[)ϵӈlٲiW2^JV K7ȒY!p}( T;SqM~"P h}ŴSd/5o{sE71d_I()AJ!}#ceͩr~K42Y;6mܱ{MȡLIe\ۚ^Y1 M |nu lbP\oB6RPs?tNjtzct?Cǰ} i)Nsv 3gW$p"#p f "p_#6!*~Iuա=iKLisD- F[?+NEfԁK78/I2)O85߱=)GD$č"Ljw؅dI>rX:(AaN4!$R p4C 鹥N8ӇͥX^lr(>oR)PscoSih6srIlS$5}gG&vJ8V=?A/pXz/t"ycr!p pCS4 дM 1G*IC NK)-=*Eo~6 &ˮ:{ pU3i^$%ErmʵzٶowF`h7L[1{@H oC:9^{j@7h:$NqcMu.n8 bP#!p9j"uE".x!p p,KsRk IjlWN /͸}u"pMY?IBe۽J*<8='p˒,4'[^#wv]nٚ~6N$p~~^j(#pC#pc)p:9%2Vp9SJZ4T*Uv%+O~Ύ:Wi ?G+lY>zqc!p)˵'v]ۋ%7qv'+Ez!t_!S1Gk%#8!q8PG)pK!p{=^{ٗ8'#G8X rT5P;=^.'\rT-&tϒi3h鶚K\ŕq:^8%7M1gRO :+TCٗ3iZ}8!q8QS*[n"NQ;}٬iH:8+uݗ?AL/<8=7K}Hj8/89.]M`:˝+nn :x9(J4"pKrjϱf&dt_qPrILkx厛>>G? :? C8sx87 .'ܹI.p}}GNm_<OC8w&ۑ]F/t"V.p:y9םǞB8[NCk{%/ pMMpҜ!p/ps+.!( pH{ght5nj7*;}͋.!i2%-U4=ѽ,i e{ρIõ~C 86߄1g 7ZK?C8J8;\; -< p7'ͫړT~er!p!p!pA'H!p!p8y3C8@;OʨT8CC#p˒C8@+pC8@\\]=!5R8@8C8.+TqC8C2"p.'ܹI(p!pMh+p85_C8! RmGtΞᜁn8C8=#"y!p .;wPR9oC8oeDNJNI+ppB󩧞 !p%Eͻo^}#p!pIݵؗs8n$6^dC8k{p!p7=\;"p!p\FN[99C*pZ5=!p@q8}}ӟnEgy4"!p2wv z<97Tѧ|>8qC8D^*pBN8@ DBR!puY[y78CpKy|8nh8|8C>Sι7pm/7Ji!pu}/G<7p MIE97pӺ6&B8n=G|I88Cz:q> \$&i*!p=.'9  E%?%'Є!p|4  p.r3S!psiIy!p p8)%8C"p2Uj5C8.5;9 7ț: "tRJ C8~,b? p@ΎFѧ"sA8󼾻wr -@ ܉'V+3ڨ!p6 pܘ DDz-99kG/@TN1< b@8K9'7 3w00o* RZ>!p@nF#\y啟߼yM6>7} Š[7oٲ傿e]l_9cZNFJ7k]}G8Cz>ˈT1ȬEK 1331.K/G*[z{m>-8}@Ξ?}A'?'ᵯ}"yߎNFl4/8}1 C8.\ 2!p"<lr/ȚҭGoٳǏ_kwDJ'El C8K 8`,E܈}O<^+++wnݺLC!p p-p.'9iN圃\.{[ltN:gle~ ۔biH⾙ Cos˜smذo{yE[iwĉ־5tƿ뮻n:yIF$C8@_$ᜃTNFJn߾z]gO&#'m-MgH[GyH@&_9ZrAj'?|TNNOSaבmؔv<ɲ2]~R%?]]_hX7F@88)%8 ^۶lrxR􏋋hYђyϙ|yt;S>%MLҧMiTqWfY8i%S%M"1Q#v?>coMm''$p{h(N% FgDb(jh+Kj%}6mWc+RC , \*x3{jie 9[ ,X'LW[_"^ *0i Q]UHJ6v"p=x0Μ9TMkN埡Y{GDÊXGT#i Uqϲ^rބۄڍ٨,/͠@麡"~tivW۾vJc*M4\or=5] ~2M8nNkA%L{5o:UmTt_ĹfčZA,C8FFo'H mSdsѰ_>|ͫ LTn"p pFj;$3gaBb?\J΍lU}fN#p  `@90l[pѪbH.h{BWksfCN _!_7=?҃!pr= ]CZyLҊʵrwm$o8䄻M0VpQ p!p^""z~0d!L~.jC&V91|Y?si=lʀWJ`F8`:سT=\} ps*޴F(Ȁ_BC&õ=!p8|V>0ꕽ*xӋ*1)s#{cwߪI[rIx~ {[hBOyh pc#p/_eN8n:PKF*Fƴ"&-!V4/M;Vn(8}yߵn=,7~\B%D:#D$!pEom#I m.gWer~=++yR;MpKѵsC%m!Q*y͐K WFLkAD81vSInzMkZ-f]ҡSw1Aw(֯k-W?p,K^85r$2eU쳑$On[ŀ)cdۊr@?!p*)&S`}7iF iM )@qC8 #p]CCFD8 #-vDKЈPzaK`3"r.an +g3]Mo. W\knre?ݚ'?݇08D~E}`:@Q;,׎uMpq ܺT^z7 foa/Fz\ Y i{>x& :¢W̾`*鶶iTCuL%[!nX~KKtT~tэLjɖ`k{w/מ@.gZ px=$'u*qO⋝\ޥյ*L\;" p\ZWtѰ7=Xʦ1T;9 Y p\KrM!p!,j{׊dõ=!p@V%7 pA2Hu@[팮.oI _҆ pxmCzKRvWf9+.j48 #@^#`TimTVk2_3mƑɢM\|?8I@*2*p~Ky侥?Mi_I؛ E\ݒWdWLbޒMŅ&u˗#u?*脳A_k_D3;ŗ]/BCG{Cs۫_7qGDN[/qU'Fk}"勞^qrXtSWt4B\g;|>O4b !}l}UZR'dµ,C\ƒF#_dɫaZJhYhKhUJlǟ^ @,r ܧ!pܿ/Vd̓eykeBӺn'l3MJjy'K5} g=fڼ[?ׅ8@ 8 .Y(֏?[)e5z=ѓn :VBr& :_ewM]'\ ڢ67秊=yf'oz͛gE p0Q{j" șHO͎"jg<=0]F$LI5g?vN'-#h4yr6gϘռFE7dRZylgA G͈)tU/O4O}="4"d6Y i+.2$Z:Me+6jnNӁz]ƒ\ p@H8'oʏw'=*IF▵"'k.VDԴ֧iM.{Y2U[G p}kxN%ITZn?}s}FΏMGFJ1˭ۏC#|kh<jB{8~MqҴګte pN/מ |(uNH>[)ҦJګUrfQ$gbuVNu`0HI8DoXJ *V*Gqԋv/2HU3q۝h9̴I b@8N%=1WRDlXR34CJ3O+qq42'ےmE |Yn(.~IJ,֢Il|ش !pɸ (sD(B7d)o=]+Y3yBhrUL~K[{ V ~t݊Nfy}ُ-gߨ/ p\ % 1 RrL%Z*:TwjEtb pyj-_ "QrHڤ91t*p1 杘j4ڊ%]\y>%BǤ~?`k/>k1.Egͩ +yNi m Hu"p%;xҴlL4[ݿ0B}5eUL_ӄ`G2&CElM8y:?:'1{~f+ !pU#ju#ꁁeoz$팋pi$oeJ&4[>4ɵ-?biHFMAI71|-xK5@:,l3Utb4I!& p5aOqvZd7qkJJV?h7 >.޼Ri[ެIDͭR;"LCH`puN;)iSMC/[\̀B׶|ݦFBQ=]&TP/0x봫ڍ:uH8_hqDGʶ{T.3f _i^nG>ډ b%UY22M뚪 ٮz}::O+Ifa{j?_ه~^ !p0T\(b'pzz>|R3 0Xay9SJZ%R:aD"pON#zf[XvL6]ͯw7.^\ oFv҄$I%WnIQ? 
$z%BBbASw&Y ^n'q~t7oƔG}tpM YHiz3{w#pzMvK=~H^ezopnuΛy @I[:r[ v8?YlWgж6ӆEF`0I8'_NuМqOC畴ļyjKbed6nW^[6Yy.|v"76: Ok'\IT V8$È!p0D1M1 BR^e.N~bM5P՝-[>k (SkQ8JfE%]].onFnjqi8mF$e!Tl׍:3#78mfИ^"%/WrQ ̳ͯ"pWiCҬ ^TfXJ ;D"K''"[}N viWN89I7o_ p7eyC`JGp uy]0rVItZ=$Q&lnޭS67om5 FmE3h\#p7ßo?zAo$y:\/oRm7'_e7Nk-SԨٶOW4Ӌ^@z"^*p!pϧrd@Ϧe +.!p%`G|٬2b.=8C`*k%HR?=Ȱ*# p&&[\i|hٸ}pQ7< KM3kn%ifeVs+*2O-*P!pW}*3׈@r7DElNmZC5MUdyM)mO_8`M FTD$7+y5:~mC8ۦSur\HMp~'lZFLK,BԡO-:ge04MZp5F+^\[k/_pڥ bѽ1ZRKrA \R=A]._wQI>! pᇌVTh fS[0^-Y#\gHe\-SCb(Ҥ5-4 p\zurȪi(pu͆)VG|Yo{fl 'pYH6Nde4MQ$ FIW4Q8/Z{[#gG+FCj_9mJ@Cĩt "j2 >P!Sl:*U0.JRu=:;@%ai12`͇:1+(`Gq$^>j~j6Tt 5"78>pc+0xٲ#OYx}+r!Ls8@8ٰeȹ A!p0 pTY~ e+njޤh/+Y4B)O}l#f+es-俜K9 ^b.꼙em-ՆB'gJ}/󾧢"NEWaQmO@8 [:n~/mTU'V4-}?w1f%JDI܂}rZU.H p(J[ɫP7Fl5-YjxߣNO %\MHa@8)._54ڧ%/JsM߹$R\U/ -m.e2=+RX=n0h ̜rk.ЯnMG4& N5Iӯ|'ܢ,8`XXz6Ħ_˙A%AOt|zW5uui~\L/@8Q \tL@lsZ[i޴Jq9,|@8 \3v) px0 8@@^BS$q$p;D`d<8IQSi3IENDB`manila-2.0.0/doc/source/images/rpc/rabt.png0000664000567000056710000012764412701407107021664 0ustar jenkinsjenkins00000000000000PNG  IHDR6sRGBgAMA a cHRMz&u0`:pQ< pHYs&? IDATx^ U H dWV5"AЈ#,FAAq (O"@%&D@v1 (*:3sm{9TuuuuZ6 `m`g͡޳f. @M2:qU_7M/*}șa]Fv lvs?sne +|ml*@@@J+tJ+'9_z{/l6Y\ykV]uo## `66[OwG +jk|ȏ~wXh?N76 `؀o;=5*GC`~МGl@6p#n׽2;,WÄQ~{'ˆȂ( $6y WslhВpZ׼4PP[e}͑ BAl`l`:!yo5OPPS/?w?@qs<l/M\eئ(((0 ?<8=qN:6 `aZO9~4IIW@ ,W^`yl`(m '>\uH`7؆^+@E٨a;606e4*@`t5h T89`6P9xWl;(0 GZԏ e, v՛Y7)}q5> n7M*l_a`9+ RRmVU~UWn3aC$ 0?0O<֒ t z[uUc_ٳg)SDh~\/n޸fx׻nA|tzӒoP! ,kcVHלl>9s8CKb4G2~} 6%S`zRU?5|0\{/ǕW-T O=;s_pTiӦnɝ}nݾF/iMӪu],[w-CT|) n7{xv;(; pp쬊).BS%̹7y: .M?;0o׷.ziU^:`X:,.Km{)[Y?rV4akֵr<i5cm[Qv7OSCE3 B-*8>ә?{׻5zݑ3q.~* ;X-VLLJR7*?"8#*UV{xcοR7[ەG)#_ByL dԵ@c6Y-qڧj{PX;70V;_,ɏ`Î3r{{gUW]z2&igD=p;i;g'j l-;d.++jXEXP!8'UbEfW}6.xZ몬w=׭mJMmqX~ץunVNu< U`ql1G&aǹnnpկJ}knҤI 5qq\=vP, ؂CvXA--bdw. jnVjbVGPUL+cpmuYR  leŃ_[) z[1MSY\~awGeYf˲;Cޥ`bleNED=AM9a *3ܦE|cV.Jk( ufYjuQH~`Ǎ3+IbN쏱&YsGRyGŋg}Ҵ`{RW2>V3k;v ]X-v?99} :Tɚq'z6݌.1]3zlݡc|u`%eVdj>ZĐi'RY%*Q-x 5&Y1vӧOww{纚.b֫5q^_ ZO`leRE'w= `ET~bAm]Mgkd(`֜<`yVwvpr98MdfsOf 1xc~ǝݍCҵ`ble~F+x Oj݃f*N돉MB![bOei~ ;WQFP[C蘴zÉXs[Nϱ^|U vhKll1G'G{UVi⪫9O^xQw'O 7ڸ'n<(s ,[wt L)vTW/6ߴ#slde`#`;6=ݡYg/Q+~g'>1Lid͘\r:N5`X}F: !kT,)LƤ1On`9:"_W>G?rozVF-ɌP/Z& e2/`K`XJ|r`9:1;|ww;mܣ>~5*]nʔ)L>̠;*5`X= V*li`ӟ/<3Ug?ӀleR"{]w uk;[oT:j?QY3+W Uչjl ->}f=[c,UB[ovmJˬ}E`+{p*-{?^׻|;_lTz'̙3nj41٘Zgόb6ݏz [o}ckSN9e еh|:\u^p}l? 
`;(,6.Ċw}wsql;C/FA.sm(Ns%8}m7|IgqK~򪼢sᇻ78MEME9zklm` usW~3~]F]V謺ݎ}@~ۧϣJYӶ=1[]S7=[kܩ?6.|n„ [vw-Xj&foOq;6J`167oϯrvS&oxct$7xjScN9M`5eMmFrUm`V&4@4UF7c) Zv`}+;kX؇~)R;cƌVD+tѨ㏻~v4ԩ`;U}6YSoQ"lYXEDZXu/#\?낫nFIYvvz޼QB;& neq'pB*zǜy{VF%iƌeٝj'm5S^me tjrqymVPv 63MS v޼y$bk^K^0%7{r"OOg}nTַ56̹`;m6ӭ.D` -F`($S`L9*F(h8c[ ._#p#}=[^ v~OQooh7 `jlYc`yc`:1]*a w˝Rȫ"XŋSO=4AN~򓟸Y/??}7v,l3K75eVsF5,U`f_u 'cRuX,XHCusjewi lW} `;wҺns>&c`?k]};<[{'kؗ^z}ݭ|`Ts}3'Ы h`ZuI/m՛^$ t!:8C"K-ԍU>[n'\VDžj)l٭2>8`v6mydE`\~y[fg!I~+sϹ:˭ڭ߀_| .׿oG?Tlg /^[1X>`+C&&%.[R6 i5تmTl6S) vnAlW_=<7+դN'reN}O?>O 7]veNYu2pUwaD>H/[bek#li"ۧ)߱ۨ60luDž^p!leRRP] lش:3ßYs=+nYmwfƬCu;KYt, b-ۿ23hM9m $}| Wy Nm'l8V)z8[A_Vmm!@Tum:^AS`k ^VIIA( 6W  `/H7[ɎH*hS#Xnn9Gs.pmove;Fg!3g"[EaՕ(o¤YMyM6q?Zլ$pGg5 Kt ׫WNr] f ~0) KD[ ԮyX}/n;5 *?Z-k6 vNuk`\lTdAIli5y+J*jWYj~&2"Vss4_IօVKwK^\1=, 뮻m?׿R=eVr9kO^ nMpsM CPմhQ"El# `6ܞ5a楃PLJ @in=]B<+1|%>9 ЅgϷ@ēN-Cy{=C_\0f/M_B~z{+b6wۚku5VWa?-x<O=b+B[v]V.9"DW44Sd3**p?cUWܢq^Bg2W LJ @(T$ai$ɭkU*7H7Vsϔ-}Qm"mtubV"i5ցtM~ j=guV4tŭɔ` YMn+GWCUa.[> <~>2T^O(E҅;9&sǿS=et> >㏻6޸>*ɚQ=s[?Y;[oeb9?dkl}݌ګnl{`+*P {>QS'cq!/86:U"/@Uweଲ p'X+S[0Zx["Ν*sΘ5ӓg$*{T!>?w?.ܘ|žs9j+XMKZGmx衇ZhsAo7~뮻57 =<&a 'b<|P  , |P,,*"yȫpb16u-6Ej<Ϫ`{{;\UwъYf̤MXE^ ^H@v{p5BEa-i 4!Ԏ]R `m`fU 2:>E*iKt 6V6`:l ꏱXK~6]ƖVuה+XE/^ ~|Aett(+{r0Uz=UDu7q x7:Fp߶ŋ&s;^O` W `Umnx̚؟ *` :k]  jl۹Õ̜VL"\7ovfEO=XEc7JgΞ:ooTR?v!2HZW20[AU(PMqi4)d6{+ykWegc ږQY~TyOOQa;Ͻa5Qǻt 'L/'|m2ՄNZSVsm~C8+ jYO:ia*1 qv<9c7W RYwfCZ\NdPk@),<~],MiPo]`# ;p!{mtXdGbTr4a2ɢieذMlS9Wdh_ZbǯVKc`N,xlNkMN_j݀#r_K"}XT0qj߾{>EO~Ҋ>mSS_"gҤRբD`;a![i ; U@e<2f=vz%`;wB=]* 8P$bkzH2v~Z3x≎ֆ6^șr7j?i}\{ CC"CzD;mǍ`;wBk -ksHY=_%u '-s„Qx=)9vV[fVM_cWۍk}rK `y jmYszʶQ0KWXθá![?Q l)`{yU8i8QXomn…|u;(^c{[K쬝@ dw*7/5`r#Ofswedҙvv tUE/ [21)iNeQ Ԡ 5hUWAsg7 `%I$v5pW_}ܔd Uex6r<@k_kMXMn7܊j4쬱D`c leźII++PXF s/kN>:k t >͓I`8yV[}ݣ_ynqۮK Ә.7[`ͮ|u׵h$T}{νpPЫsϪltӮ)5`+,ʤ`Xz,XX-GկvR,UUo׶0%["Io],Y tMf)I8K/=] leN[T,$̺`;wԲVݭ(m9`7yw(DOLn=k_Kf͚4@W+}{oT`;^Aۿ׮W63y1leRRP Mg.M4+1X%>9 ;V,+ Or8'p,ĚTdM%pdg@7cn7Xil.x?=] le[T fN*lw[E;j7zwƐjǍ1ZvK-x*r$w)<_jml`;^۟׭BB\V,(uwՌlG7g;;nܹcV /LG`yo}[nɜ4+7X+Cm c۹mn`;v"-U4~DԤdj>7'$pZk3eK`;wvNW%vm-X{'ft\ph"Z6Y+V3+ ;)鎬oiYPl+(-> 188a t!.A,q\7v檫Nzi"i5XկKerAQ㫪=Xhتr9Ze7qd'$3tI-UwauZؑGcً5Q {cʘ&nUWmՕV_ߊ |$: 56'6rCQY>%$O4VV) vhL(VPigItؽ op}\uX'Fd78]tEe`L"~qhj:85D`ÆMS2W =cߏ؟#Hߒ8K*|yd_jd:xmS޵^yJl}vkOYkÎt;sT~vMʫ2/s7ol]tWˮ,vjUF: XoD`r۩ 9}3>}o+2Zb@v$.X͓w `e]뷕29} g$%u+-Z6NƻǶh~[;A26^1lu~xH>Ul`{%0mFAvsG'h)ik5Ubu5r4nUXM"U6";`]GcWMͯr!SLmRkB*1s`#d+ 2Iw @`#{(9 +2 믿; Af!V qvZoѕMlm%dX(Nթ (^u 8`#mmh|cd2ҵ;쮺*wwm7h F dW9 B;I6 f!nSluG: 6YBHߧJ+`TjQ`zR Ol1GȏqZf5 J@tƌ[.\ `uxd<컦MsZՎ1]'.s{+)|9W,[`*lYD'زr PStͽz8 hiIG}Úk;oC{x(ܦ=ߴ_-}0w9 V|P l1G?EW};`ۯTޱo 9s渝vi8#g4h[ [`XؽV#U"nK[g98mjyMԏ>y{ح/(UHybN??hp\k`lÐ XUY LqX1`|O `͚(Pll1GD ~H!?;T>3Nr;Zp'V~Mn}l FWl1Gq) ~H!?[ZAͿ1 - {`X} F: :IC,;s6>c`#1[Ы6l)6a)b`8ϝ*`|k st 4 ,[7tU!vybNiגetۀϭV]uh~\ٳg)SDh~\9s8(C~ևoa}79_6@ϰ>,=y;tz7+|~l>/n5,|-d_XIipkߋj8/)}dP5U鸓m`#//;/֛aIyMkO'Ҵc]r \V _&~|[IFj&OѣjoWn>(ȫ>6mJpAyrͿ,]#dU`bQ|97_Vo 4A6*z0[AeͺU]W)~]X6a lPOMj'iHu,蔵Ɂs`b O'GMpFtm5P68Zt<+82X*x`˚`b? 4P.u8ݾVq۷"íϴOIHU:VyE4G#G-[y/^1]vn{Y)+OWS;Vm QI+-lWYUv>vLzk;ZK?_Otva'kb?Eg.Yc[D`쟴dbКZV=#[Ѥg ς߯}%P:QCX(=0Pҏ`P "zog0npp]ʷvXUiPC4hdD*G49(lN_ڦ:^ Ͽ F*OUP2iH~meZyt W=>N@ nZIwQ9a8.w\Q[!y sK%Ex6V`? 
u4bWI zBPAy  6i~\( X3PkAV,2@+S[C- :mR@hD0صs#jsxja~0:c|K!эwr:`l^ygKϠg2np2`*3fu[[:ҶOoվ:m}HPY hVd~-|aTYHgvQ, nkiE#,4xjږuJ{=tGdW럧{ܐ l|Kڌ=#%ې%늬m>K[+6*|Y4eX%ް~àzL`# @AjXb(`+ FCS՛_!kcpp986F:VLxu`XĤY_ y/ד={e{>c~$4`{fÉV+ga(?ígz 0+$+Ut Ml^vSV6`BBdaN7-*:"k!6@ KkgҔ;`XϺI+yN3%|akz{ļ\ {v aOzf+IWlwš +$ 4UD`j61gq YD2|V6`}nNcl`Nb $ۛ_ [jpx]C 6ζ]u|XvyQN`XM"ÅbVeY6[k޷AoޱlWM GRIkRߺv:և:+;i?En>?ipipo]Ah8Em5+Xp[ڟ!l= cu'/3u]®u{ZP`X>Ll+Rƴb5P=*ǯ#X/+wl;H#YƖ j\3k̨a6ɢWGtL(ZSO`-ZW4sÞ#ip8 ~xU]۽W]gY$y؏5)0+GOCN'0vy"uONQyQVy`XnQ_=,gtZ.U /3VZ>Z^VmɵɝT oT,NwyJlr$o3VYgJɸWSJ{,k9iCГp6 l@S|_T蘴zGacTn/fs9< XPll3.аkw\ `H\X~{+*rQ(Pll~yP=6P ,&cH]/As tG6RW-(tXte`8j`#'N85 Al, w8 Z stXte`XܽQ(pk `x,[xE"w5-.ot: `H\6`;D`kQ7K˔!I=Ui~e΁ Yvʔ)={+r̜9s1'Og l`8zFeNeR tU"ِC\/&E8~D^ iuN0QRtZjWEUE=9ԑÎ,66H!?$ 嘥; } 2l`mT7XmJM}5}8H(/6F: ITWn>(ȫf;r9:n oYAc8Ӻ> .{Mw׎ #H;aY-뗫\EdlЛl,[f$v!օneq(FH &k M ®Ƃ:ZY֝V 4Vx[F}ǛZ9Y#eO[d,mЪUVZ0kqEA63`ill,[ زq\Ԭ'G6`& J+8 ZC8A^15߰I|NsڧrԾ"MC.ayc\^6 46U`ز:59м)~hl#~$3tvFj{Yc] os6\i݊<~1p'`@7mmsJX vU^ G(;YKdM ljuMV~?qX"B^x/**u;ܺF@/+B"3 4C6~ l)i w'B6cʬx[+.6n291`7u $[%gBDS' t? *M.OzjNdT5VU?n&vjkVTl4,VT5d`\Jgb@N ݛuou卣󩢽l@+ţë`IlI4P5NΕE! .ѱHԍ_`.ѓ6ɱ>i G٘p4) ngif(.~@r-6 B `+"Pnl h׭7ʁLX|E@TvΤOmiW_iݪcl@VዯN)L^%;nۋ.BH_ U-ǕvM `mWjZ>) ڂo+Seile[Tӓ2f):TeF!YN wQ>h͈mk3iy=2Һ[ `pDǪ fZL$Iu=ء<Ir'`y톶Ql'LG@`mo%HԒ"@ ]4ov@=M~WkVJ4x`# kgҶ-$:C[l^.>a]AZltӏfu !`MXn:.l]~>(*9+,i@)F^N^Aol{6`$U&R5ӘkTy!Ԫ,"! ӺEt!ni_u#lxwMOl5`L8Al:HvԆp^[)-kc`K4c+еq9Z+h7p__$bΌNӮv$NC~66_#rԧ[Tllus4u#a4j]ӖQ9f!ƂFVݟXkcy?߀7mc.vmȯ_ߦ4}flÐ ׈)֧55U)$ 4͙=dY熓9 e%`Fk>#`}W6/ǏZ7mU]H[(m[ɖ*J[c6OC~66_#rԧ[Tl,PH}\m \fat>`#`aw^9l)#lt>`#`{4Nh 49n*+o{lÐ ׈)֧55U)$<z؝I:mtMLܜ(Pl-2SI j8u:ԅa@ltȆ@ۀ 4A6*8Ms&i6 `u0 K(rkȫ ((R `M6a  l]/Z(0}$!F,dӜIڃMb@6*@*059CΌlBG*((R `M MB`kjPJXɦ9:mҫ,/^Z 8u:ԅa@l!BUkϙ,dӜIڃMb@6CQCp,b"uaofs'ݰ?/[11)iNeQ Ԡ[TU+`Fly Zf &:SϽ96 VY(jPAd@`DN{l`IX7}}ǀ{6`mV[͝u@lC ̳`+RKiA ` :uƀf}ln„ yf[w ,[SV&%ե[$W4v?~(n /tO?t/2te|llU`+*P`zRr<_ʔD7zz瘣}tך̶ s9gMrcjw/JleRRP HȝT @iL6RJ"q t4p]{ٳݯ~ctx0g`5"G} iMM)F  pݰmēNu"&Mr'p[x{g;J*GYٛm<\_g6akDح\Q_ *F}y[{:r8G|IsU}QwGeYf=zPlt>P +kv)D[lm`΅8EB-**vÍ6v_z% [DOlÐ ׈)֧55U)$6I^iWw?яo~FuQc֛-,d;Y6akD`Ӛ*R&o01c[c5ܧ?8/B#OS}LÎ`|l F: |Q[ƍIC7>ll; `կo}l9soƤ+M2śyc|l9[`#llFO6 :h\*i6RL4606ϟ﮽Z]vzw]c7M6g}C~IĝP';MWtwqi5uZ/*_>O;-d#C%t` , `M* @CmG! wl-[Z(vrk @;sOk\_zw'Ig1cymlQϡ LLX%ih4zӒ +6ު%`[ DJJ|@+p/bcfOfmƌ=s_pO=" l 硆C47ޘX s*4-(pK;=(CtqxEi2wb?4&{cNf|l*t|}?7pёUIJͳn"lX.lD6ޱ |( ~o[y駟v~_;nحl뮽aؑh,L^~}Q ގQW2qg>N;GmW_wm]|W{&Nt$3s}ZN1{'>~aO}gqƤܽ3>v>2>6خ P, F,M9ƓY};i k*_9FtrT4Sٳg 蠌8;F|E]Zw]7UjM>hv[m?5&m;(.3~| y|,$䕶l lwZu~ŋ]GpYS4.r&YuUT4 .z?>V"wt˻ɽk =SꫯvSNuz׻C=P:~GvHV;`+v,**`+2,[ǝVqlc /gK r!~ss믴;}֨b'p/g_}~u _Q`+s,&&%U6  Vfm `X;:8`تfʋ)7n\O>>^{mw/=M_ֵ`*~cq&'`تl]9, qU\Dl E}qf:o~MI `z{oK'?qYkz饗vvR;,[kXY1RSf몎a%`d(/ޫ[1n‹^L~2\Bg.>>3WkI[p+vש]{uB\{H= CJq2`L9]ؙO8'`)ꋻ7) `V\ѽA;+[nqN,>swzp I'?8u:)*pڴwq馛Xo*V*pH)NO,{vuebcјK AS槁ne_J V[̚,[5(Q^o5Oet4 MWt+~'I&KT}#i[`߸n̙nM6q]vJ">h ^mk{le .IJs'Jjzu!@j`y;nSUleRsX6|w/5],UwMWY=72 60)b>n-,/~-Zk/{x#8WM$@O.֋vr`Rl8XsVyE1.rL/=8J9,t}qrNY+뷢ߨ-;lS>{`Ѕ.nY"28?':֌PoTȪ.ĭY#_A-srX6|w/5]<n<}~޿+#Y(q va7=LJjVݙX-C2 8zKS 6) %t ,H5ձ0x˜]v؆i. 
3,'4-(`m:xѾ{9O ,hu!u`F (UW]8W?]q-{'ƺn FtNl.i`32+Oízr*rh CULe)جR@f7 <`MQ3p0o|l-(ƍ`^KM)`5)f Vz$عs3L;n緿=u}nu`0g#pHؕ7 2pjZoK`ZM0B2I]F'߬ 1z7< {%jXl9=,t}qrN?ywH2KևDcN_=䓭O?{gZR9'4͙3=i6s܍78fX\`_6aklͣ#Zq0 ɴYp8X xװkf&y`Eohl&q"t3l?^jN{v_ߚXi˱&waMdۓ1&-# #-gle^llЧ3l8!eD``t7P{BϢ~x?*+.kڊXX6&Z&6`ئ틻t`~a{>k-raӦ^5^֒'MrIO؞wyg4V-[\IIVI Z>Pz?nr0Yc`>zl2#,"D`0~H,$<5}#ہ7k lթm9З-sxX6|w/5]NVW\]vYO}j pq ~I#YEf6A`[_ذ1qDZMlaهz9 ha%~[U7a.'` +Dz8`^Ծ9^┺qΝ$ԇպ'Nt>hk 췓hwXDb'IVC}_ó҂SK[gtvfz+aoLlHX-B6Ӱe8iw.V 4Mn)) l7`2P85Tfeƻr$H%Q`9(>iw-VYŽAIÇzgZZU@)͟?5& [ܦHl~o̬>Ԇ&cubz\j~ڞIּN.~ «~bn:6`*2< `~~ǝ݉'n[4VՒnXmlfkيZ?(YVK~cJ VӒ`k֩UCMtw'nrZs{y_waG?] `oJS>4ϺϥkZ%C~Uy~O6Ρ`تfʋtXf7pH]{üB0U :]1%0{^2+{&\"m7&q,=+UVC^U/m[>Rʯn iG6ʰXԍ&]1yP:nܳ#X`GIZ7rt5X5q/Χr|WEфjtWEJ+qI6&FbRAoL7<[}o[}׊pg?s>,r8ց-]K9]R˛&qzܸ/<<4OM q6 v\w(Hs/~1;mD@6&5`w˾uXa7« VuGS Rۥ:&qzvSwuRHzSz `E{#0U:.oʸq9o4Oʃ?qn) `ӢGZFG>æk]آ%:ޫ {z_k[dzRkXuSJZzdv+ӂb #9C/U γJ}مXF"y!#1Cq_(wm֛˧aWdڮr|hqͯ'{cfEfkKu]:Nos¶uƻUVHlSmQi[ITEXLmwv%pjzTi=dօ1jIk *-<@n`>\XEmk3so6ƸdOݍ̸}c.ȂLIm3hQ(4pRmKoz_A)Oگm*˺Lox87Oћ9ZO6`~tisn'u;ݷѱ[f7!I X&~reG^4idccNf~馛ZyiL qN@Gs{nPV{`+3 `ȦE}ƈɓkC l@LFi@6?<FoVWo Exa4эrS(-r<[كTAl ygE`oI"I8}pK u$/#cmR3fMWܵ5b3]hIlyv2zD=1}?To}^.?2~'tNaO_6oGE`J,9]Sgz;nO{olZ" aY7rCݏpYC,r[=SMCSVcYSuq63l`/`/V֟+t;o-Y";3ۂʴفoF7yd⣒ΛI8{.3&`zK7ÝA|ߐȾ!K}!~ HYP^i=?TY~Pcɩ"~}ʬ_ 1VUFaƣrt!. v34B\:9)\ q6/F;;v…(jwOtw,gq[1 RAM6iɞ6Hf2`XPش^El[;3Ap+36;qT'>ֱlYmvolҥP0z7#3ó7 ~;Xzԃp҅8?K6Ζf'ƍ7؝}٭%o4V([C}sђ7'tR+L_WUw}wt`|Xc~#1p' {m+&`m VVNhe%w%.N/oT\9l ^ܽ2,:unWٵ^{kllt9,_j jD5~XM&U&tMNK=sk']> %N4u5K<l`rVXK/= o~[{e7%@֢GS[nT`^S~HVl}pK.QO,ꖨW|s7wHq6NIcx^d|Ig}{ĉ< bOO"h9\sM `uI, vhS+,\ y8l>qNsm7d>ڱEMd<,7pC?N&)BmKuX& ui9, x+igV^yd79s渻;*mF; b2:j ؗmر8ȔlwZuqNmny]ҫ_|VRIز[VkMF_5/6=f-royt!Nho4xtșǸWo}[_ >jyE_Їw1`$]|NcvJ+ ;۔΃q{f9 ==06]DU^Q,cqI6=Fci~N:f|2:Z'- `$@˻7߼5[+lE`e}ahE~HV,[e%K;:ph?v6pWu_(0&SOM½.?|%uh={_.-{Mwͪ9Cm3[)ܯ!jiy|_GUuնSկo"r&ʗ6VTgǨ qEf:>4r( ӜYZۧ6% ScjOUKlFtWtsUW~~5]yDQFmC~Jv$.d&V(GnyEbɌSsp4_?lhہeFhU":΢VZykt8!Z?1l-Fw gs[׿͚5=tzʶNCatUU2XC&)F^ cQİk ч\ߏpg]8eV ~ 5^U6i9 `CX`Z `k񱂬^z4"?aD\K.QF: |Ql}ZSSE B_H~wa`$Er8)F+!Zqi6-fk1g4]3>vpIpt>`#`9$N6Iuni3a*hL^EA5f.ayy2-Ӵ(.c`p^Y6^b' 0g`5"G} iMM)F s~et6yVPiqx3Ӵ樖Zڤ[Mڤr-8!ZdSN x`#llFO>"H!x'G **@󣗊k"Ӣayiδe(!XDmipE:~[M(6d?B/lz`#llFO>"H!ꝡ( ƔY6=Yup\o ȷ6akD`Ӛ*Rwn Hn7-ӺZg-Yx67v Ԛ9ydoәÓ<@#`qhDH&v.TF˱`ͱzmtȆ@۫Z6:J8 `@ltȆ@N@]o-yf9R8\l6a  l]/Z(0}$!Fl)zc6,`qP` աygF6O!@`Hr=li6ڤ21]^[l5)$4ՠ@ 8E=c3؀&tK9K+ڀfVמ;`*( KE8LNlh (\S(Џ l7tY9'=1"g>x4lEw6P-4tn ou߰m5StQzZ}V-irl[VVvֹ 6&OpSOZ(PlygcJ~kM}䈙NTJߏjm<_tOu18c i_nx=_<[ePV}nCqY'( a~+bM:ԁ((C]`A_l>0pɄ!|k#AS&v "'-qgSg l6``UvL+ׇTw+׏>}PC}7}LCrG@@:`scEqMr6 i|} (PH\dX[N!b[uۀ͟WoxQ}#0ni.lA[FGyնp=oEd:16˲AͻN?WϟY՞Prom<0iܔll#e7!0jD2bҩIQ)P#COt},šVX+W7}6?ڦK琶LhI;ElZj\f"A6le.Š$[=-O3`ºt~! `Nʀm)h <i TujeZ;غ%(`uܰll@{3ݚ.ƊZd6 kҘ6ʟPj@ d=rC~\jw{ .}PaXArVt5m x/%o ɹa@6 Uw_n̶̎`ֶ M` (U/ T'-H ,, bqY&C CtY|my@,aFb}F5! B-[~ V0ya+606tU_?mFufaP*V+|zMoZln*P`}6Enu!#~u, :v `6 T )`2U %g4_>-7}60‚߳X34lYiՅXMi݊n.[AǨm&oC}=mk5jYA'̆[c]~O4-xdG Hj{-F-c Hֈ&>doUB1og-].e`qb؆KfZ& 3jGJG:^ـ` ix)]3T[liUyCUg@RSX`닗aD7,ۢ*dQ``Aæd뭿{hImkjڼJkJײ68v枚! +*P2P &>p#41=6;H-E B˦iMݨKtWYe~#4 PPP`KU{CF\OlVҏTۭ g Vٴq~7Y*{Ë"Cueu,] ڡV鐥v.0M8a@@@>P`Vx,SĹ&//6`jpi[ x!*Ϗ1MꯒU>BiWhuvn~bMOU;i, :oCDZ;\x[inG6MD@@Xy-%gaۀ`,:u5aTpçAOI58w֧V( / 4`UO[emh9B܏]4?Sr-5<@@@>P`+_{9Ѕ.60P6`p.^| !ASDg9 D6@LJ0kX#G[>4PPP Q`OD?5 >XF- `|lu7v5iZ]g+oo(FobT<@SS^5Ŷև0{_/r606ۅXiMռl^43沠 f}ȓne0>9g@T@k( TfWGpE }Q08XǟuHvi,3I kc` f/mcHjXwf^Ӵ6뱵V2ӗN;í &r"{s)r*e?~g;/ 9^6`6^˰?q 8#uXeڌ!̽;IFƝZYUi+˖Q9~j[Vȟ9 7Mp~n̳c4 BZoV)U ]rSԎ)zmǥuoTKqӶ17/ߩ+U:!h"jKɜ}Lij1Im֡2~mWڕ!VYvNa{*29US-p'N? 
r+(lh.SdVIfiхcWoD72}>_# SeH!nAl(jYfc3 O\=NtY3w^=w+ 0<:Y-8HC0{ .aRӇGƬɐ"O+pou -h_OhYT+ڧn[(,˩`؀u.xRETc-zVw; rZ?% $m8RowX/P9w+Rc.pS7lzE4¥tK5R}p9VN.3yJx> 7[I1[5z5vb],Xf]3M>h_v3}ԆAGui^3\_Lߠڱu3ѥEvNV[TUF*ol׾ct(6 `ՋqϺ[I}P 4m(`?g*_umJ ;CȠiӼ|LشYOSQi|v-xFiŰLJݼ12Lu|,ݝZꪓO^8ԉS `6- Zmⳗ6u0teUӱ]sgi-YŏԆ+X7c7s줭R:6 X4+CLZЀޒ0(-bc-'rqPlE^ߺ~k~yٜшaSY=CùflioO+#wYuL6l$*҂tlo˴'v2DXq&`ONg"f~7F۲?Pzn[qi{t3.gzp̱lh ,\H<҆]aj³6tMHC?SVC woMM+cV:Z]3ea\GY^3lUŶ~ւd8v /4&aպ+s- m &Zflk3u:`6o[uҤgVC O&ZD:N?3p8C擫>}:u!qVIM6Z/BP A?7ʪ1MOVT9G|rmɋ"8QN6 `W<ۭj/N8=qӜaжav0d+)Y-Xծ/oijpgeZy~WN_ZS ֵڇh) @]G<vf!- ^"MhԺ b,}8VaSW^y{Xc?~șq, `6PO8//_p4yp{g跇˾KP>oj@kvk3 P~Lruc#i5?3\/GF̎umq *c4՝#\1vۑꍌ9)Iq 9 ows?snvl6`=sv:Ə_)<Mf};_qr-{9@@@@@X>)GEn_Tt2kФE?lB:h=BW/5츲8+K[t~e{ɋ(((((0 &YIQBXK?. ԥAky;gvv_yŜ{<l r< @KXA4m 1S'vEt v1GR"WTo)Xдv|1W<(((((Sذq^l0TENm't,>Eu!NXt&1݆4PPPPP Fvu~Eo"vݍCTF'-ke+Ϗ8eGcifg2&im^<4λiۂE۵ʻ1L@@@@@V `Ɍ|P ZwP6*Auv]~V7+ kQSՍZ2mjV9k;k_uUNkw]fZ~Ktژ`ٕ_Kyɩ X+ XZɴҢMб>M^P~g &X #ifAFuOx]Z4S~0rx6TyO(l 1~lY^]GficZ鬺 ,× iv⃯-¬sL%;@@@@@h)``֭VpQfЬm*fz> U )+j],=L7^K:㳺 !녅I>67k4]eQPPPP\?2h]j53P6oW+ˠ&EL>٘6mتtg>6"t6i^0OLd9\Hk ۖG@@@@T @;5 R$UaFqCP[+*X#UldNֵ8'eq-pM!=9]@@@@@_]OТ(((((}mσr]G6 _ce{U10N+W/j† @* pM8v%#x ۮV_nj. YPcF ڭYݙ^aLC;6jjf9_BMUF Ĵ!.Tڷ3ߞAu>mZ:ԝ(((((mH*smR%e߾ U *Q!ژU}Y3qaQW>JjYGtvܬˡsY9w V~pib6L.Ŵ!.o߀6\Z&,˺lG@@@@P@PNdvx3yT`Ӡ %BX lڸ߬(i AݱX@^0&FK6(iFѭ6X=ۆłoi܇UE#n 4Am18оp,mI!LBuv3e bISUWeHkC<]p[xiggڤ`^;cڐVFڶMɿo:<6Mա (((((PR0;ƕX Ni|Ӝ Ӗ`U+tlܱ̃S]> 9 PPPPU!OF% %`Mjp)pI2 ` " D?cam 1]v.YjB _b!ͮm>Q~Ft=qj?XV3 qv@@@@l_|Jn3kG- -QP^nYT"t?Vg8mW.;,2=m[x}ٙv/\{}YHp;_}^b#u#,_ٔl(C|V&(ە& duLZT-pF[ՙ,_EwϬxڢ㲎-]^Ӯm;߼6䝃owl[ Ŷ|((((( P mңe3}hNA4PPPPPPP4T~7>wFꖜ PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP KEmWIENDB`manila-2.0.0/doc/source/images/rpc/arch.svg0000664000567000056710000004404712701407107021657 0ustar jenkinsjenkins00000000000000 Page-1 Box.8 Compute Compute Box.2 Volume Storage VolumeStorage Box Auth Manager Auth Manager Box.4 Cloud Controller CloudController Box.3 API Server API Server Box.6 Object Store ObjectStore Box.7 Node Controller NodeController Dynamic connector Dynamic connector.11 Dynamic connector.12 http http Circle Manila-Manage Manila-Manage Circle.15 Euca2ools Euca2ools Dynamic connector.16 Dynamic connector.17 Sheet.15 Project User Role Network VPN ProjectUserRoleNetworkVPN Sheet.16 VM instance Security group Volume Snapshot VM image IP addres... VM instanceSecurity groupVolumeSnapshotVM imageIP addressSSH keyAvailability zone Box.20 Network Controller Network Controller Box.5 Storage Controller Storage Controller Dot & arrow Dot & arrow.14 Dynamic connector.13 Sheet.22 AMQP AMQP Sheet.23 AMQP AMQP Sheet.24 AMQP AMQP Sheet.25 REST REST Sheet.26 local method local method Sheet.27 local method local method Sheet.28 local method local method manila-2.0.0/doc/source/adminref/0000775000567000056710000000000012701407265017751 5ustar jenkinsjenkins00000000000000manila-2.0.0/doc/source/adminref/index.rst0000664000567000056710000000130212701407107021601 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Admin Reference --------------- .. 
.. toctree::
   :maxdepth: 3

   intro
   quick_start
   multi_backends
   network_plugins

manila-2.0.0/doc/source/adminref/multi_backends.rst

..
      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      See the License for the specific language governing permissions and
      limitations under the License.

Configure multiple back ends
============================

An administrator can configure an instance of Manila to provision shares from
one or more back ends. Each back end leverages an instance of a
vendor-specific implementation of the Manila driver API. The name of the back
end is declared as a configuration option share_backend_name within a
particular configuration stanza that contains the related configuration
options for that back end.

Administrators can specify that a particular share type be explicitly
associated with a single back end by including the extra spec
share_backend_name with the name specified within the back end configuration
stanza. When the Manila scheduler receives a provisioning request for a share
type with this extra spec set, it will fulfill the share provisioning request
on the specified back end (assuming all other scheduling criteria including
available capacity are met).
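For illustration, such an association can be made with the manila client by
creating a share type and setting the extra spec on it. The type name
`netapp_type` below is made up for this example, and `backendNetApp` refers to
the back end shown in the next section, which runs with
`driver_handles_share_servers = True`; the boolean argument of `type-create`
must match that mode::

    $ manila type-create netapp_type True
    $ manila type-key netapp_type set share_backend_name=backendNetApp

Shares created with this type will then only be scheduled to the back end
whose `share_backend_name` is `backendNetApp`.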
The following example shows five configured back ends:: [DEFAULT] enabled_share_backends=backendEMC1,backendEMC2,backendGeneric1,backendGeneric2,backendNetApp [backendEMC1] share_driver=manila.share.drivers.emc.driver.EMCShareDriver share_backend_name=backendEMC1 emc_share_backend=vnx emc_nas_server=1.1.1.1 emc_nas_password=password emc_nas_login=user emc_nas_server_container=server_2 emc_nas_pool_name="Pool 1" [backendEMC2] share_driver=manila.share.drivers.emc.driver.EMCShareDriver share_backend_name=backendEMC2 emc_share_backend=vnx emc_nas_server=1.1.1.1 emc_nas_password=password emc_nas_login=user emc_nas_server_container=server_3 emc_nas_pool_name="Pool 2" [backendGeneric1] share_driver=manila.share.drivers.generic.GenericShareDriver share_backend_name=one_name_for_two_backends service_instance_user=ubuntu_user service_instance_password=ubuntu_user_password service_image_name=ubuntu_image_name path_to_private_key=/home/foouser/.ssh/id_rsa path_to_public_key=/home/foouser/.ssh/id_rsa.pub [backendGeneric2] share_driver=manila.share.drivers.generic.GenericShareDriver share_backend_name=one_name_for_two_backends service_instance_user=centos_user service_instance_password=centos_user_password service_image_name=centos_image_name path_to_private_key=/home/baruser/.ssh/id_rsa path_to_public_key=/home/baruser/.ssh/id_rsa.pub [backendNetApp] share_driver = manila.share.drivers.netapp.common.NetAppDriver driver_handles_share_servers = True share_backend_name=backendNetApp netapp_login=user netapp_password=password netapp_server_hostname=1.1.1.1 netapp_root_volume_aggregate=aggr01 manila-2.0.0/doc/source/adminref/quick_start.rst0000664000567000056710000004670412701407107023042 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Quick start =========== This document describes how to install Manila from the OpenStack `Kilo` release. Note that the process differs from previous releases and is likely to change again in subsequent releases. Manila consists of three main services, which are similar to those of the OpenStack Cinder project: - manila-api - manila-scheduler - manila-share Installation of first two - `manila-api` and `manila-scheduler` is common for almost all deployments. But configuration of `manila-share` is backend-specific and can differ from deployment to deployment. Hence, this doc will cover just some specific case. Hence, this document will cover a single use case, configuring the "Generic" driver that uses the Cinder project as its backend. Note that the `manila-share` service can run in two modes, with and without handling of `share servers`. In most cases share servers are virtual machines that export file shares via various network file systems. The example in this document describes a backend that manages share servers using network resources provided by Neutron. .. note:: Manila supports any network architecture. When a driver is managing its own share servers, it can use any of several network plug-ins that provide network resources. 
manila-2.0.0/doc/source/adminref/quick_start.rst

..
      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      See the License for the specific language governing permissions and
      limitations under the License.

Quick start
===========

This document describes how to install Manila from the OpenStack `Kilo`
release. Note that the process differs from previous releases and is likely to
change again in subsequent releases.

Manila consists of three main services, which are similar to those of the
OpenStack Cinder project:

- manila-api
- manila-scheduler
- manila-share

Installation of the first two, `manila-api` and `manila-scheduler`, is common
for almost all deployments, but the configuration of `manila-share` is
backend-specific and can differ from deployment to deployment. Hence, this
document covers a single use case: configuring the "Generic" driver that uses
the Cinder project as its backend.

Note that the `manila-share` service can run in two modes, with and without
handling of `share servers`. In most cases share servers are virtual machines
that export file shares via various network file systems. The example in this
document describes a backend that manages share servers using network
resources provided by Neutron.

.. note::
    Manila supports any network architecture. When a driver is managing its
    own share servers, it can use any of several network plug-ins that provide
    network resources. Manila includes plug-ins for Neutron and Nova-network,
    as well as a `StandaloneNetworkPlugin` for simple networks. When a driver
    is not managing share servers, it has no need for network plug-ins.

Prerequisites
-------------

- MySQL database
- RabbitMQ message bus
- OpenStack Keystone
- Git

For Generic driver:

- OpenStack Nova
- OpenStack Neutron
- OpenStack Cinder

Steps to perform
================

- Installation of Manila binaries
- Installation of Manila client
- Registration in Keystone
- Preparation of external files (configs, etc...)
- Basic configuration of Manila
- Database setup
- Running Manila services
- Creation of pilot share

Installation of Manila binaries
-------------------------------

Manila binaries may be installed using various distribution packages or from
source code. In our case we will use the latter: installation by cloning a git
repository.

Clone the repo::

    $ git clone -b stable/kilo https://github.com/openstack/manila

Then run the installation script::

    $ sudo python setup.py install

It will install the Manila binaries and their dependencies. These are the
expected binaries:

- manila-all
- manila-api
- manila-manage
- manila-scheduler
- manila-share

Installation of Manila client
-----------------------------

To send requests to Manila we need to install the Manila client. Install it
using PIP::

    $ sudo pip install python-manilaclient>=1.0.4

.. note::
    The starting version of the Manila client for the Kilo release is 1.0.4

The above will install the `manila` binary that will be used for issuing
Manila requests.
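To actually issue requests, the client needs Keystone credentials, which are
usually supplied through environment variables. The user, password, tenant
name and controller address below are placeholders for this walk-through and
must be adapted to the deployment::

    $ export OS_USERNAME=admin
    $ export OS_PASSWORD=%ADMIN_PASSWORD%
    $ export OS_TENANT_NAME=admin
    $ export OS_AUTH_URL=http://%controller%:5000/v2.0

The same values can also be passed on the command line via options such as
`--os-username` and `--os-auth-url`.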
Registration in Keystone
------------------------

Like all other OpenStack projects, Manila should be registered with Keystone.
Here are the registration steps, similar to those of Cinder:

1) Create the Manila service user::

    $ keystone user-create --name manila --pass %PASSWORD%

2) Add the admin role to the Manila user::

    $ keystone user-role-add --user manila --tenant service --role admin

.. note::
    The tenant/project may differ, but it should be the same as for all other
    service users such as ‘cinder’, ‘nova’, etc.

3) Create the Manila service entities::

    $ keystone service-create \
        --name manila \
        --type share \
        --description "OpenStack Shared Filesystems"

    $ keystone service-create \
        --name manilav2 \
        --type sharev2 \
        --description "OpenStack Shared Filesystems V2"

Result::

    +-------------+----------------------------------+
    | Property    | Value                            |
    +-------------+----------------------------------+
    | description | OpenStack Shared Filesystems     |
    | enabled     | True                             |
    | id          | 4c13e9ff7ec04f4e95a26f72ecdf9919 |
    | name        | manila                           |
    | type        | share                            |
    +-------------+----------------------------------+

    +-------------+----------------------------------+
    | Property    | Value                            |
    +-------------+----------------------------------+
    | description | OpenStack Shared Filesystems V2  |
    | enabled     | True                             |
    | id          | 2840d1e7b033437f8776a7bd5045b28d |
    | name        | manilav2                         |
    | type        | sharev2                          |
    +-------------+----------------------------------+

4) Create the Shared Filesystems service API endpoints::

    $ keystone endpoint-create \
        --service-id $(keystone service-list | awk '/ share / {print $2}') \
        --publicurl http://%controller%:8786/v1/%\(tenant_id\)s \
        --internalurl http://%controller%:8786/v1/%\(tenant_id\)s \
        --adminurl http://%controller%:8786/v1/%\(tenant_id\)s \
        --region regionOne

    $ keystone endpoint-create \
        --service-id $(keystone service-list | awk '/ sharev2 / {print $2}') \
        --publicurl http://%controller%:8786/v2/%\(tenant_id\)s \
        --internalurl http://%controller%:8786/v2/%\(tenant_id\)s \
        --adminurl http://%controller%:8786/v2/%\(tenant_id\)s \
        --region regionOne

Result::

    +-------------+-------------------------------------------+
    | Property    | Value                                     |
    +-------------+-------------------------------------------+
    | adminurl    | http://%controller%:8786/v1/%(tenant_id)s |
    | id          | c1984777db6941919657d15b25f05c94          |
    | internalurl | http://%controller%:8786/v1/%(tenant_id)s |
    | publicurl   | http://%controller%:8786/v1/%(tenant_id)s |
    | region      | regionOne                                 |
    | service_id  | 4c13e9ff7ec04f4e95a26f72ecdf9919          |
    +-------------+-------------------------------------------+

    +-------------+-------------------------------------------+
    | Property    | Value                                     |
    +-------------+-------------------------------------------+
    | adminurl    | http://%controller%:8786/v2/%(tenant_id)s |
    | id          | 63ddffd27e8c4c62b4ffb228083325e6          |
    | internalurl | http://%controller%:8786/v2/%(tenant_id)s |
    | publicurl   | http://%controller%:8786/v2/%(tenant_id)s |
    | region      | regionOne                                 |
    | service_id  | 2840d1e7b033437f8776a7bd5045b28d          |
    +-------------+-------------------------------------------+

.. note::
    Port ‘8786’ is the default port for Manila. It may be changed to any other
    port, but this change should also be made in the Manila configuration file
    using the opt ‘osapi_share_listen_port’, which defaults to ‘8786’.
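As a small illustration of the note above, a non-default port would be set in
`manila.conf` as shown below (the value 9786 is just an arbitrary example),
and the endpoints above would have to be recreated with the same port::

    [DEFAULT]
    osapi_share_listen_port = 9786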
Basic configuration of Manila ----------------------------- In our case we will set up one backend with generic driver (using Cinder as its backend) configured to manage its own share servers. Open Manila configuration file `/etc/manila/manila.conf`:: [keystone_authtoken] signing_dir = /var/cache/manila admin_password = %password_we_used_with_user_creation_operation% admin_user = manila admin_tenant_name = %service_project_name_we_used_with_user_creation_operation% auth_protocol = http auth_port = 35357 auth_host = %address_of_machine_with_keystone_endpoint% [DATABASE] # Set up MySQL connection. In following ‘foo’ is username, # ‘bar’ is password and ‘quuz’ is host name or address: connection = mysql+pymysql://foo:bar@quuz/manila?charset=utf8 [oslo_concurrency] # Following opt defines directory to be used for lock files creation. # Should be owned by user that runs manila-share processes. # Defaults to env var ‘OSLO_LOCK_PATH’. It is used by manila-share services # and is required to be set up. Make sure this dir is created and owned # by user that run manila-share services. lock_path = /etc/manila/custom_manila_lock_path [DEFAULT] # Set pretty logging output. Not required, but may be useful. logging_exception_prefix = %(color)s%(asctime)s.%(msecs)d TRACE %(name)s ^[[01;35m%(instance)s^[[00m logging_debug_format_suffix = ^[[00;33mfrom (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d^[[00m logging_default_format_string = %(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [^[[00;36m-%(color)s] ^[[01;35m%(instance)s%(color)s%(message)s^[[00m logging_context_format_string = %(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [^[[01;36m%(request_id)s ^[[00;36m%(user_id)s %(project_id)s%(color)s] ^[[01;35m%(instance)s%(color)s%(message)s^[[00m # Set auth strategy for usage of Keystone auth_strategy = keystone # Set message bus creds rabbit_userid = %rabbit_username% rabbit_password = %rabbit_user_password% rabbit_hosts = %address_of_machine_with_rabbit% rpc_backend = rabbit # Following opt is used for definition of share backends that should be enabled. # Values are conf groupnames that contain per manila-share service opts. enabled_share_backends = london # Enable protocols ‘NFS’ and ‘CIFS’ as those are the only supported # by Generic driver that we are configuring in this set up. # All available values are (‘NFS’, ‘CIFS’, ‘GlusterFS’, ‘HDFS’) enabled_share_protocols = NFS,CIFS # Following is password for user ‘neutron’ for interaction with Neutron. # It is required only when Neutron is set up in lab, and handling of # share servers is used within configured share drivers. neutron_admin_password = %password% # Following is password for user ‘cinder’ for interaction with Cinder service. # Used only by Generic driver. cinder_admin_password = %password% # Following is password for user ‘nova’ for interaction with Nova service. # Used only by Generic driver for the moment. nova_admin_password = %password% # Set the project/tenant name of the ‘service’ tenant. These should all be the # same value, but may be different than the default. neutron_admin_project_name = service cinder_admin_tenant_name = service nova_admin_tenant_name = service # Manila requires ‘share-type’ for share creation. # So, set here name of some share-type that will be used by default. 
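# NOTE: the share type named below does not exist yet at this point of the guide; # it is created later with the 'manila type-create' command once manila-api is running. 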
default_share_type = default_share_type state_path = /opt/stack/data/manila osapi_share_extension = manila.api.contrib.standard_extensions rootwrap_config = /etc/manila/rootwrap.conf api_paste_config = /etc/manila/api-paste.ini share_name_template = share-%s # Set scheduler driver with usage of filters. Recommended. scheduler_driver = manila.scheduler.drivers.filter.FilterScheduler # Set following opt to ‘True’ to get more info in logging. debug = True [london] # This is custom opt group that is used for storing opts of share-service. # This one is used only when enabled using opt `enabled_share_backends` # from DEFAULT group. # Set usage of Generic driver which uses Cinder as backend. share_driver = manila.share.drivers.generic.GenericShareDriver # Generic driver supports both driver modes - with and without handling # of share servers. So, we need to define explicitly which one we are # enabling using this driver. driver_handles_share_servers = True # Generic driver is the only driver that uses image from Glance for building # service VMs in Nova. And following are data for some specific image. # We used one defined in [1] # [1] https://github.com/openstack/manila/blob/6785cad9/devstack/plugin.sh#L86 service_instance_password = ubuntu service_instance_user = ubuntu service_image_name = ubuntu_1204_nfs_cifs # These will be used for keypair creation and inserted into service VMs. path_to_private_key = /home/stack/.ssh/id_rsa path_to_public_key = /home/stack/.ssh/id_rsa.pub # Custom name for share backend. share_backend_name = LONDON .. note:: The Generic driver does not use network plugins, so none is part of the above configuration. Other drivers that manage their own share servers may require one of Manila's network plug-ins. Database setup -------------- Manila supports different SQL dialects in theory, but it is only tested with MySQL, so this step assumes that MySQL has been installed. Create the database for Manila:: $ mysql -u%DATABASE_USER% -p%DATABASE_PASSWORD% -h%MYSQL_HOST% -e "DROP DATABASE IF EXISTS manila;" $ mysql -u%DATABASE_USER% -p%DATABASE_PASSWORD% -h%MYSQL_HOST% -e "CREATE DATABASE manila CHARACTER SET utf8;" Then create Manila's tables and apply all migrations:: $ manila-manage db sync Here is the list of tables for the Kilo release of Manila:: +--------------------------------------------+ | Tables_in_manila | +--------------------------------------------+ | alembic_version | | network_allocations | | project_user_quotas | | quota_classes | | quota_usages | | quotas | | reservations | | security_services | | services | | share_access_map | | share_export_locations | | share_metadata | | share_network_security_service_association | | share_networks | | share_server_backend_details | | share_servers | | share_snapshots | | share_type_extra_specs | | share_type_projects | | share_types | | shares | +--------------------------------------------+ Running Manila services ----------------------- Run manila-api first:: $ manila-api \ --config-file /etc/manila/manila.conf & \ echo $! >/opt/stack/status/stack/m-api.pid; \ fg || echo "m-api failed to start" | \ tee "/opt/stack/status/stack/m-api.failure" Create a default share type before running `manila-share` service:: $ manila type-create default_share_type True Where `default_share_type` is custom name of `share-type` and `True` is value for required extra-spec `driver_handles_share_servers`. These are required params for creation of `share-type`. 
Result:: +-----------+--------------------+------------+------------+-------------------------------------+ | ID | Name | Visibility | is_default | required_extra_specs | +-----------+--------------------+------------+------------+-------------------------------------+ | %some_id% | default_share_type | public | - | driver_handles_share_servers : True | +-----------+--------------------+------------+------------+-------------------------------------+ Service `manila-api` may be restarted to get updated information about `default share type`. So, get list of share types after restart of service `manila-api`:: $ manila type-list Result:: +-----------+----------------------------+------------+------------+--------------------------------------+ | ID | Name | Visibility | is_default | required_extra_specs | +-----------+----------------------------+------------+------------+--------------------------------------+ | %some_id% | default_share_type | public | YES | driver_handles_share_servers : True | +-----------+----------------------------+------------+------------+--------------------------------------+ Add any additional extra specs to `share-type` if needed using following command:: $ manila type-key default_share_type set key=value This may be viewed as follows:: $ manila extra-specs-list Run manila-scheduler:: $ manila-scheduler \ --config-file /etc/manila/manila.conf & \ echo $! >/opt/stack/status/stack/m-sch.pid; \ fg || echo "m-sch failed to start" | \ tee "/opt/stack/status/stack/m-sch.failure" Run manila-share:: $ manila-share \ --config-file /etc/manila/manila.conf & \ echo $! >/opt/stack/status/stack/m-shr.pid; \ fg || echo "m-shr failed to start" | \ tee "/opt/stack/status/stack/m-shr.failure" Creation of pilot share ----------------------- In this step we assume that the following services are running: - Keystone - Nova (used by Generic driver, not strict dependency of Manila) - Neutron (default network backend for Generic driver, used when driver handles share servers) - Cinder (used by Generic driver) To operate a driver that handles share servers, we must create a `share network`, which is a set of network information that will be used during share server creation. In our example, to use Neutron, we will do the following:: $ neutron net-list Here we note the ID of a Neutron network and one of its subnets. .. note:: Some configurations of the Generic driver may require this network be attached to a public router. It is so by default. So, if you use the default configuration of Generic driver, make sure the network is attached to a public router. Then define a share network using the Neutron network and subnet IDs:: $ manila share-network-create \ --name test_share_network \ --neutron-net-id %id_of_neutron_network% \ --neutron-subnet-id %id_of_network_subnet% Now we can create a share using the following command:: $ manila create NFS 1 --name testshare --share-network test_share_network The above command will instruct Manila to schedule a share for creation. Once created, configure user access to the new share before attempting to mount it via the network:: $ manila access-allow testshare ip 0.0.0.0/0 --access-level rw We added read-write access to all IP addresses. Now, you can try mounting this NFS share onto any host. manila-2.0.0/doc/source/adminref/network_plugins.rst0000664000567000056710000002137712701407107023742 0ustar jenkinsjenkins00000000000000.. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Manila Network Plugins ====================== The Manila architecture defines an abstraction layer for network resource provisioning, and it provides a number of concrete `network plugins`, allowing administrators to choose from a variety of options for how network resources are assigned to their tenants' networked storage. This document describes how network plugins may be configured and used in Manila. What is a network plugin in Manila? ----------------------------------- A network plugin is a python class that uses a specific facility (e.g. Neutron or Nova network) to provide network resources to the :term:`manila-share` service. When to use a network plugin? ----------------------------- A Manila `share driver` may be configured in one of two modes, where it is managing the lifecycle of `share servers` on its own or where it is merely providing storage resources on a pre-configured share server. This mode is defined using the boolean option `driver_handles_share_servers` in the Manila configuration file. A network plugin is only useful when a driver is handling its own share servers. .. note:: Not all share drivers support both modes. Each driver must report which mode(s) it supports to the manila-share service. When `driver_handles_share_servers` is set to `True`, a share driver will be called to create share servers for shares using information provided within a `share network`. This information will be provided to one of the enabled network plugins that will handle reservation, creation and deletion of network resources including `IP addresses` and `network interfaces`. As an exception, any share drivers that use Nova for the creation of share servers should use some wrapper for network plugins, because Nova handles the creation of network resources for its VMs. In the Manila Kilo version, only the `Generic driver` uses Nova with the help of its own `network helpers`, which serve the same needs but are related only to this share driver. .. _what_network_plugins_are_available: What network plugins are available? ----------------------------------- There are three different network plugins and five python classes in Manila: 1 Network plugin for using the `OpenStack` networking project `Neutron`. It allows one to use any network segmentation that Neutron supports. It is up to each share driver to support at least one network segmentation type. 1.1 `manila.network.neutron.neutron_network_plugin.NeutronNetworkPlugin`. This is the default network plugin. It requires that `neutron_net_id` and `neutron_subnet_id` are provided when defining the share network that will be used for the creation of share servers. The user may define any number of share networks corresponding to the various physical network segments in a tenant environment. 1.2 `manila.network.neutron.neutron_network_plugin.NeutronSingleNetworkPlugin`. This is a simplification of the previous case. 
It accepts values for `neutron_net_id` and `neutron_subnet_id` from the Manila configuration file and uses one network for all shares. When only a single network is needed, the NeutronSingleNetworkPlugin (1.2) is a simple solution. Otherwise the NeutronNetworkPlugin (1.1) should be chosen. 2 Network plugin for working with OpenStack native networking from `Nova`. It supports either flat networks or VLAN-segmented networks. 2.1 `manila.network.nova_network_plugin.NovaNetworkPlugin`. This plugin serves the networking needs when `Nova networking` is configured in the cloud instead of Neutron. It requires a single parameter, `nova_net_id`, which is provided as part of the share network. 2.2 `manila.network.nova_network_plugin.NovaSingleNetworkPlugin`. It works in the same way as the previous one, with one difference: it takes `nova_net_id` from the Manila configuration file and creates share servers using only one network. When only a single network is needed, the NovaSingleNetworkPlugin (2.2) is a simple solution. Otherwise the NovaNetworkPlugin (2.1) should be chosen. 3 Network plugin for specifying networks independently from OpenStack networking services. 3.1 `manila.network.standalone_network_plugin.StandaloneNetworkPlugin`. This plug-in uses a pre-existing network that is available to the manila-share host. This network may be handled either by OpenStack or be created independently by any other means. The plugin supports any type of network - flat and segmented. As above, it is completely up to the driver to support the network type for which the network plugin is configured. .. note:: These network plugins were introduced in the OpenStack Kilo release. In the OpenStack Juno version, only NeutronNetworkPlugin is available. Plugins 1.2, 2.2, and 3.1 all ignore what the user supplies in the share_network and instead always provide IP addresses from a single network. Approaches for setup of network plugins --------------------------------------- Each manila-share service may have its own network plugin or share one with other services. All configuration options for network plugins may be set in three ways, listed here by priority: - Using a separate configuration group. In this case, the config opt `network_config_group` should be defined in the config group of the manila-share service and contain the name of the config group with the options for the network plugin. First (highest) priority. - Using the config group of the manila-share service. Second priority. - Using the config group `[DEFAULT]`. Lowest priority. A specific network plugin is enabled by setting the configuration option `network_api_class` to one of the values defined in the previous section :ref:`what_network_plugins_are_available`. This option can be defined in any of the approaches above, along with the options for the network plugin itself. Example of network plugin configuration --------------------------------------- Let's configure three manila-share services that use different approaches for configuration of network plugins. As noted in section :ref:`what_network_plugins_are_available`, the Kilo version of OpenStack provides five network plugins, three of which require configuration options - 1.2, 2.2 and 3.1. We will use a configuration example based on network plugin 1.2, the NeutronSingleNetworkPlugin. 
Here is the configuration:: [DEFAULT] enabled_share_backends = SHARE_BACKEND_1,SHARE_BACKEND_2,SHARE_BACKEND_3 network_api_class = manila.network.neutron.neutron_network_plugin.NeutronSingleNetworkPlugin neutron_net_id = neutron_net_id_DEFAULT neutron_subnet_id = neutron_subnet_id_DEFAULT [NETWORK_PLUGIN] neutron_net_id = neutron_net_id_NETWORK_PLUGIN neutron_subnet_id = neutron_subnet_id_NETWORK_PLUGIN [SHARE_BACKEND_1] # This share backend is enabled for handling of share servers using opts # for network plugin defined in separate config group called `NETWORK_PLUGIN`. network_config_group = NETWORK_PLUGIN driver_handles_share_servers = True [SHARE_BACKEND_2] # This share backend is enabled for handling of share servers using opts # defined in its own config group. driver_handles_share_servers = True neutron_net_id = neutron_net_id_SHARE_BACKEND_2 neutron_subnet_id = neutron_subnet_id_SHARE_BACKEND_2 [SHARE_BACKEND_3] # This share backend is enabled for handling of share servers using opts # defined in config group [DEFAULT]. driver_handles_share_servers = True Here is a list of neutron_net_id and neutron_subnet_id values for our manila-share services: - [SHARE_BACKEND_1] - neutron_net_id=neutron_net_id_NETWORK_PLUGIN - neutron_subnet_id=neutron_subnet_id_NETWORK_PLUGIN - [SHARE_BACKEND_2] - neutron_net_id=neutron_net_idSHARE_BACKEND_2 - neutron_subnet_id=neutron_subnet_id_SHARE_BACKEND_2 - [SHARE_BACKEND_3] - neutron_net_id=neutron_net_id_DEFAULT - neutron_subnet_id=neutron_subnet_id_DEFAULT The value for option network_api_class was taken by each manila-share service from group [DEFAULT] because it was not redefined in other places. .. note:: The last approach - use of [DEFAULT] group - is not preferred for setting network plugin options and will generate warnings in your manila-share logs. Either of the first two approaches is recommended. manila-2.0.0/doc/source/adminref/intro.rst0000664000567000056710000000245112701407107021633 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Introduction to Manila Shared Filesystem Management Service =========================================================== :term:`Manila` is the File Share service project for OpenStack. To administer the OpenStack File Share service, it is helpful to understand a number of concepts like share networks, shares, multi-tenancy and back ends that can be configured with Manila. When configuring the File Share service, it is required to declare at least one back end. Manila can be configured to run in a single-node configuration or across multiple nodes. Manila can be configured to provision shares from one or more back ends. The OpenStack File Share service allows you to offer file-share services to users of an OpenStack installation. manila-2.0.0/doc/source/devref/0000775000567000056710000000000012701407265017437 5ustar jenkinsjenkins00000000000000manila-2.0.0/doc/source/devref/generic_driver.rst0000664000567000056710000001020412701407107023150 0ustar jenkinsjenkins00000000000000.. Copyright 2014 Mirantis Inc. 
All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Generic approach for share provisioning ======================================= The Shared File Systems service can be configured to use Nova VMs and Cinder volumes. There are two modules that handle them in manila: 1) 'service_instance' module creates VMs in Nova with predefined image called service image. This module can be used by any backend driver for provisioning of service VMs to be able to separate share resources among tenants. 2) 'generic' module operates with Cinder volumes and VMs created by 'service_instance' module, then creates shared filesystems based on volumes attached to VMs. Network configurations ---------------------- Each backend driver can handle networking in its own way, see: https://wiki.openstack.org/wiki/Manila/Networking One of two possible configurations can be chosen for share provisioning using 'service_instance' module: - Service VM has one net interface from net that is connected to public router. For successful creation of share, user network should be connected to public router too. - Service VM has two net interfaces, first one connected to service network, second one connected directly to user's network. Requirements for service image ------------------------------ - Linux based distro - NFS server - Samba server >=3.2.0, that can be configured by data stored in registry - SSH server - Two net interfaces configured to DHCP (see network approaches) - 'exportfs' and 'net conf' libraries used for share actions - Following files will be used, so if their paths differ one needs to create at least symlinks for them: * /etc/exports (permanent file with NFS exports) * /var/lib/nfs/etab (temporary file with NFS exports used by 'exportfs') * /etc/fstab (permanent file with mounted filesystems) * /etc/mtab (temporary file with mounted filesystems used by 'mount') Supported shared filesystems ---------------------------- - NFS (access by IP) - CIFS (access by IP) Known restrictions ------------------ - One of Nova's configurations only allows 26 shares per server. This limit comes from the maximum number of virtual PCI interfaces that are used for block device attaching. There are 28 virtual PCI interfaces, in this configuration, two of them are used for server needs and other 26 are used for attaching block devices that are used for shares. - Juno version works only with Neutron. Each share should be created with neutron-net and neutron-subnet IDs provided via share-network entity. - Juno version handles security group, flavor, image, keypair for Nova VM and also creates service networks, but does not use availability zones for Nova VMs and volume types for Cinder block devices. - Juno version does not use security services data provided with share-network. These data will be just ignored. - Liberty version adds a share extend capability. Share access will be briefly interrupted during an extend operation. 
- Liberty version adds a share shrink capability, but this capability is not effective because generic driver shrinks only filesystem size and doesn't shrink the size of Cinder volume. The :mod:`manila.share.drivers.generic` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.generic :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.share.drivers.service_instance` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.service_instance :noindex: :members: :undoc-members: :show-inheritance: manila-2.0.0/doc/source/devref/jenkins.rst0000664000567000056710000000340112701407107021623 0ustar jenkinsjenkins00000000000000Continuous Integration with Jenkins =================================== Manila uses a `Jenkins`_ server to automate development tasks. The Jenkins front-end is at http://jenkins.openstack.org. You must have an account on `Launchpad`_ to be able to access the OpenStack Jenkins site. Jenkins performs tasks such as: `gate-manila-pep8`_ Run PEP8 checks on proposed code changes that have been reviewed. `gate-manila-pylint`_ Run Pylint checks on proposed code changes that have been reviewed. `gate-manila-python27`_ Run unit tests using python2.7 on proposed code changes that have been reviewed. `gate-manila-python34`_ Run unit tests using python3.4 on proposed code changes that have been reviewed. `manila-coverage`_ Calculate test coverage metrics. `manila-docs`_ Build this documentation and push it to http://docs.openstack.org/developer/manila. `manila-merge-release-tags`_ Merge reviewed code into the git repository. `manila-tarball`_ Do ``python setup.py sdist`` to create a tarball of the manila code and upload it to http://tarballs.openstack.org/manila/ .. _Jenkins: http://jenkins-ci.org .. _Launchpad: http://launchpad.net .. _gate-manila-pep8: https://jenkins.openstack.org/job/gate-manila-pep8 .. _gate-manila-pylint: https://jenkins.openstack.org/job/gate-manila-pylint .. _gate-manila-python27: https://jenkins.openstack.org/job/gate-manila-python27/ .. _gate-manila-python34: https://jenkins.openstack.org/job/gate-manila-python34/ .. _manila-coverage: https://jenkins.openstack.org/job/manila-coverage .. _manila-docs: https://jenkins.openstack.org/job/manila-docs .. _manila-merge-release-tags: https://jenkins.openstack.org/job/manila-merge-release-tags .. _manila-tarball: https://jenkins.openstack.org/job/manila-tarball manila-2.0.0/doc/source/devref/index.rst0000664000567000056710000000454012701407107021276 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Developer Guide =============== In this section you will find information on manila's lower level programming APIs. Programming HowTos and Tutorials -------------------------------- .. 
toctree:: :maxdepth: 3 development.environment unit_tests addmethod.openstackapi Background Concepts for manila ------------------------------ .. toctree:: :maxdepth: 3 architecture threading i18n rpc driver_requirements pool-aware-manila-scheduler Other Resources --------------- .. toctree:: :maxdepth: 3 launchpad gerrit jenkins API Reference ------------- .. toctree:: :maxdepth: 3 api api_microversion_dev api_microversion_history experimental_apis Module Reference ---------------- .. toctree:: :maxdepth: 3 intro services database share share_hooks auth api scheduler fakes manila ganesha Capabilities and Extra-Specs ---------------------------- .. toctree:: :maxdepth: 3 capabilities_and_extra_specs export_location_metadata Share backends feature support mapping -------------------------------------- .. toctree:: :maxdepth: 3 share_back_ends_feature_support_mapping Share backends -------------- .. toctree:: :maxdepth: 3 zfs_on_linux_driver netapp_cluster_mode_driver emc_isilon_driver emc_vnx_driver generic_driver glusterfs_driver glusterfs_native_driver cephfs_native_driver gpfs_driver huawei_nas_driver hdfs_native_driver hds_hnas_driver hpe_3par_driver tegile_driver Indices and tables ------------------ * :ref:`genindex` * :ref:`modindex` * :ref:`search` manila-2.0.0/doc/source/devref/hds_hnas_driver.rst0000664000567000056710000004063212701407107023333 0ustar jenkinsjenkins00000000000000.. Copyright 2015 Hitachi Data Systems, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ========================== Hitachi HNAS manila driver ========================== ------------------ Driver Version 1.0 ------------------ This OpenStack manila driver provides support for Hitachi Data Systems (HDS) NAS Platform Models 3080, 3090, 4040, 4060, 4080 and 4100. HNAS Storage Requirements ''''''''''''''''''''''''' Before using Hitachi HNAS manila driver, use the HNAS configuration and management utilities, such as GUI (SMU) or SSC CLI to create a storage pool (span) and an EVS. Also, check that HNAS/SMU software version is 12.2 or higher. Supported Operations '''''''''''''''''''' The following operations are supported in this version of manila HNAS driver: - Create and delete NFS shares; - Extend NFS shares; - Manage rules to NFS shares (allow/deny access); - Manage and unmanage NFS shares; - Create and delete snapshots; - Create shares from snapshots. Driver Configuration '''''''''''''''''''' To configure the driver, make sure that the controller and compute nodes have access to the HNAS management port, and compute and neutron nodes have access to the data ports (EVS IPs or aggregations). If manila-share service is not running on controller node, it must have access to the management port. The driver configuration can be summarized in the following steps: | 1) Create a file system to be used by manila on HNAS. Make sure that the filesystem is not created as a replication target. 
Refer to Hitachi HNAS reference for detailed steps on how to do this; | 2) Install and configure an OpenStack environment with default manila parameters and services. Refer to OpenStack manila configuration reference; | 3) Configure HNAS parameters on manila.conf; | 4) Prepare the network; | 5) Configure/create share type; | 6) Restart the services; | 7) Configure the network. In the following sections we cover steps 3, 4, 5, 6 and 7. Steps 1 and 2 are not in the scope of this document. Step 3 - HNAS Parameters Configuration ************************************** The following parameters need to be configured in the [DEFAULT] section of */etc/manila/manila.conf*: +----------------------------------------------------------------------------------------------------------------------------------+ | [DEFAULT] | +============================+=====================================================================================================+ | **Option** | **Description** | +----------------------------+-----------+-----------------------------------------------------------------------------------------+ | enabled_share_backends | Name of the section on manila.conf used to specify a backend. E.g. *enabled_share_backends = hnas1* | +----------------------------+-----------------------------------------------------------------------------------------------------+ | enabled_share_protocols | Specify a list of protocols to be allowed for share creation. For Hitachi driver this must be: *NFS*| +----------------------------+-----------------------------------------------------------------------------------------------------+ The following parameters need to be configured in the [backend] section of */etc/manila/manila.conf*: +-------------------------------------------------------------------------------------------------------------------------------------+ | [hnas1] | +===============================+=====================================================================================================+ | **Option** | **Description** | +-------------------------------+-----------------------------------------------------------------------------------------------------+ | share_backend_name | A name for the backend. | +-------------------------------+-----------------------------------------------------------------------------------------------------+ | share_driver | Python module path. For Hitachi driver this must be: | | | *manila.share.drivers.hitachi.hds_hnas.HDSHNASDriver* | +-------------------------------+-----------------------------------------------------------------------------------------------------+ | driver_handles_share_servers | DHSS, Driver working mode. For Hitachi driver **this must be**: | | | *False* | +-------------------------------+-----------------------------------------------------------------------------------------------------+ | hds_hnas_ip | HNAS management interface IP for communication between manila node and HNAS. | +-------------------------------+-----------------------------------------------------------------------------------------------------+ | hds_hnas_password | This field is used to provide password credential to HNAS. | | | Either hds_hnas_password or hds_hnas_ssh_private_key must be set. | +-------------------------------+-----------------------------------------------------------------------------------------------------+ | hds_hnas_ssh_private_key | Set this parameter with RSA/DSA private key path to allow the driver to connect into HNAS. 
| +-------------------------------+-----------------------------------------------------------------------------------------------------+ | hds_hnas_evs_id | ID or Label from EVS which this backend is assigned to (ID and Label can be | | | listed by CLI “evs list” or EVS Management in HNAS Interface). | +-------------------------------+-----------------------------------------------------------------------------------------------------+ | hds_hnas_evs_ip | EVS IP for mounting shares (this can be listed by CLI “evs list” or EVS Management in HNAS | | | Interface). | +-------------------------------+-----------------------------------------------------------------------------------------------------+ | hds_hnas_file_system_name | Name of the file system in HNAS, located in the specified EVS. | +-------------------------------+-----------------------------------------------------------------------------------------------------+ | hds_hnas_cluster_admin_ip0* | If HNAS is in a multi-node cluster, set this parameter with the IP of the cluster’s admin node. | +-------------------------------+-----------------------------------------------------------------------------------------------------+ | hds_hnas_stalled_job_timeout* | Tree-clone-job commands are used to create snapshots and create shares from snapshots. | | | This parameter sets a timeout (in seconds) to wait for jobs to complete. Default value is | | | 30 seconds. | +-------------------------------+-----------------------------------------------------------------------------------------------------+ \* Non mandatory parameters. Below is an example of a valid configuration of HNAS driver: | ``[DEFAULT]`` | ``enabled_share_backends = hitachi1`` | ``enabled_share_protocols = NFS`` | ``[hitachi1]`` | ``share_backend_name = HITACHI1`` | ``share_driver = manila.share.drivers.hitachi.hds_hnas.HDSHNASDriver`` | ``driver_handles_share_servers = False`` | ``hds_hnas_ip = 172.24.44.15`` | ``hds_hnas_user = supervisor`` | ``hds_hnas_password = supervisor`` | ``hds_hnas_evs_id = 1`` | ``hds_hnas_evs_ip = 10.0.1.20`` | ``hds_hnas_file_system_name = FS-Manila`` Step 4 - Prepare the Network **************************** In the driver mode used by HNAS Driver (DHSS = False), the driver does not handle network configuration, it is up to the administrator to configure it. It is mandatory that HNAS management interface is reachable from Manila-Share node through Admin Network, while the selected EVS data interface is reachable from OpenStack Cloud, such as through Neutron Flat networking. Here is a step-by-step of an example configuration: | **Manila-Share Node:** | **eth0**: Admin Network, can ping HNAS management interface. | **eth1**: Data Network, can ping HNAS EVS IP (data interface). This interface is only required if you plan to use Share Migration. | **Neutron Node and Compute Nodes:** | **eth0**: Admin Network, can ping HNAS management interface. | **eth1**: Data Network, can ping HNAS EVS IP (data interface). The following image represents the described scenario: .. 
image:: /images/rpc/hds_network.jpg :width: 60% Run in **Neutron Node**: | ``$ sudo ifconfig eth1 0`` | ``$ sudo ovs-vsctl add-br br-eth1`` | ``$ sudo ovs-vsctl add-port br-eth1 eth1`` | ``$ sudo ifconfig eth1 up`` Edit */etc/neutron/plugins/ml2/ml2_conf.ini* (default directory), change the following settings as follows in their respective tags: | ``[ml2]`` | ``type_drivers = flat,vlan,vxlan,gre`` | ``mechanism_drivers = openvswitch`` | ``[ml2_type_flat]`` | ``flat_networks = physnet1,physnet2`` | ``[ml2_type_vlan]`` | ``network_vlan_ranges = physnet1:1000:1500,physnet2:2000:2500`` | ``[ovs]`` | ``bridge_mappings = physnet1:br-ex,physnet2:br-eth1`` You may have to repeat the last line above in another file in the Compute Node, if it exists is located in: */etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini*. Create a route in HNAS to the tenant network. Please make sure multi-tenancy is enabled and routes are configured per EVS. Use the command “route-net-add” in HNAS console, where the network parameter should be the tenant's private network, while the gateway parameter should be the FLAT network gateway and the “console-context --evs” parameter should be the ID of EVS in use, such as in the following example: ``$ console-context --evs 3 route-net-add --gateway 192.168.1.1 10.0.0.0/24`` Step 5 - Share Type Configuration ********************************* Manila requires that the share type includes the driver_handles_share_servers extra-spec. This ensures that the share will be created on a backend that supports the requested driver_handles_share_servers capability. For the Hitachi HNAS manila driver, this must be set to False. ``$ manila type-create hitachi False`` Step 6 - Restart the services ***************************** Restart all manila services (manila-share, manila-scheduler and manila-api) and neutron services (neutron-\*). This step is specific to your environment. If you are running in devstack for example, you have to log into screen (*screen -r*), stop the process (Ctrl^C) and run it again. If you are running it in a distro like RHEL or SUSE, a service command (e.g. *service manila-api restart*) is used to restart the service. Step 7 - Configure the Network ****************************** In Neutron Controller it is necessary to create a network, a subnet and to add this subnet interface to a router: Create a network to the given tenant (demo), providing the DEMO_ID (this can be fetched using *keystone tenant-list*), a name for the network, the name of the physical network over which the virtual network is implemented and the type of the physical mechanism by which the virtual network is implemented: | ``$ neutron net-create --tenant-id hnas_network`` | ``--provider:physical_network=physnet2 --provider:network_type=flat`` Create a subnet to same tenant (demo), providing the DEMO_ID (this can be fetched using *keystone tenant-list*), the gateway IP of this subnet, a name for the subnet, the network ID created on previously step (this can be fetched using *neutron net-list*) and CIDR of subnet: | ``$ neutron subnet-create --tenant-id --gateway `` | ``--name hnas_subnet `` Finally, add the subnet interface to a router, providing the router ID and subnet ID created on previously step (can be fetched using *neutron subnet-list*): | ``$ neutron router-interface-add `` Manage and Unmanage Shares '''''''''''''''''''''''''' Manila has the ability to manage and unmanage shares. 
If there is a share in the storage and it is not in OpenStack, you can manage that share and use it as a manila Share. HNAS drivers use virtual-volumes (V-VOL) to create shares. Only V-VOL shares can be used by the driver. If the NFS export is an ordinary FS export, it is not possible to use it in manila. The unmanage operation only unlinks the share from manila. All data is preserved. | To **manage** shares use: | ``$ manila manage [--name ] [--description ]`` | ``[--share_type ] [--driver_options [ [ ...]]]`` | `` `` Where: +------------------+----------------------------------------------------------+ | Parameter | Description | +==================+==========================================================+ | | Manila host, backend and share name. e.g. | | service_host | ubuntu\@hitachi1#HITACHI1. The available hosts can be | | | listed with the command: *manila pool-list* (admin only).| +------------------+---------------------+------------------------------------+ | protocol | NFS, it is the only supported protocol in this driver | | | version. | +------------------+----------------------------------------------------------+ | export_path | The export path of the share. | | | e.g. *172.24.44.31:/shares/some_share_id* | +------------------+----------------------------------------------------------+ | To **unmanage** a share use: | ``$ manila unmanage `` Where: +------------------+---------------------------------------------------------+ | Parameter | Description | +==================+=========================================================+ | share_id | Manila ID of the share to be unmanaged. This list can | | | be fetched with: *manila list*. | +------------------+---------------------+-----------------------------------+ Additional Notes: ***************** | - HNAS has some restrictions about the number of EVSs, filesystems, virtual-volumes and simultaneous SSC connections. Check the manual specification for your system. | - Shares and snapshots are thin provisioned. It is reported to manila only the real used space in HNAS. Also, a snapshot does not initially take any space in HNAS, it only stores the difference between the share and the snapshot, so it grows when share data is changed. | - Admins should manage the tenant’s quota (*manila quota-update*) to control the backend usage. The :mod:`manila.share.drivers.hitachi.hds_hnas` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.hitachi.hds_hnas :noindex: :members: :undoc-members: :show-inheritance: manila-2.0.0/doc/source/devref/manila.rst0000664000567000056710000000362312701407107021431 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Common and Misc Libraries ========================= Libraries common throughout manila or just ones that haven't yet been categorized in depth. 
The :mod:`manila.context` Module -------------------------------- .. automodule:: manila.context :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.exception` Module ---------------------------------- .. automodule:: manila.exception :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.test` Module ----------------------------- .. automodule:: manila.test :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.utils` Module ------------------------------ .. automodule:: manila.utils :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.wsgi` Module ----------------------------- .. automodule:: manila.wsgi :noindex: :members: :undoc-members: :show-inheritance: Tests ----- The :mod:`test_exception` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.tests.test_exception :noindex: :members: :undoc-members: :show-inheritance: manila-2.0.0/doc/source/devref/threading.rst0000664000567000056710000000435612701407107022141 0ustar jenkinsjenkins00000000000000Threading model =============== All OpenStack services use *green thread* model of threading, implemented through using the Python `eventlet `_ and `greenlet `_ libraries. Green threads use a cooperative model of threading: thread context switches can only occur when specific eventlet or greenlet library calls are made (e.g., sleep, certain I/O calls). From the operating system's point of view, each OpenStack service runs in a single thread. The use of green threads reduces the likelihood of race conditions, but does not completely eliminate them. In some cases, you may need to use the ``@utils.synchronized(...)`` decorator to avoid races. In addition, since there is only one operating system thread, a call that blocks that main thread will block the entire process. Yielding the thread in long-running tasks ----------------------------------------- If a code path takes a long time to execute and does not contain any methods that trigger an eventlet context switch, the long-running thread will block any pending threads. This scenario can be avoided by adding calls to the eventlet sleep method in the long-running code path. The sleep call will trigger a context switch if there are pending threads, and using an argument of 0 will avoid introducing delays in the case that there is only a single green thread:: from eventlet import greenthread ... greenthread.sleep(0) MySQL access and eventlet ------------------------- Queries to the MySQL database will block the main thread of a service. This is because OpenStack services use an external C library for accessing the MySQL database. Since eventlet cannot use monkey-patching to intercept blocking calls in a C library, the resulting database query blocks the thread. The Diablo release contained a thread-pooling implementation that did not block, but this implementation resulted in a `bug`_ and was removed. See this `mailing list thread`_ for a discussion of this issue, including a discussion of the `impact on performance`_. .. _bug: https://bugs.launchpad.net/manila/+bug/838581 .. _mailing list thread: https://lists.launchpad.net/openstack/msg08118.html .. _impact on performance: https://lists.launchpad.net/openstack/msg08217.html manila-2.0.0/doc/source/devref/experimental_apis.rst0000664000567000056710000000651612701407107023705 0ustar jenkinsjenkins00000000000000Experimental APIs ================= Background ---------- Manila uses API microversions to allow natural evolution of its REST APIs over time. 
But microversions alone cannot solve the question of how to ship APIs that are experimental in nature, are expected to change at any time, and could even be removed entirely without a typical deprecation period. In conjunction with microversions, manila has added a facility for marking individual REST APIs as experimental. To call an experimental API, clients must include a specific HTTP header, ``X-OpenStack-Manila-API-Experimental``, with a value of ``True``. If a user calls an experimental API without including the experimental header, the server would respond with ``HTTP/404``. This forces the client to acknowledge the experimental status of the API and prevents anyone from building an application around a manila feature without realizing the feature could change significantly or even disappear. On the other hand, if a request is made to a non-experimental manila API with ``X-OpenStack-Manila-API-Experimental: True``, the server would respond as if the header had not been included. This is a convenience mechanism, as it allows the client to specify both the requested API version as well as the experimental header (if desired) in one place instead of having to set the headers separately for each API call (although that would be fine, too). When do I need to set an API experimental? ------------------------------------------ An API should be marked as experimental if any of the following is true: - the API is not yet considered a stable, core API - the API is expected to change in later releases - the API could be removed altogether if a feature is redesigned - the API controls a feature that could change or be removed When do I need to remove the experimental annotation from an API? ----------------------------------------------------------------- When the community is satisfied that an experimental feature and its APIs have had sufficient time to gather and incorporate user feedback to consider it stable, which could be one or more OpenStack release cycles, any relevant APIs must be re-released with a microversion bump and without the experimental flag. The maturation period can vary between features, but experimental is NOT a stable state, and an experimental feature should not be left in that state any longer than necessary. Because experimental APIs have no conventional deprecation period, the manila core team may optionally choose to remove any experimental versions of an API at the same time that a microversioned stable version is added. In Code ------- The ``@api_version`` decorator defined in ``manila/api/openstack/wsgi.py``, which is used for specifying API versions on top-level Controller methods, also allows for tagging an API as experimental. For example: In the controller class:: @wsgi.Controller.api_version("2.4", experimental=True) def my_api_method(self, req, id): .... This method would only be available if the caller had specified an ``X-OpenStack-Manila-API-Version`` of >= ``2.4``. and had also included ``X-OpenStack-Manila-API-Experimental: True``. If they had specified a lower version (or not specified it and received a lower default version), or if they had failed to include the experimental header, the server would respond with ``HTTP/404``. manila-2.0.0/doc/source/devref/gpfs_driver.rst0000664000567000056710000000675112701407107022507 0ustar jenkinsjenkins00000000000000.. Copyright 2015 IBM Corp. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. GPFS Driver =========== GPFS driver uses IBM General Parallel File System (GPFS), a high-performance, clustered file system, developed by IBM, as the storage backend for serving file shares to the manila clients. Supported shared filesystems ---------------------------- - NFS (access by IP) Supported Operations -------------------- - Create NFS Share - Delete NFS Share - Create Share Snapshot - Delete Share Snapshot - Create Share from a Share Snapshot - Allow NFS Share access * Currently only 'rw' access level is supported - Deny NFS Share access Requirements ------------ - Install GPFS with server license, version >= 2.0, on the storage backend. - Install Kernel NFS or Ganesha NFS server on the storage backend servers. - If using Ganesha NFS, currently NFS Ganesha v1.5 and v2.0 are supported. - Create a GPFS cluster and create a filesystem on the cluster, that will be used to create the manila shares. - Enable quotas for the GPFS file system (`mmchfs -Q yes`). - Establish network connection between the manila host and the storage backend. Manila driver configuration setting ----------------------------------- The following parameters in the manila configuration file need to be set: - `share_driver` = manila.share.drivers.ibm.gpfs.GPFSShareDriver - `gpfs_share_export_ip` = - If the backend GPFS server is not running on the manila host machine, the following options are required to SSH to the remote GPFS backend server: - `gpfs_ssh_login` = and one of the following settings is required to execute commands over SSH: - `gpfs_ssh_private_key` = - `gpfs_ssh_password` = The following configuration parameters are optional: - `gpfs_mount_point_base` = - `gpfs_nfs_server_type` = - `gpfs_nfs_server_list` = - `gpfs_ssh_port` = - `knfs_export_options` = Restart of :term:`manila-share` service is needed for the configuration changes to take effect. Known Restrictions ------------------ - The driver does not support a segmented-network multi-tenancy model but instead works over a flat network where the tenants share a network. - While using remote GPFS node, with Ganesha NFS, 'gpfs_ssh_private_key' for remote login to the GPFS node must be specified and there must be a passwordless authentication already setup between the manila share service and the remote GPFS node. The :mod:`manila.share.drivers.ibm.gpfs` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.ibm.gpfs :noindex: :members: :undoc-members: :show-inheritance: manila-2.0.0/doc/source/devref/auth.rst0000664000567000056710000000341212701407107021125 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. Copyright 2014 Mirantis, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _auth: Authentication and Authorization ================================ The :mod:`manila.quota` Module ------------------------------ .. automodule:: manila.quota :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.policy` Module ------------------------------- .. automodule:: manila.policy :noindex: :members: :undoc-members: :show-inheritance: Tests ----- The :mod:`test_quota` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.tests.test_quota :noindex: :members: :undoc-members: :show-inheritance: The :mod:`test_policy` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.tests.test_policy :noindex: :members: :undoc-members: :show-inheritance: System limits ------------- The following limits need to be defined and enforced: * Maximum cumulative size of shares and snapshots (GB) * Total number of shares * Total number of snapshots * Total number of share networks manila-2.0.0/doc/source/devref/api.rst0000664000567000056710000000340512701407107020737 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. API Endpoint ============ Manila has a system for managing multiple APIs on different subdomains. Currently there is support for the OpenStack API. Common Components ----------------- The :mod:`manila.api` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.api :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.api.v1` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.api :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.api.contrib` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.api :noindex: :members: :undoc-members: :show-inheritance: OpenStack API ------------- The :mod:`openstack` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.api.openstack :noindex: :members: :undoc-members: :show-inheritance: Tests ----- The :mod:`api` Module ~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.tests.api :noindex: :members: :undoc-members: :show-inheritance: manila-2.0.0/doc/source/devref/glusterfs_native_driver.rst0000664000567000056710000001452012701407107025125 0ustar jenkinsjenkins00000000000000.. Copyright 2015 Red Hat, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. GlusterFS Native driver ======================= GlusterFS Native driver uses GlusterFS, an open source distributed file system, as the storage backend for serving file shares to manila clients. A manila share is a GlusterFS volume. This driver uses flat-network (share-server-less) model. Instances directly talk with the GlusterFS backend storage pool. The instances use 'glusterfs' protocol to mount the GlusterFS shares. Access to each share is allowed via TLS Certificates. Only the instance which has the TLS trust established with the GlusterFS backend can mount and hence use the share. Currently only 'rw' access is supported. Network Approach ---------------- L3 connectivity between the storage backend and the host running the manila share service should exist. Supported shared filesystems ---------------------------- - GlusterFS (share protocol: ``glusterfs``, access by TLS certificates (``cert`` access type)) Multi-tenancy model ------------------- The driver does not support network segmented multi-tenancy model. Instead multi-tenancy is supported using tenant specific TLS certificates. Supported Operations -------------------- - Create share - Delete share - Allow share access (rw) - Deny share access - Create snapshot - Delete snapshot - Create share from snapshot Requirements ------------ - Install glusterfs-server package, version >= 3.6.x, on the storage backend. - Install glusterfs and glusterfs-fuse package, version >=3.6.x, on the manila host. - Establish network connection between the manila host and the storage backend. .. _gluster_native_manila_conf: Manila driver configuration setting ----------------------------------- The following parameters in manila's configuration file need to be set: - `share_driver` = manila.share.drivers.glusterfs.glusterfs_native.GlusterfsNativeShareDriver - `glusterfs_servers` = List of GlusterFS servers which provide volumes that can be used to create shares. The servers are expected to be of distinct Gluster clusters (ie. should not be gluster peers). Each server should be of the form ``[@]``. The optional ``@`` part of the server URI indicates SSH access for cluster management (see related optional parameters below). If it is not given, direct command line management is performed (ie. manila host is assumed to be part of the GlusterFS cluster the server belongs to). - `glusterfs_volume_pattern` = Regular expression template used to filter GlusterFS volumes for share creation. The regex template can contain the #{size} parameter which matches a number (sequence of digits) and the value shall be interpreted as size of the volume in GB. Examples: ``manila-share-volume-\d+$``, ``manila-share-volume-#{size}G-\d+$``; with matching volume names, respectively: *manila-share-volume-12*, *manila-share-volume-3G-13*". In latter example, the number that matches ``#{size}``, that is, 3, is an indication that the size of volume is 3G. 
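For illustration only, a backend section combining these required options might look like the following (the section name ``glusternative1`` and all values are made up, not recommendations)::

    [glusternative1]
    share_backend_name = GLUSTERNATIVE1
    driver_handles_share_servers = False
    share_driver = manila.share.drivers.glusterfs.glusterfs_native.GlusterfsNativeShareDriver
    glusterfs_servers = remoteuser@gluster1.example.com,remoteuser@gluster2.example.com
    glusterfs_volume_pattern = manila-share-volume-#{size}G-\d+$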
The following configuration parameters are optional: - `glusterfs_mount_point_base` = - `glusterfs_path_to_private_key` = - `glusterfs_server_password` = Host and backend configuration ------------------------------ - SSL/TLS should be enabled on the I/O path for GlusterFS servers and volumes involved (ie. ones specified in ``glusterfs_servers``), as described in http://www.gluster.org/community/documentation/index.php/SSL. (Enabling SSL/TLS for the management path is also possible but not recommended currently.) - The manila host should also be configured for GlusterFS SSL/TLS (ie. the `/etc/ssl/glusterfs.{pem,key,ca}` files have to be deployed as the above document specifies). - There is a further requirement for the CA-s used: the set of CA-s involved should be consensual, ie. `/etc/ssl/glusterfs.ca` should be identical across all the servers and the manila host. - There is a further requirement for the common names (CN-s) of the certificates used: the certificates of the servers should have a common name starting with `glusterfs-server`, and the certificate of the host should have a common name starting with `manila-host`. - To support snapshots, the bricks that constitute the GlusterFS volumes used by manila should be thinly provisioned LVM ones (cf. https://gluster.readthedocs.org/en/latest/Administrator%20Guide/Managing%20Snapshots/). Known Restrictions ------------------ - GlusterFS volumes are not created on demand. A pre-existing set of GlusterFS volumes should be supplied by the GlusterFS cluster(s), conforming to the naming convention encoded by ``glusterfs_volume_pattern``. However, the GlusterFS endpoint is allowed to extend this set at any time (so manila and GlusterFS endpoints are expected to communicate volume supply/demand out-of-band). ``glusterfs_volume_pattern`` can include a size hint (with ``#{size}`` syntax), which, if present, requires the GlusterFS end to indicate the size of the shares in GB in the name. (On share creation, manila picks volumes *at least* as big as the requested one.) - Certificate setup (aka trust setup) between instance and storage backend is out of band of manila. - For manila to use GlusterFS volumes, the name of the trashcan directory in GlusterFS volumes must not be changed from the default. The :mod:`manila.share.drivers.glusterfs.glusterfs_native.GlusterfsNativeShareDriver` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.glusterfs.glusterfs_native :noindex: :members: :undoc-members: :show-inheritance: manila-2.0.0/doc/source/devref/services.rst0000664000567000056710000000457112701407107022016 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ..
_service_manager_driver: Services, Managers and Drivers ============================== The responsibilities of Services, Managers, and Drivers, can be a bit confusing to people that are new to manila. This document attempts to outline the division of responsibilities to make understanding the system a little bit easier. Currently, Managers and Drivers are specified by flags and loaded using utils.load_object(). This method allows for them to be implemented as singletons, classes, modules or objects. As long as the path specified by the flag leads to an object (or a callable that returns an object) that responds to getattr, it should work as a manager or driver. The :mod:`manila.service` Module -------------------------------- .. automodule:: manila.service :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.manager` Module -------------------------------- .. automodule:: manila.manager :noindex: :members: :undoc-members: :show-inheritance: Implementation-Specific Drivers ------------------------------- A manager will generally load a driver for some of its tasks. The driver is responsible for specific implementation details. Anything running shell commands on a host, or dealing with other non-python code should probably be happening in a driver. Drivers should minimize touching the database, although it is currently acceptable for implementation specific data. This may be reconsidered at some point. It usually makes sense to define an Abstract Base Class for the specific driver (i.e. VolumeDriver), to define the methods that a different driver would need to implement. manila-2.0.0/doc/source/devref/pool-aware-manila-scheduler.rst0000664000567000056710000001750312701407107025453 0ustar jenkinsjenkins00000000000000Pool-Aware Scheduler Support ============================ https://blueprints.launchpad.net/manila/+spec/dynamic-storage-pools Manila currently sees each share backend as a whole, even if the backend consists of several smaller pools with totally different capabilities and capacities. Extending manila to support storage pools within share backends will make manila scheduling decisions smarter as it now knows the full set of capabilities of a backend. Problem Description ------------------- The provisioning decisions in manila are based on the statistics reported by backends. Any backend is assumed to be a single discrete unit with a set of capabilities and single capacity. In reality this assumption is not true for many storage providers, as their storage can be further divided or partitioned into pools to offer completely different sets of capabilities and capacities. That is, there are storage backends which are a combination of storage pools rather than a single homogeneous entity. Usually shares/snapshots can't be placed across pools on such backends. In the current implementation, an attempt is made to map a single backend to a single storage controller, and the following problems may arise: * After the scheduler selects a backend on which to place a new share, the backend may have to make a second decision about where to place the share within that backend. This logic is driver-specific and hard for admins to deal with. * The capabilities that the backend reports back to the scheduler may not apply universally. A single backend may support both SATA and SSD-based storage, but perhaps not at the same time. Backends need a way to express exactly what they support and how much space is consumed out of each type of storage. 
Therefore, it is important to extend manila so that it is aware of storage pools within each backend and can use them as the finest granularity for resource placement. Proposed change --------------- A pool-aware scheduler will address the need for supporting multiple pools from one storage backend. Terminology ----------- Pool A logical concept to describe a set of storage resources that can be used to serve core manila requests, e.g. shares/snapshots. This notion is almost identical to manila Share Backend, for it has similar attributes (capacity, capability). The difference is that a Pool may not exist on its own; it must reside in a Share Backend. One Share Backend can have multiple Pools, but Pools do not have sub-Pools (meaning even if they have them, sub-Pools do not get exposed to manila yet). Each Pool has a unique name in the Share Backend namespace, which means a Share Backend cannot have two pools using the same name. Design ------ The workflow in this change is simple: 1) Share Backends report to the scheduler how many pools they have, what those pools look like and what they are capable of; 2) When a request comes in, the scheduler picks the pool that best fits the need and passes the request to the backend where the target pool resides; 3) The share driver gets the message and lets the target pool serve the request as the scheduler instructed. To support placing resources (share/snapshot) onto a pool, these changes will be made to specific components of manila: 1. Share Backends reporting capacity/capabilities at pool level; 2. Scheduler filtering/weighing based on pool capacity/capability and placing shares/snapshots to a pool of a certain backend; 3. Record which backend and pool a resource is located on. Data model impact ----------------- No DB schema change is involved; however, the host field of the Shares table will now include pool information. No DB migration is needed. Original host field of Shares: ``HostX@BackendY`` With this change: ``HostX@BackendY#pool0`` REST API impact --------------- With pool support added to manila, there is an awkward situation where we require the admin to input the exact location for shares to be imported, which must include pool info. But there is no way to find out what pools a backend has except by looking at the scheduler log, which makes for a poor user experience. This change simply adds a new admin-api extension to allow the admin to fetch all the pool information from the scheduler cache (memory), which closes this gap for end users. This extension provides two levels of pool information: names only, or detailed information: Pool name only: GET http://MANILA_API_ENDPOINT/v1/TENANT_ID/scheduler-stats/pools Detailed Pool info: GET http://MANILA_API_ENDPOINT/v1/TENANT_ID/scheduler-stats/pools/detail Security impact --------------- N/A Notifications impact -------------------- The host attribute of shares now includes pool information; consumers of notifications can extract the pool information if needed. Admin impact ------------ Administrators now need to suffix the backend host with ``#pool`` in commands used to manage shares. Other end user impact --------------------- No impact is visible to the end user directly, but administrators now need to append the hash (``#``) sign and the name of the pool (e.g. ``#poolName``) to the backend host in commands that refer to it. Another impact is that if a backend does not expose pools, the backend name is used as the pool name. For instance, ``HostX@BackendY#BackendY`` would be used when the driver does not expose pools.
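For illustration only (the host, pool and export values below are made up, the token is assumed to be available in ``$TOKEN``, and the ``manila manage`` client command is assumed as the import entry point), an administrator could query the new extension and reference a pool-qualified host as follows::

    # Fetch detailed pool information from the scheduler cache
    curl -s -H "X-Auth-Token: $TOKEN" \
        http://MANILA_API_ENDPOINT/v1/TENANT_ID/scheduler-stats/pools/detail

    # Reference a pool-qualified backend host when importing an existing share
    manila manage HostX@BackendY#pool0 NFS 10.0.0.1:/shares/share_foo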
Performance Impact ------------------ The size of the RPC message for each share stats report will be bigger than before (linear in the number of pools a backend has). It should not really impact the RPC facility in terms of performance and, even if it did, pure text compression should easily mitigate this problem. Developer impact ---------------- For those share backends that would like to expose internal pools to manila for more flexibility, developers should update their drivers to include all pool capacities and capabilities in the share stats they report to the scheduler. Share backends without multiple pools do not need to change their implementation. Below is an example of a new stats message having multiple pools: :: { 'share_backend_name': 'My Backend', #\ 'vendor_name': 'OpenStack', # backend level 'driver_version': '1.0', # mandatory/fixed 'storage_protocol': 'NFS/CIFS', #- stats&capabilities 'active_shares': 10, #\ 'IOPS_provisioned': 30000, # optional custom 'fancy_capability_1': 'eat', # stats & capabilities 'fancy_capability_2': 'drink', #/ 'pools': [ {'pool_name': '1st pool', #\ 'total_capacity_gb': 500, # mandatory stats for 'free_capacity_gb': 230, # pools 'allocated_capacity_gb': 270, # | 'qos': True, # | 'reserved_percentage': 0, #/ 'dying_disks': 100, #\ 'super_hero_1': 'spider-man', # optional custom 'super_hero_2': 'flash', # stats & capabilities 'super_hero_3': 'neoncat' #/ }, {'pool_name': '2nd pool', 'total_capacity_gb': 1024, 'free_capacity_gb': 1024, 'allocated_capacity_gb': 0, 'qos': False, 'reserved_percentage': 0, 'dying_disks': 200, 'super_hero_1': 'superman', 'super_hero_2': 'Hulk', } ] } Documentation Impact -------------------- The documentation impact for changes in manila is introduced by the API changes. Also, doc changes are needed to append pool names to host names. Driver changes may also introduce new configuration options, which would lead to doc changes. manila-2.0.0/doc/source/devref/cephfs_native_driver.rst0000664000567000056710000001054012701407112024351 0ustar jenkinsjenkins00000000000000 CephFS Native driver ==================== The CephFS Native driver enables manila to export shared filesystems to guests using the Ceph network protocol. Guests require a Ceph client in order to mount the filesystem. Access is controlled via Ceph's cephx authentication system. Each share has a distinct authentication key that must be passed to clients for them to use it. To learn more about configuring Ceph clients to access the shares created using this driver, please see the Ceph documentation (http://docs.ceph.com/docs/master/cephfs/). If you choose to use the kernel client rather than the FUSE client, the share size limits set in manila may not be obeyed. Prerequisites ------------- - A Ceph cluster with a filesystem configured (http://docs.ceph.com/docs/master/cephfs/createfs/) - Network connectivity between your Ceph cluster's public network and the server running the :term:`manila-share` service. - Network connectivity between your Ceph cluster's public network and guests .. important:: A manila share backed by CephFS is only as good as the underlying filesystem.
Take care when configuring your Ceph cluster, and consult the latest guidance on the use of CephFS in the Ceph documentation ( http://docs.ceph.com/docs/master/cephfs/) Authorize the driver to communicate with Ceph --------------------------------------------- Run the following command to create a Ceph identity for manila to use: .. code-block:: console ceph auth get-or-create client.manila mon 'allow r; allow command "auth del" with entity prefix client.manila.; allow command "auth caps" with entity prefix client.manila.; allow command "auth get" with entity prefix client.manila., allow command "auth get-or-create" with entity prefix client.manila.' mds 'allow *' osd 'allow rw' > keyring.manila keyring.manila, along with your ceph.conf file, will then need to be placed on the server where the :term:`manila-share` service runs, and the paths to these configured in your manila.conf. Enable snapshots in Ceph if you want to use them in manila: .. code-block:: console ceph mds set allow_new_snaps true --yes-i-really-mean-it Configure CephFS backend in manila.conf --------------------------------------- Add CephFS to ``enabled_share_protocols`` (enforced at manila api layer). In this example we leave NFS and CIFS enabled, although you can remove these if you will only use CephFS: .. code-block:: ini enabled_share_protocols = NFS,CIFS,CEPHFS Create a section like this to define a CephFS backend: .. code-block:: ini [cephfs1] driver_handles_share_servers = False share_backend_name = CEPHFS1 share_driver = manila.share.drivers.cephfs.cephfs_native.CephFSNativeDriver cephfs_conf_path = /etc/ceph/ceph.conf cephfs_auth_id = manila Then edit ``enabled_share_backends`` to point to it, using the same name that you used for the backend section. In this example we are also including another backend ("generic1"), you would include whatever other backends you have configured. .. code-block:: ini enabled_share_backends = generic1, cephfs1 Creating shares --------------- The default share type may have driver_handles_share_servers set to True. Configure a share type suitable for cephfs: .. code-block:: console manila type-create cephfstype false Then create yourself a share: .. code-block:: console manila create --share-type cephfstype --name cephshare1 cephfs 1 Mounting a client with FUSE --------------------------- Using the key from your export location, and the share ID, create a keyring file like: .. code-block:: ini [client.share-4c55ad20-9c55-4a5e-9233-8ac64566b98c] key = AQA8+ANW/4ZWNRAAOtWJMFPEihBA1unFImJczA== Using the mon IP addresses from your export location, create a ceph.conf file like: .. code-block:: ini [client] client quota = true [mon.a] mon addr = 192.168.1.7:6789 [mon.b] mon addr = 192.168.1.8:6789 [mon.c] mon addr = 192.168.1.9:6789 Finally, mount the filesystem, substituting the filenames of the keyring and configuration files you just created: .. code-block:: console ceph-fuse --id=share-4c55ad20-9c55-4a5e-9233-8ac64566b98c -c ./client.conf --keyring=./client.keyring --client-mountpoint=/volumes/share-4c55ad20-9c55-4a5e-9233-8ac64566b98c ~/mnt manila-2.0.0/doc/source/devref/hpe_3par_driver.rst0000664000567000056710000002130012701407107023234 0ustar jenkinsjenkins00000000000000.. Copyright 2015 Hewlett Packard Development Company, L.P. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. HPE 3PAR Driver ============== The HPE 3PAR manila driver provides NFS and CIFS shared file systems to OpenStack using HPE 3PAR's File Persona capabilities. .. note:: In OpenStack releases prior to Mitaka this driver was called the HP 3PAR driver. The Liberty configuration reference can be found at: http://docs.openstack.org/liberty/config-reference/content/hp-3par-share-driver.html Supported Operations -------------------- The following operations are supported with HPE 3PAR File Persona: - Create/delete NFS and CIFS shares * Shares are not accessible until access rules allow access - Allow/deny NFS share access * IP access rules are required for NFS share access * User access rules are not allowed for NFS shares * Access level (RW/RO) is ignored * Shares created from snapshots are always read-only * Shares not created from snapshots are read-write (and subject to ACLs) - Allow/deny CIFS share access * Both IP and user access rules are required for CIFS share access * User access requires a 3PAR local user (LDAP and AD is not yet supported) * Access level (RW/RO) is ignored * Shares created from snapshots are always read-only * Shares not created from snapshots are read-write (and subject to ACLs) - Create/delete snapshots - Create shares from snapshots * Shares created from snapshots are always read-only Share networks are not supported. Shares are created directly on the 3PAR without the use of a share server or service VM. Network connectivity is setup outside of manila. Requirements ------------ On the system running the manila share service: - python-3parclient 4.0.0 or newer from PyPI. On the HPE 3PAR array: - HPE 3PAR Operating System software version 3.2.1 MU3 or higher - A license that enables the File Persona feature - The array class and hardware configuration must support File Persona Pre-Configuration on the HPE 3PAR -------------------------------- - HPE 3PAR File Persona must be initialized and started (:code:`startfs`) - A File Provisioning Group (FPG) must be created for use with manila - A Virtual File Server (VFS) must be created for the FPG - The VFS must be configured with an appropriate share export IP address - A local user in the Administrators group is needed for CIFS shares Backend Configuration --------------------- The following parameters need to be configured in the manila configuration file for the HPE 3PAR driver: - `share_backend_name` = - `share_driver` = manila.share.drivers.hpe.hpe_3par_driver.HPE3ParShareDriver - `driver_handles_share_servers` = False - `hpe3par_fpg` = - `hpe3par_share_ip_address` = - `hpe3par_san_ip` = - `hpe3par_api_url` = <3PAR WS API Server URL> - `hpe3par_username` = <3PAR username with the 'edit' role> - `hpe3par_password` = <3PAR password for the user specified in hpe3par_username> - `hpe3par_san_login` = - `hpe3par_san_password` = - `hpe3par_debug` = The `hpe3par_share_ip_address` must be a valid IP address for the configured FPG's VFS. This IP address is used in export locations for shares that are created. Networking must be configured to allow connectivity from clients to shares. 
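For illustration only, a backend section combining the options above might look like the following (all values are placeholders, not recommendations)::

    [hpe3parfpg1]
    share_backend_name = HPE3PARFPG1
    share_driver = manila.share.drivers.hpe.hpe_3par_driver.HPE3ParShareDriver
    driver_handles_share_servers = False
    hpe3par_fpg = samplefpg
    hpe3par_share_ip_address = 10.50.3.201
    hpe3par_san_ip = 10.50.3.1
    hpe3par_api_url = https://10.50.3.1:8080/api/v1
    hpe3par_username = sampleuser
    hpe3par_password = samplepassword
    hpe3par_san_login = samplesanuser
    hpe3par_san_password = samplesanpassword
    hpe3par_debug = False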
Restart of :term:`manila-share` service is needed for the configuration changes to take effect. Network Approach ---------------- Connectivity between the storage array (SSH/CLI and WSAPI) and the manila host is required for share management. Connectivity between the clients and the VFS is required for mounting and using the shares. This includes: - Routing from the client to the external network - Assigning the client an external IP address (e.g., a floating IP) - Configuring the manila host networking properly for IP forwarding - Configuring the VFS networking properly for client subnets Share Types ----------- When creating a share, a share type can be specified to determine where and how the share will be created. If a share type is not specified, the `default_share_type` set in the manila configuration file is used. Manila requires that the share type includes the `driver_handles_share_servers` extra-spec. This ensures that the share will be created on a backend that supports the requested driver_handles_share_servers (share networks) capability. For the HPE 3PAR driver, this must be set to False. Another common manila extra-spec used to determine where a share is created is `share_backend_name`. When this extra-spec is defined in the share type, the share will be created on a backend with a matching share_backend_name. The HPE 3PAR driver automatically reports capabilities based on the FPG used for each backend. Share types with extra specs can be created by an administrator to control which share types are allowed to use FPGs with or without specific capabilities. The following extra-specs are used with the capabilities filter and the HPE 3PAR driver: - `hpe3par_flash_cache` = ' True' or ' False' - `thin_provisioning` = ' True' or ' False' - `dedupe` = ' True' or ' False' `hpe3par_flash_cache` will be reported as True for backends that have 3PAR's Adaptive Flash Cache enabled. `thin_provisioning` will be reported as True for backends that use thin provisioned volumes. FPGs that use fully provisioned volumes will report False. Backends that use thin provisioning also support manila's over-subscription feature. `dedupe` will be reported as True for backends that use deduplication technology. Scoped extra-specs are used to influence vendor-specific implementation details. Scoped extra-specs use a prefix followed by a colon. For HPE 3PAR these extra-specs have a prefix of `hpe3par`. For HP 3PAR these extra-specs have a prefix of `hp3par`. The following HPE 3PAR extra-specs are used when creating CIFS (SMB) shares: - `hpe3par:smb_access_based_enum` = true or false - `hpe3par:smb_continuous_avail` = true or false - `hpe3par:smb_cache` = off, manual, optimized or auto `smb_access_based_enum` (Access Based Enumeration) specifies if users can see only the files and directories to which they have been allowed access on the shares. The default is `false`. `smb_continuous_avail` (Continuous Availability) specifies if SMB3 continuous availability features should be enabled for this share. If not specified, the default is `true`. This setting will be ignored with hp3parclient 3.2.1 or earlier. `smb_cache` specifies client-side caching for offline files. Valid values are: * `off`: The client must not cache any files from this share. The share is configured to disallow caching. * `manual`: The client must allow only manual caching for the files open from this share. * `optimized`: The client may cache every file that it opens from this share. 
Also, the client may satisfy the file requests from its local cache. The share is configured to allow automatic caching of programs and documents. * `auto`: The client may cache every file that it opens from this share. The share is configured to allow automatic caching of documents. * If this is not specified, the default is `manual`. The following HPE 3PAR extra-specs are used when creating NFS shares: - `hpe3par:nfs_options` = Comma separated list of NFS export options The NFS export options have the following limitations: * `ro` and `rw` are not allowed (manila will determine the read-only option) * `no_subtree_check` and `fsid` are not allowed per HPE 3PAR CLI support * `(in)secure` and `(no_)root_squash` are not allowed because the HPE 3PAR driver controls those settings All other NFS options are forwarded to the HPE 3PAR as part of share creation. The HPE 3PAR will do additional validation at share creation time. Refer to HPE 3PAR CLI help for more details. The :mod:`manila.share.drivers.hpe.hpe_3par_driver` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.hpe.hpe_3par_driver :noindex: :members: :undoc-members: :show-inheritance: manila-2.0.0/doc/source/devref/unit_tests.rst0000664000567000056710000001303512701407107022367 0ustar jenkinsjenkins00000000000000Unit Tests ========== Manila contains a suite of unit tests, in the manila/tests directory. Any proposed code change will be automatically rejected by the OpenStack Jenkins server [#f1]_ if the change causes unit test failures. Running the tests ----------------- Run the unit tests by doing:: ./run_tests.sh This script is a wrapper around the `nose`_ testrunner and the `pep8`_ checker. .. _nose: http://code.google.com/p/python-nose/ .. _pep8: https://github.com/jcrocholl/pep8 Flags ----- The ``run_tests.sh`` script supports several flags. You can view a list of flags by doing:: run_tests.sh -h This will show the following help information:: Usage: ./run_tests.sh [OPTION]... Run manila's test suite(s) -V, --virtual-env Always use virtualenv. Install automatically if not present -N, --no-virtual-env Don't use virtualenv. Run tests in local environment -s, --no-site-packages Isolate the virtualenv from the global Python environment -r, --recreate-db Recreate the test database (deprecated, as this is now the default). -n, --no-recreate-db Don't recreate the test database. -x, --stop Stop running tests after the first error or failure. -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added. -p, --pep8 Just run pep8 -P, --no-pep8 Don't run pep8 -c, --coverage Generate coverage report -h, --help Print this usage message --hide-elapsed Don't print the elapsed time for each test along with slow test list Because ``run_tests.sh`` is a wrapper around nose, it also accepts the same flags as nosetests. See the `nose options documentation`_ for details about these additional flags. .. _nose options documentation: http://readthedocs.org/docs/nose/en/latest/usage.html#options Running a subset of tests ------------------------- Instead of running all tests, you can specify an individual directory, file, class, or method that contains test code. 
To run the tests in the ``manila/tests/scheduler`` directory:: ./run_tests.sh scheduler To run the tests in the ``manila/tests/test_libvirt.py`` file:: ./run_tests.sh test_libvirt To run the tests in the `HostStateTestCase` class in ``manila/tests/test_libvirt.py``:: ./run_tests.sh test_libvirt:HostStateTestCase To run the `ToPrimitiveTestCase.test_dict` test method in ``manila/tests/test_utils.py``:: ./run_tests.sh test_utils:ToPrimitiveTestCase.test_dict Suppressing logging output when tests fail ------------------------------------------ By default, when one or more unit test fails, all of the data sent to the logger during the failed tests will appear on standard output, which typically consists of many lines of texts. The logging output can make it difficult to identify which specific tests have failed, unless your terminal has a large scrollback buffer or you have redirected output to a file. You can suppress the logging output by calling ``run_tests.sh`` with the nose flag:: --nologcapture Virtualenv ---------- By default, the tests use the Python packages installed inside a virtualenv [#f2]_. (This is equivalent to using the ``-V, --virtualenv`` flag). If the virtualenv does not exist, it will be created the first time the tests are run. If you wish to recreate the virtualenv, call ``run_tests.sh`` with the flag:: -f, --force Recreating the virtualenv is useful if the package dependencies have changed since the virtualenv was last created. If the ``requirements.txt`` or ``tools/install_venv.py`` files have changed, it's a good idea to recreate the virtualenv. By default, the unit tests will see both the packages in the virtualenv and the packages that have been installed in the Python global environment. In some cases, the packages in the Python global environment may cause a conflict with the packages in the virtualenv. If this occurs, you can isolate the virtualenv from the global environment by using the flag:: -s, --no-site packages If you do not wish to use a virtualenv at all, use the flag:: -N, --no-virtual-env Database -------- Some of the unit tests make queries against an sqlite database [#f3]_. By default, the test database (``tests.sqlite``) is deleted and recreated each time ``run_tests.sh`` is invoked (This is equivalent to using the ``-r, --recreate-db`` flag). To reduce testing time if a database already exists it can be reused by using the flag:: -n, --no-recreate-db Reusing an existing database may cause tests to fail if the schema has changed. If any files in the ``manila/db/sqlalchemy`` have changed, it's a good idea to recreate the test database. Gotchas ------- **Running Tests from Shared Folders** If you are running the unit tests from a shared folder, you may see tests start to fail or stop completely as a result of Python lockfile issues [#f4]_. You can get around this by manually setting or updating the following line in ``manila/tests/conf_fixture.py``:: FLAGS['lock_path'].SetDefault('/tmp') Note that you may use any location (not just ``/tmp``!) as long as it is not a shared folder. .. rubric:: Footnotes .. [#f1] See :doc:`jenkins`. .. [#f2] See :doc:`development.environment` for more details about the use of virtualenv. .. [#f3] There is an effort underway to use a fake DB implementation for the unit tests. See https://lists.launchpad.net/openstack/msg05604.html .. 
[#f4] See Vish's comment in this bug report: https://bugs.launchpad.net/manila/+bug/882933 manila-2.0.0/doc/source/devref/driver_requirements.rst0000664000567000056710000002001212701407107024255 0ustar jenkinsjenkins00000000000000.. Copyright (c) 2015 Hitachi Data Systems All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Manila minimum requirements and features since Mitaka ===================================================== In order for a driver to be accepted into manila code base, there are certain minimum requirements and features that must be met, in order to ensure interoperability and standardized manila functionality among cloud providers. At least one driver mode (:term:`DHSS` true/false) -------------------------------------------------- Driver modes determine if the driver is managing network resources (:term:`DHSS` = true) in an automated way, in order to segregate tenants and private networks by making use of manila Share Networks, or if it is up to the administrator to manually configure all networks (:term:`DHSS` = false) and be responsible for segregation, if that is desired. At least one driver mode must be supported. In :term:`DHSS` = true mode, Share Server entities are used, so the driver must implement functions that setup and teardown such servers. At least one file system sharing protocol ----------------------------------------- In order to serve shares as a shared file system service, the driver must support at least one file system sharing protocol, which can be a new protocol or one of the currently supported protocols. The current list of supported protocols is as follows: - NFS - CIFS - GlusterFS - HDFS Access rules ------------ Access rules control how shares are accessible, by whom, and what the level of access is. Access rule operations include allowing access and denying access to a given share. The authentication type should be based on IP, User and/or Certificate. Drivers must support read-write and read-only access levels for each supported protocol, either through individual access rules or separate export locations. Shares ------ Share servicing is the core functionality of a shared file system service, so a driver must be able to create and delete shares. Share extending --------------- In order to best satisfy cloud service requirements, shares must be elastic, so drivers must implement a share extend function that allows shares' size to be increased. 
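To make the minimum expectations concrete, below is a rough sketch (not a real backend: the class name and method bodies are illustrative, and the exact signatures should be checked against ``manila.share.driver.ShareDriver``) of the share servicing and extending surface of a :term:`DHSS` = false driver::

    from manila.share import driver


    class ExampleShareDriver(driver.ShareDriver):
        """Illustrative skeleton of a minimal DHSS=false share driver."""

        def create_share(self, context, share, share_server=None):
            # Provision the share on the backend and return its export
            # location(s) so manila can hand them out to clients.
            raise NotImplementedError()

        def delete_share(self, context, share, share_server=None):
            # Remove the share and any backend artifacts belonging to it.
            raise NotImplementedError()

        def extend_share(self, share, new_size, share_server=None):
            # Grow the share to new_size (in GB) without disrupting clients.
            raise NotImplementedError()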
Capabilities ------------ In order for manila to function accordingly to the driver being used, the driver must provide a set of information to manila, known as capabilities, as follows: - share_backend_name: a name for the backend; - driver_handles_share_servers: driver mode, whether this driver instance handles share servers, possible values are true or false; - vendor_name: driver vendor name; - driver_version: current driver instance version; - storage_protocol: list of shared file system protocols supported by this driver instance; - total_capacity_gb: total amount of storage space provided, in GB; - free_capacity_gb: amount of storage space available for use, in GB; - reserved_percentage: percentage of total storage space to be kept from being used. Certain features, if supported by drivers, need to be reported in order to function correctly in manila, such as: - dedupe: whether the backend supports deduplication; - compression: whether the backend supports compressed shares; - thin_provisioning: whether the backend is overprovisioning shares; - pools: list of storage pools managed by this driver instance; - qos: whether the backend supports quality of service for shares. .. note:: for more information please see http://docs.openstack.org/developer/manila/devref/capabilities_and_extra_specs.html Continuous Integration systems ------------------------------ Every driver vendor must supply a CI system that tests its drivers continuously for each patch submitted to OpenStack gerrit. This allows for better QA and quicker response and notification for driver vendors when a patch submitted affects an existing driver. The CI system must run all applicable tempest tests, test all patches Jenkins has posted +1 and post its test results. .. note:: for more information please see http://docs.openstack.org/infra/system-config/third_party.html Unit tests ---------- All drivers submitted must be contemplated with unit tests covering at least 90% of the code, preferably 100% if possible. Unit tests must use mock framework and be located in-tree using a structure that mirrors the functional code, such as directory names and filenames. See template below: :: manila/[tests/]path/to/brand/new/[test_]driver.py Documentation ------------- Drivers submitted must provide and maintain related documentation on openstack-manuals, containing instructions on how to properly install and configure. The intended audience for this manual is cloud operators and administrators. Also, driver maintainers must update the manila share features support mapping documentation found at http://docs.openstack.org/developer/manila/devref/share_back_ends_feature_support_mapping.html Manila optional requirements and features since Mitaka ====================================================== Additional to the minimum required features supported by manila, other optional features can be supported by drivers as they are already supported in manila and can be accessed through the API. Snapshots --------- Share Snapshots allow for data respective to a particular point in time to be saved in order to be used later. In manila API, share snapshots taken can only be restored by creating new shares from them, thus the original share remains unaffected. If Snapshots are supported by drivers, they must be crash-consistent. Managing/Unmanaging shares -------------------------- If :term:`DHSS` = false mode is used, then drivers may implement a function that supports reading existing shares in the backend that were not created by manila. 
After the previously existing share is registered in manila, it is completely controlled by manila and should not be handled externally anymore. Additionally, a function that de-registers such shares from manila but does not delete them from the backend may also be supported. Share shrinking --------------- The manila API supports share shrinking, so a share can be shrunk in a similar way to how it is extended, but the driver is responsible for making sure no data is compromised. Share ensuring -------------- In some situations, such as when the driver is restarted, manila attempts to perform maintenance on created shares, with the purpose of ensuring previously created shares are available and being serviced correctly. The driver can implement this function by checking shares' status and performing maintenance operations if needed, such as re-exporting. Manila experimental features since Mitaka ========================================= Some features are initially released as experimental and can be accessed by including specific additional HTTP request headers. Those features are not recommended for production cloud environments while in the experimental stage. Share Migration --------------- Shares can be migrated between different backends and pools. Manila implements migration using an approach that works for any manufacturer, but driver vendors can implement a better optimized migration function for cases where migration involves backends or pools related to the same vendor. Consistency Groups ------------------ Shares can be created within Consistency Groups in order to guarantee snapshot consistency of multiple shares. In order to make use of this feature, driver vendors must report this capability and implement its functions to work according to the backend, so the feature can be properly invoked through the manila API. manila-2.0.0/doc/source/devref/development.environment.rst0000664000567000056710000001174512701407107025061 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Setting Up a Development Environment ==================================== This page describes how to set up a working Python development environment that can be used in developing manila on Ubuntu, Fedora or Mac OS X. These instructions assume you're already familiar with git. Refer to GettingTheCode_ for additional information. .. _GettingTheCode: http://wiki.openstack.org/GettingTheCode Following these instructions will allow you to run the manila unit tests. If you want to be able to run manila (i.e., create NFS/CIFS shares), you will also need to install the dependent projects: Nova, Neutron, Cinder and Glance. For this purpose the 'devstack' project can be used (a documented shell script to build complete OpenStack development environments). ..
_DeployOpenstack: http://devstack.org/ Virtual environments -------------------- Manila development uses `virtualenv `__ to track and manage Python dependencies while in development and testing. This allows you to install all of the Python package dependencies in a virtual environment or "virtualenv" (a special subdirectory of your manila directory), instead of installing the packages at the system level. .. note:: Virtualenv is useful for running the unit tests, but is not typically used for full integration testing or production usage. Linux Systems ------------- .. note:: This section is tested for manila on Ubuntu (12.04-64) and Fedora-based (RHEL 6.1) distributions. Feel free to add notes and change according to your experiences or operating system. Install the prerequisite packages. On Ubuntu:: sudo apt-get install python-dev libssl-dev python-pip git-core libmysqlclient-dev libpq-dev On Fedora-based distributions (e.g., Fedora/RHEL/CentOS/Scientific Linux):: sudo yum install python-devel openssl-devel python-pip git mysql-devel postgresql-devel Mac OS X Systems ---------------- Install virtualenv:: sudo easy_install virtualenv Check the version of OpenSSL you have installed:: openssl version If you have installed OpenSSL 1.0.0a, which can happen when installing a MacPorts package for OpenSSL, you will see an error when running ``manila.tests.auth_unittest.AuthTestCase.test_209_can_generate_x509``. The stock version of OpenSSL that ships with Mac OS X 10.6 (OpenSSL 0.9.8l) or Mac OS X 10.7 (OpenSSL 0.9.8r) works fine with manila. Getting the code ---------------- Grab the code:: git clone https://github.com/openstack/manila.git cd manila Running unit tests ------------------ The unit tests will run by default inside a virtualenv in the ``.venv`` directory. Run the unit tests by doing:: ./run_tests.sh The first time you run them, you will be asked if you want to create a virtual environment (hit "y"):: No virtual environment found...create one? (Y/n) See :doc:`unit_tests` for more details. .. _virtualenv: Manually installing and using the virtualenv -------------------------------------------- You can manually install the virtual environment instead of having ``run_tests.sh`` do it for you:: python tools/install_venv.py This will install all of the Python packages listed in the ``requirements.txt`` file into your virtualenv. There will also be some additional packages (pip, distribute, greenlet) that are installed by the ``tools/install_venv.py`` file into the virtualenv. If all goes well, you should get a message something like this:: Manila development environment setup is complete. To activate the manila virtualenv for the extent of your current shell session you can run:: $ source .venv/bin/activate Or, if you prefer, you can run commands in the virtualenv on a case-by-case basis by running:: $ tools/with_venv.sh Contributing Your Work ---------------------- Once your work is complete you may wish to contribute it to the project. Add your name and email address to the ``Authors`` file, and also to the ``.mailmap`` file if you use multiple email addresses. Your contributions cannot be merged into trunk unless you are listed in the Authors file. Manila uses the Gerrit code review system. For information on how to submit your branch to Gerrit, see GerritWorkflow_. ..
_GerritWorkflow: http://docs.openstack.org/infra/manual/developers.html#development-workflow manila-2.0.0/doc/source/devref/glusterfs_driver.rst0000664000567000056710000001726512701407107023570 0ustar jenkinsjenkins00000000000000.. Copyright 2015 Red Hat, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. GlusterFS driver ================ GlusterFS driver uses GlusterFS, an open source distributed file system, as the storage backend for serving file shares to manila clients. Supported shared filesystems ---------------------------- - NFS (access by IP) Supported Operations -------------------- - Create share - Delete share - Allow share access (rw) - Deny share access - With volume layout: - Create snapshot - Delete snapshot - Create share from snapshot Requirements ------------ - Install glusterfs-server package, version >= 3.5.x, on the storage backend. - Install NFS-Ganesha, version >=2.1, if using NFS-Ganesha as the NFS server for the GlusterFS backend. - Install glusterfs and glusterfs-fuse package, version >=3.5.x, on the manila host. - Establish network connection between the manila host and the storage backend. Manila driver configuration setting ----------------------------------- The following parameters in the manila's configuration file need to be set: - `share_driver` = manila.share.drivers.glusterfs.GlusterfsShareDriver The following configuration parameters are optional: - `glusterfs_nfs_server_type` = - `glusterfs_share_layout` = ; cf. :ref:`glusterfs_layouts` - `glusterfs_path_to_private_key` = - `glusterfs_server_password` = If Ganesha NFS server is used (``glusterfs_nfs_server_type = Ganesha``), then by default the Ganesha server is supposed to run on the manila host and is managed by local commands. If it's deployed somewhere else, then it's managed via ssh, which can be configured by the following parameters: - `glusterfs_ganesha_server_ip` - `glusterfs_ganesha_server_username` - `glusterfs_ganesha_server_password` In lack of ``glusterfs_ganesha_server_password`` ssh access will fall back to key based authentication, using the key specified by ``glusterfs_path_to_private_key``, or, in lack of that, a key at one of the OpenSSH-style default key locations (*~/.ssh/id_{r,d,ecd}sa*). For further (non driver specific) configuration of Ganesha, see :doc:`ganesha`. It is recommended to consult with :doc:`ganesha`: :ref:`ganesha_known_issues` too. Layouts have also their set of parameters, see :ref:`glusterfs_layouts` about that. .. _glusterfs_layouts: Layouts ------- New in Liberty, multiple share layouts can be used with glusterfs driver. A layout is a strategy of allocating storage from GlusterFS backends for shares. Currently there are two layouts implemented: - `directory mapped layout` (or `directory layout`, or `dir layout` for short): a share is backed by top-level subdirectories of a given GlusterFS volume. Directory mapped layout is the default and backward compatible with Kilo. 
The following setting explicitly specifies its usage: ``glusterfs_share_layout = layout_directory.GlusterfsDirectoryMappedLayout``. Options: - `glusterfs_target`: address of the volume that hosts the directories. If it's of the format `:/`, then the manila host is expected to be part of the GlusterFS cluster of the volume and GlusterFS management happens through locally calling the ``gluster`` utility. If it's of the format `@:/`, then we ssh to `@` to execute ``gluster`` (`` is supposed to have administrative privileges on ``). - `glusterfs_mount_point_base` = (optional; defaults to *$state_path*\ ``/mnt``, where *$state_path* defaults to ``/var/lib/manila``) Limitations: - directory layout does not support snapshot operations. - `volume mapped layout` (or `volume layout`, or `vol layout` for short): a share is backed by a whole GlusterFS volume. Volume mapped layout is new in Liberty. It can be chosen by setting ``glusterfs_share_layout = layout_volume.GlusterfsVolumeMappedLayout``. Options (required): - `glusterfs_servers` - `glusterfs_volume_pattern` Volume mapped layout is implemented as a common backend of the glusterfs and glusterfs-native drivers; see the description of these options in :doc:`glusterfs_native_driver`: :ref:`gluster_native_manila_conf`. Gluster NFS with volume mapped layout ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A special configuration choice is :: glusterfs_nfs_server_type = Gluster glusterfs_share_layout = layout_volume.GlusterfsVolumeMappedLayout that is, Gluster NFS used to export whole volumes. All other GlusterFS backend configurations (including GlusterFS set up with glusterfs-native) require the ``nfs.export-volumes = off`` GlusterFS setting. Gluster NFS with volume layout requires ``nfs.export-volumes = on``. ``nfs.export-volumes`` is a *cluster-wide* setting, so a given GlusterFS cluster cannot host a share backend with Gluster NFS + volume layout and other share backend configurations at the same time. There is another caveat with ``nfs.export-volumes``: setting it to ``on`` without enough care is a security risk, as the default access control for the volume exports is "allow all". For this reason, while the ``nfs.export-volumes = off`` setting is automatically set by manila for all other share backend configurations, ``nfs.export-volumes = on`` is *not* set by manila in case of a Gluster NFS with volume layout setup. It's left to the GlusterFS admin to make this setting in conjunction with the associated safeguards (that is, for those volumes of the cluster which are not used by manila, access restrictions have to be manually configured through the ``nfs.rpc-auth-{allow,reject}`` options). Known Restrictions ------------------ - The driver does not support network segmented multi-tenancy model, but instead works over a flat network, where the tenants share a network. - If NFS Ganesha is the NFS server used by the GlusterFS backend, then the shares can be accessed by NFSv3 and v4 protocols. However, if Gluster NFS is used by the GlusterFS backend, then the shares can only be accessed by NFSv3 protocol. - All manila shares, which map to subdirectories within a GlusterFS volume, are currently created within a single GlusterFS volume of a GlusterFS storage pool. - The driver does not provide read-only access level for shares. - Assume that share S is exported through Gluster NFS, and tenant machine T has mounted S. 
If at this point access of T to S is revoked through `access-deny`, the pre-existing mount will be still usable and T will still be able to access the data in S as long as that mount is in place. (This violates the principle *Access deny should always result in immediate loss of access to the share*, see http://lists.openstack.org/pipermail/openstack-dev/2015-July/069109.html.) The :mod:`manila.share.drivers.glusterfs` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.glusterfs :noindex: :members: :undoc-members: :show-inheritance: manila-2.0.0/doc/source/devref/tegile_driver.rst0000664000567000056710000001613512701407107023016 0ustar jenkinsjenkins00000000000000.. Copyright (c) 2016 Tegile Systems Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Tegile Driver ============= The Tegile Manila driver uses Tegile IntelliFlash Arrays to provide shared filesystems to OpenStack. The Tegile Driver interfaces with a Tegile Array via the REST API. Requirements ------------ - Tegile IntelliFlash version 3.5.1 - For using CIFS, Active Directory must be configured in the Tegile Array. Supported Operations -------------------- The following operations are supported on a Tegile Array: * Create CIFS/NFS Share * Delete CIFS/NFS Share * Allow CIFS/NFS Share access * Only IP access type is supported for NFS * USER access type is supported for NFS and CIFS * RW and RO access supported * Deny CIFS/NFS Share access * IP access type is supported for NFS * USER access type is supported for NFS and CIFS * Create snapshot * Delete snapshot * Extend share * Shrink share * Create share from snapshot Backend Configuration --------------------- The following parameters need to be configured in the [DEFAULT] section of */etc/manila/manila.conf*: +-----------------------------------------------------------------------------------------------------------------------------------+ | [DEFAULT] | +============================+======================================================================================================+ | **Option** | **Description** | +----------------------------+-----------+------------------------------------------------------------------------------------------+ | enabled_share_backends | Name of the section on manila.conf used to specify a backend. | | | E.g. *enabled_share_backends = tegileNAS* | +----------------------------+------------------------------------------------------------------------------------------------------+ | enabled_share_protocols | Specify a list of protocols to be allowed for share creation. For Tegile driver this can be: | | | *NFS* or *CIFS* or *NFS, CIFS*. 
| +----------------------------+------------------------------------------------------------------------------------------------------+ The following parameters need to be configured in the [backend] section of */etc/manila/manila.conf*: +-------------------------------------------------------------------------------------------------------------------------------------+ | [tegileNAS] | +===============================+=====================================================================================================+ | **Option** | **Description** | +-------------------------------+-----------------------------------------------------------------------------------------------------+ | share_backend_name | A name for the backend. | +-------------------------------+-----------------------------------------------------------------------------------------------------+ | share_driver | Python module path. For Tegile driver this must be: | | | *manila.share.drivers.tegile.tegile.TegileShareDriver*. | +-------------------------------+-----------------------------------------------------------------------------------------------------+ | driver_handles_share_servers| DHSS, Driver working mode. For Tegile driver **this must be**: | | | *False*. | +-------------------------------+-----------------------------------------------------------------------------------------------------+ | tegile_nas_server | Tegile array IP to connect from the Manila node. | +-------------------------------+-----------------------------------------------------------------------------------------------------+ | tegile_nas_login | This field is used to provide username credential to Tegile array. | +-------------------------------+-----------------------------------------------------------------------------------------------------+ | tegile_nas_password | This field is used to provide password credential to Tegile array. | +-------------------------------+-----------------------------------------------------------------------------------------------------+ | tegile_default_project | This field can be used to specify the default project in Tegile array where shares are created. | | | This field is optional. | +-------------------------------+-----------------------------------------------------------------------------------------------------+ Below is an example of a valid configuration of Tegile driver: | ``[DEFAULT]`` | ``enabled_share_backends = tegileNAS`` | ``enabled_share_protocols = NFS,CIFS`` | ``[tegileNAS]`` | ``driver_handles_share_servers = False`` | ``share_backend_name = tegileNAS`` | ``share_driver = manila.share.drivers.tegile.tegile.TegileShareDriver`` | ``tegile_nas_server = 10.12.14.16`` | ``tegile_nas_login = admin`` | ``tegile_nas_password = password`` | ``tegile_default_project = financeshares`` Restart of :term:`manila-share` service is needed for the configuration changes to take effect. Restrictions ------------ The Tegile driver has the following restrictions: - IP access type is supported only for NFS. - Only FLAT network is supported. The :mod:`manila.share.drivers.tegile.tegile` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.tegile.tegile :noindex: :members: :undoc-members: :show-inheritance: :exclude-members: TegileAPIExecutor, debugger manila-2.0.0/doc/source/devref/api_microversion_history.rst0000664000567000056710000000011012701407107025305 0ustar jenkinsjenkins00000000000000.. 
include:: ../../../manila/api/openstack/rest_api_version_history.rst manila-2.0.0/doc/source/devref/ganesha.rst0000664000567000056710000001731512701407107021601 0ustar jenkinsjenkins00000000000000.. Copyright 2015 Red Hat, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Ganesha Library =============== The Ganesha Library provides base classes that can be used by drivers to provision shares via NFS (NFSv3 and NFSv4), utilizing the NFS-Ganesha NFS server. Supported operations -------------------- - Allow NFS Share access - Only IP access type is supported. - Deny NFS Share access Requirements ------------ `NFS-Ganesha `__ 2.1 or newer. NFS-Ganesha configuration ------------------------- The library has just modest requirements against general NFS-Ganesha (in the following: Ganesha) configuration; a best effort was made to remain agnostic towards it as much as possible. This section describes the few requirements. Note that Ganesha's concept of storage backend modules is called FSAL ("File System Abstraction Layer"). The FSAL the driver intends to leverage needs to be enabled in Ganesha config. Beyond that (with default manila config) the following line is needed to be present in the Ganesha config file (that defaults to /etc/ganesha/ganesha.conf): ``%include /etc/ganesha/export.d/INDEX.conf`` The above paths can be customized through manila configuration as follows: - `ganesha_config_dir` = toplevel directory for Ganesha configuration, defaults to /etc/ganesha - `ganesha_config_path` = location of the Ganesha config file, defaults to ganesha.conf in `ganesha_config_dir` - `ganesha_export_dir` = directory where manila generated config bits are stored, defaults to `export.d` in `ganesha_config_dir`. The following line is required to be included (with value expanded) in the Ganesha config file (at `ganesha_config_path`): ``%include /INDEX.conf`` Further Ganesha related manila configuration -------------------------------------------- There are further Ganesha related options in manila (which affect the behavior of Ganesha, but do not affect how to set up the Ganesha service itself). These are: - `ganesha_service_name` = name of the system service representing Ganesha, defaults to ganesha.nfsd - `ganesha_db_path` = location of on-disk database storing permanent Ganesha state - `ganesha_export_template_dir` = directory from where Ganesha loads export customizations (cf. "Customizing Ganesha exports"). Using Ganesha Library in drivers -------------------------------- A driver that wants to use the Ganesha Library has to inherit from ``driver.GaneshaMixin``. The driver has to contain a subclass of ``ganesha.GaneshaNASHelper``, instantiate it along with the driver instance and delegate ``allow_access`` and ``deny_access`` methods to it (when appropriate, ie. when ``access_proto`` is NFS). In the following we explain what has to be implemented by the ``ganesha.GaneshaNASHelper`` subclass (to which we refer as "helper class"). 
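For orientation, below is a minimal sketch of this pattern, assuming a
hypothetical backend. The class names (``MyGaneshaHelper``, ``MyShareDriver``)
and the simplified method signatures are illustrative only; consult the
Ganesha Library source and an existing consumer such as the GlusterFS drivers
for the authoritative interfaces.

::

    # Illustrative sketch only; class and method names are hypothetical and
    # signatures are simplified.
    from manila.share import driver
    from manila.share.drivers import ganesha


    class MyGaneshaHelper(ganesha.GaneshaNASHelper):
        def _fsal_hook(self, base_path, share, access):
            # Return the FSAL subblock for the backend in use; "Name" is the
            # only mandatory key (see below).
            return {"Name": "GLUSTER"}


    class MyShareDriver(driver.ExecuteMixin, driver.GaneshaMixin,
                        driver.ShareDriver):
        def do_setup(self, context):
            super(MyShareDriver, self).do_setup(context)
            # Instantiate the helper along with the driver instance.
            self.ganesha = MyGaneshaHelper(self._execute, self.configuration)
            self.ganesha.init_helper()

        def allow_access(self, context, share, access, share_server=None):
            # Delegate to the helper only when the share protocol is NFS.
            if share['share_proto'].lower().startswith('nfs'):
                self.ganesha.allow_access('/', share, access)

        def deny_access(self, context, share, access, share_server=None):
            if share['share_proto'].lower().startswith('nfs'):
                self.ganesha.deny_access('/', share, access)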
Ganesha exports are described by so-called *Ganesha export blocks* (introduced in the 2.* release series), that is, snippets of Ganesha config specifying key-pair values. The Ganesha Library generates sane default export blocks for the exports it manages, with one thing left blank, the so-called *FSAL subblock*. The helper class has to implement the ``_fsal_hook`` method which returns the FSAL subblock (in Python represented as a dict with string keys and values). It has one mandatory key, ``Name``, to which the value should be the name of the FSAL (eg.: ``{"Name": "GLUSTER"}``). Further content of it is optional and FSAL specific. Customizing Ganesha exports --------------------------- As noted, the Ganesha Library provides sane general defaults. However, the driver is allowed to: - customize defaults - allow users to customize exports The config format for Ganesha Library is called *export block template*. They are syntactically either Ganesha export blocks, (please consult the Ganesha documentation about the format), or isomorphic JSON (as Ganesha export blocks are by-and-large equivalent to arrayless JSON), with two special placeholders for values: ``@config`` and ``@runtime``. ``@config`` means a value that shall be filled from manila config, and ``@runtime`` means a value that's filled at runtime with dynamic data. As an example, we show the library's defaults in JSON format (also valid Python literal): :: { "EXPORT": { "Export_Id": "@runtime", "Path": "@runtime", "FSAL": { "Name": "@config" }, "Pseudo": "@runtime", "SecType": "sys", "Tag": "@runtime", "CLIENT": { "Clients": "@runtime", "Access_Type": "RW" }, "Squash": "None" } } The Ganesha Library takes these values from *manila/share/drivers/ganesha/conf/00-base-export-template.conf* where the same data is stored in Ganesha conf format (also supplied with comments). For customization, the driver has to extend the ``_default_config_hook`` method as follows: - take the result of the super method (a dict representing an export block template) - set up another export block dict that include your custom values, either by - using a predefined export block dict stored in code - loading a predefined export block from the manila source tree - loading an export block from an user exposed location (to allow user configuration) - merge the two export block dict using the ``ganesha_utils.patch`` method - return the result With respect to *loading export blocks*, that can be done through the utility method ``_load_conf_dir``. Known Restrictions ------------------ - The library does not support network segmented multi-tenancy model but instead works over a flat network, where the tenants share a network. .. _ganesha_known_issues Known Issues ------------ - The export location for shares of a driver that uses the Ganesha Library will be of the format ``:/share-``. However, this is incomplete information, because it pertains only to NFSv3 access, which is partially broken. NFSv4 mounts work well but the actual NFSv4 export paths differ from the above. In detail: - The export location is usable only for NFSv3 mounts. - The export location works only for the first access rule that's added for the given share. Tenants that should be allowed to access according to a further access rule will be refused (cf. https://bugs.launchpad.net/manila/+bug/1513061). 
- The share is, however, exported through NFSv4, just on paths that differ from the one indicated by the export location, namely at: ``:/share---``, where ```` ranges over the ID-s of access rules of the share (and the export with ```` is accessible according to the access rule of that ID). - NFSv4 access also works with pseudofs. That is, the tenant can do a v4 mount of``:/`` and access the shares allowed for her at the respective ``share---`` subdirectories. The :mod:`manila.share.drivers.ganesha` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.ganesha :noindex: :members: :undoc-members: :show-inheritance: manila-2.0.0/doc/source/devref/rpc.rst0000664000567000056710000003212312701407107020751 0ustar jenkinsjenkins00000000000000.. Copyright (c) 2010 Citrix Systems, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. AMQP and manila =============== AMQP is the messaging technology chosen by the OpenStack cloud. The AMQP broker, either RabbitMQ or Qpid, sits between any two manila components and allows them to communicate in a loosely coupled fashion. More precisely, manila components (the compute fabric of OpenStack) use Remote Procedure Calls (RPC hereinafter) to communicate to one another; however such a paradigm is built atop the publish/subscribe paradigm so that the following benefits can be achieved: * Decoupling between client and servant (such as the client does not need to know where the servant's reference is). * Full a-synchronism between client and servant (such as the client does not need the servant to run at the same time of the remote call). * Random balancing of remote calls (such as if more servants are up and running, one-way calls are transparently dispatched to the first available servant). Manila uses direct, fanout, and topic-based exchanges. The architecture looks like the one depicted in the figure below: .. image:: /images/rpc/arch.png :width: 60% .. Manila implements RPC (both request+response, and one-way, respectively nicknamed 'rpc.call' and 'rpc.cast') over AMQP by providing an adapter class which take cares of marshaling and unmarshaling of messages into function calls. Each manila service (for example Compute, Volume, etc.) create two queues at the initialization time, one which accepts messages with routing keys 'NODE-TYPE.NODE-ID' (for example compute.hostname) and another, which accepts messages with routing keys as generic 'NODE-TYPE' (for example compute). The former is used specifically when Manila-API needs to redirect commands to a specific node like 'euca-terminate instance'. In this case, only the compute node whose host's hypervisor is running the virtual machine can kill the instance. The API acts as a consumer when RPC calls are request/response, otherwise is acts as publisher only. Manila RPC Mappings ------------------- The figure below shows the internals of a message broker node (referred to as a RabbitMQ node in the diagrams) when a single instance is deployed and shared in an OpenStack cloud. 
Every manila component connects to the message broker and, depending on its personality (for example a compute node or a network node), may use the queue either as an Invoker (such as API or Scheduler) or a Worker (such as Compute, Volume or Network). Invokers and Workers do not actually exist in the manila object model, but we are going to use them as an abstraction for sake of clarity. An Invoker is a component that sends messages in the queuing system via two operations: 1) rpc.call and ii) rpc.cast; a Worker is a component that receives messages from the queuing system and reply accordingly to rcp.call operations. Figure 2 shows the following internal elements: * Topic Publisher: a Topic Publisher comes to life when an rpc.call or an rpc.cast operation is executed; this object is instantiated and used to push a message to the queuing system. Every publisher connects always to the same topic-based exchange; its life-cycle is limited to the message delivery. * Direct Consumer: a Direct Consumer comes to life if (an only if) a rpc.call operation is executed; this object is instantiated and used to receive a response message from the queuing system; Every consumer connects to a unique direct-based exchange via a unique exclusive queue; its life-cycle is limited to the message delivery; the exchange and queue identifiers are determined by a UUID generator, and are marshaled in the message sent by the Topic Publisher (only rpc.call operations). * Topic Consumer: a Topic Consumer comes to life as soon as a Worker is instantiated and exists throughout its life-cycle; this object is used to receive messages from the queue and it invokes the appropriate action as defined by the Worker role. A Topic Consumer connects to the same topic-based exchange either via a shared queue or via a unique exclusive queue. Every Worker has two topic consumers, one that is addressed only during rpc.cast operations (and it connects to a shared queue whose exchange key is 'topic') and the other that is addressed only during rpc.call operations (and it connects to a unique queue whose exchange key is 'topic.host'). * Direct Publisher: a Direct Publisher comes to life only during rpc.call operations and it is instantiated to return the message required by the request/response operation. The object connects to a direct-based exchange whose identity is dictated by the incoming message. * Topic Exchange: The Exchange is a routing table that exists in the context of a virtual host (the multi-tenancy mechanism provided by Qpid or RabbitMQ); its type (such as topic vs. direct) determines the routing policy; a message broker node will have only one topic-based exchange for every topic in manila. * Direct Exchange: this is a routing table that is created during rpc.call operations; there are many instances of this kind of exchange throughout the life-cycle of a message broker node, one for each rpc.call invoked. * Queue Element: A Queue is a message bucket. Messages are kept in the queue until a Consumer (either Topic or Direct Consumer) connects to the queue and fetch it. Queues can be shared or can be exclusive. Queues whose routing key is 'topic' are shared amongst Workers of the same personality. .. image:: /images/rpc/rabt.png :width: 60% .. RPC Calls --------- The diagram below shows the message flow during an rpc.call operation: 1. 
a Topic Publisher is instantiated to send the message request to the queuing system; immediately before the publishing operation, a Direct Consumer is instantiated to wait for the response message. 2. once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic.host') and passed to the Worker in charge of the task. 3. once the task is completed, a Direct Publisher is allocated to send the response message to the queuing system. 4. once the message is dispatched by the exchange, it is fetched by the Direct Consumer dictated by the routing key (such as 'msg_id') and passed to the Invoker. .. image:: /images/rpc/flow1.png :width: 60% .. RPC Casts --------- The diagram below the message flow during an rp.cast operation: 1. A Topic Publisher is instantiated to send the message request to the queuing system. 2. Once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic') and passed to the Worker in charge of the task. .. image:: /images/rpc/flow2.png :width: 60% .. AMQP Broker Load ---------------- At any given time the load of a message broker node running either Qpid or RabbitMQ is function of the following parameters: * Throughput of API calls: the number of API calls (more precisely rpc.call ops) being served by the OpenStack cloud dictates the number of direct-based exchanges, related queues and direct consumers connected to them. * Number of Workers: there is one queue shared amongst workers with the same personality; however there are as many exclusive queues as the number of workers; the number of workers dictates also the number of routing keys within the topic-based exchange, which is shared amongst all workers. The figure below shows the status of a RabbitMQ node after manila components' bootstrap in a test environment. Exchanges and queues being created by manila components are: * Exchanges 1. manila (topic exchange) * Queues 1. compute.phantom (phantom is hostname) 2. compute 3. network.phantom (phantom is hostname) 4. network 5. share.phantom (phantom is hostname) 6. share 7. scheduler.phantom (phantom is hostname) 8. scheduler .. image:: /images/rpc/state.png :width: 60% .. RabbitMQ Gotchas ---------------- Manila uses Kombu to connect to the RabbitMQ environment. Kombu is a Python library that in turn uses AMQPLib, a library that implements the standard AMQP 0.8 at the time of writing. When using Kombu, Invokers and Workers need the following parameters in order to instantiate a Connection object that connects to the RabbitMQ server (please note that most of the following material can be also found in the Kombu documentation; it has been summarized and revised here for sake of clarity): * Hostname: The hostname to the AMQP server. * Userid: A valid username used to authenticate to the server. * Password: The password used to authenticate to the server. * Virtual_host: The name of the virtual host to work with. This virtual host must exist on the server, and the user must have access to it. Default is "/". * Port: The port of the AMQP server. Default is 5672 (amqp). The following parameters are default: * Insist: insist on connecting to a server. In a configuration with multiple load-sharing servers, the Insist option tells the server that the client is insisting on a connection to the specified server. Default is False. * Connect_timeout: the timeout in seconds before the client gives up connecting to the server. The default is no timeout. 
* SSL: use SSL to connect to the server. The default is False. More precisely Consumers need the following parameters: * Connection: the above mentioned Connection object. * Queue: name of the queue. * Exchange: name of the exchange the queue binds to. * Routing_key: the interpretation of the routing key depends on the value of the exchange_type attribute. * Direct exchange: if the routing key property of the message and the routing_key attribute of the queue are identical, then the message is forwarded to the queue. * Fanout exchange: messages are forwarded to the queues bound the exchange, even if the binding does not have a key. * Topic exchange: if the routing key property of the message matches the routing key of the key according to a primitive pattern matching scheme, then the message is forwarded to the queue. The message routing key then consists of words separated by dots (".", like domain names), and two special characters are available; star ("") and hash ("#"). The star matches any word, and the hash matches zero or more words. For example ".stock.#" matches the routing keys "usd.stock" and "eur.stock.db" but not "stock.nasdaq". * Durable: this flag determines the durability of both exchanges and queues; durable exchanges and queues remain active when a RabbitMQ server restarts. Non-durable exchanges/queues (transient exchanges/queues) are purged when a server restarts. It is worth noting that AMQP specifies that durable queues cannot bind to transient exchanges. Default is True. * Auto_delete: if set, the exchange is deleted when all queues have finished using it. Default is False. * Exclusive: exclusive queues (such as non-shared) may only be consumed from by the current connection. When exclusive is on, this also implies auto_delete. Default is False. * Exchange_type: AMQP defines several default exchange types (routing algorithms) that covers most of the common messaging use cases. * Auto_ack: acknowledgment is handled automatically once messages are received. By default auto_ack is set to False, and the receiver is required to manually handle acknowledgment. * No_ack: it disable acknowledgment on the server-side. This is different from auto_ack in that acknowledgment is turned off altogether. This functionality increases performance but at the cost of reliability. Messages can get lost if a client dies before it can deliver them to the application. * Auto_declare: if this is True and the exchange name is set, the exchange will be automatically declared at instantiation. Auto declare is on by default. Publishers specify most the parameters of Consumers (such as they do not specify a queue name), but they can also specify the following: * Delivery_mode: the default delivery mode used for messages. The value is an integer. The following delivery modes are supported by RabbitMQ: * 1 or "transient": the message is transient. Which means it is stored in memory only, and is lost if the server dies or restarts. * 2 or "persistent": the message is persistent. Which means the message is stored both in-memory, and on disk, and therefore preserved if the server dies or restarts. The default value is 2 (persistent). During a send operation, Publishers can override the delivery mode of messages so that, for example, transient messages can be sent over a durable queue. 
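For a concrete (if simplified) picture of how rpc.call and rpc.cast look in
code: manila builds its RPC layer on the oslo.messaging library rather than
driving Kombu directly, and wraps these calls in per-service ``rpcapi``
modules. The sketch below only illustrates the two invocation styles; the
method names and arguments are made up.

::

    # Simplified illustration of rpc.cast vs. rpc.call with oslo.messaging.
    from oslo_config import cfg
    import oslo_messaging as messaging

    transport = messaging.get_transport(cfg.CONF)

    # Topic-based target: messages are routed through the topic exchange.
    target = messaging.Target(topic='manila-share', version='1.0')
    client = messaging.RPCClient(transport, target)

    ctxt = {}  # request context, serialized into the message

    # rpc.cast: one-way, no response expected (Topic Publisher only).
    client.cast(ctxt, 'fake_one_way_method', share_id='fake-id')

    # rpc.call: request/response -- a Direct Consumer waits for the reply.
    cctxt = client.prepare(server='hostname')  # routes to the 'topic.host' queue
    result = cctxt.call(ctxt, 'fake_request_response_method', share_id='fake-id')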
manila-2.0.0/doc/source/devref/capabilities_and_extra_specs.rst0000664000567000056710000002255412701407107026047 0ustar jenkinsjenkins00000000000000Capabilities and Extra-Specs ============================ Manila Administrators create share types with extra-specs to allow users to request a type of share to create. The Administrator chooses a name for the share type and decides how to communicate the significance of the different share types in terms that the users should understand or need to know. By design, most of the details of a share type (the extra- specs) are not exposed to users -- only Administrators. Share Types ----------- Refer to the manila client command-line help for information on how to create a share type and set "extra-spec" key/value pairs for a share type. Extra-Specs ----------- There are 3 types of extra-specs: required, scoped, and un-scoped. Manila *requires* the driver_handles_share_servers extra-spec. *Scoped* extra-specs use a prefix followed by a colon to define a namespace for scoping the extra-spec. A prefix could be a vendor name or acronym and is a hint that this extra-spec key/value only applies to that vendor's driver. Scoped extra-specs are not used by the scheduler to determine where a share is created (except for the special `capabilities` prefix). It is up to each driver implementation to determine how to use scoped extra-specs and to document them. The prefix "capabilities" is a special prefix to indicate extra-specs that are treated like un-scoped extra-specs. In the CapabilitiesFilter the "capabilities:" is stripped from the key and then the extra-spec key and value are used as an un-scoped extra-spec. *Un-scoped* extra-specs have a key that either starts with "capabilities:" or does not contain a colon. When the CapabilitiesFilter is enabled (it is enabled by default), the scheduler will only create a share on a backend that reports capabilities that match the share type's un-scoped extra-spec keys. The CapabilitiesFilter uses the following for matching operators: * No operator This defaults to doing a python ==. Additionally it will match boolean values. * **<=, >=, ==, !=** This does a float conversion and then uses the python operators as expected. * **** This either chooses a host that has partially matching string in the capability or chooses a host if it matches any value in a list. For example, if " sse4" is used, it will match a host that reports capability of "sse4_1" or "sse4_2". * **** This chooses a host that has one of the items specified. If the first word in the string is , another and value pair can be concatenated. Examples are " 3", " 3 5", and " 1 3 7". This is for string values only. * **** This chooses a host that matches a boolean capability. An example extra-spec value would be " True". * **=** This does a float conversion and chooses a host that has equal to or greater than the resource specified. This operator behaves this way for historical reasons. * **s==, s!=, s>=, s>, s<=, s<** The "s" indicates it is a string comparison. These choose a host that satisfies the comparison of strings in capability and specification. For example, if "capabilities:replication_type s== dr", a host that reports replication_type of "dr" will be chosen. For vendor-specific capabilities (which need to be visible to the CapabilityFilter), it is recommended to use the vendor prefix followed by an underscore. 
This is not a strict requirement, but will provide a consistent look along-side the scoped extra-specs and will be a clear indicator of vendor capabilities vs. common capabilities. Common Capabilities ------------------- For capabilities that apply to multiple backends a common capability can be created. * `driver_handles_share_servers` is a special, required, user-visible common capability. Added in Kilo. * `dedupe` - indicates that a backend/pool can provide shares using some deduplication technology. The default value of the dedupe capability (if a driver doesn't report it) is False. Drivers cannot report to the scheduler that they support both dedupe and non-deduped share. For each pool it's either always on or always off. Administrators can make a share type use deduplication by setting this extra-spec to ' True'. Administrators can prevent a share type from using deduplication by setting this extra-spec to ' False'. Added in Liberty. * `compression` - indicates that a backend/pool can provide shares using some compression technology. The default value of the compression capability (if a driver doesn't report it) is False. Drivers cannot report to the scheduler that they support both compression and non-compression. For each pool it's either always on or always off. Administrators can make a share type use compression by setting this extra-spec to ' True'. Administrators can prevent a share type from using compression by setting this extra-spec to ' False'. Added in Liberty. * `thin_provisioning` - shares will not be space guaranteed and overprovisioning will be enabled. This capability defaults to False. Backends/pools that support thin provisioning must report True for this capability. Administrators can make a share type use thin provisioned shares by setting this extra-spec to ' True'. If a driver reports thin_provisioning=False (the default) then it's assumed that the driver is doing thick provisioning and overprovisioning is turned off. If an array can technically support both thin and thick provisioning in a pool, the driver still needs to programmatically determine which to use. This should be done by configuring one pool for thin and another pool for thick. So, a manila pool will always report thin_provisioning as True or False. Added in Liberty. * `qos` - indicates that a backend/pool can provide shares using some QoS (Quality of Service) specification. The default value of the qos capability (if a driver doesn't report it) is False. Administrators can make a share type use QoS by setting this extra-spec to ' True' and also setting the relevant QoS-related extra specs for the drivers being used. Administrators can prevent a share type from using QoS by setting this extra-spec to ' False'. Different drivers have different ways of specifying QoS limits (or guarantees) and this extra spec merely allows the scheduler to filter by pools that either have or don't have QoS support enabled. Added in Mitaka. Reporting Capabilities ---------------------- Drivers report capabilities as part of the updated stats (e.g. capacity) for their backend/pools. This is how a backend/pool advertizes its ability to provide a share that matches the capabilities requested in the share type extra-specs. Developer impact ---------------- Developers should update their drivers to include all backend and pool capacities and capabilities in the share stats it reports to scheduler. Below is an example having multiple pools. 
"my" is used as an example vendor prefix: :: { 'driver_handles_share_servers': 'False', #\ 'share_backend_name': 'My Backend', # backend level 'vendor_name': 'MY', # mandatory/fixed 'driver_version': '1.0', # stats & capabilities 'storage_protocol': 'NFS_CIFS', #/ #\ 'my_capability_1': 'custom_val', # "my" optional vendor 'my_capability_2': True, # stats & capabilities #/ 'pools': [ {'pool_name': 'thin-dedupe-compression pool', #\ 'total_capacity_gb': 500, # mandatory stats for 'free_capacity_gb': 230, # pools 'reserved_percentage': 0, #/ #\ 'dedupe': True, # common capabilities 'compression': True, # 'qos': True, # this backend supports QoS 'thin_provisioning': True, # 'max_over_subscription_ratio': 10, # (mandatory for thin) 'provisioned_capacity_gb': 270, # (mandatory for thin) #/ 'my_dying_disks': 100, #\ 'my_super_hero_1': 'Hulk', # "my" optional vendor 'my_super_hero_2': 'Spider-Man' # stats & capabilities #/ }, {'pool_name': 'thick pool', 'total_capacity_gb': 1024, 'free_capacity_gb': 1024, 'qos': False, 'reserved_percentage': 0, 'dedupe': False, 'compression': False, 'thin_provisioning': False, 'my_dying_disks': 200, 'my_super_hero_1': 'Batman', 'my_super_hero_2': 'Robin', }, ] } Work Flow --------- 1) Share Backends report how many pools and what those pools look like and are capable of to scheduler; 2) When request comes in, scheduler picks a pool that fits the need best to serve the request, it passes the request to the backend where the target pool resides; 3) Share driver gets the message and lets the target pool serve the request as scheduler instructed. Share type extra-specs (scoped and un-scoped) are available for the driver implementation to use as-needed. manila-2.0.0/doc/source/devref/huawei_nas_driver.rst0000664000567000056710000002062412701407107023666 0ustar jenkinsjenkins00000000000000.. Copyright (c) 2015 Huawei Technologies Co., Ltd. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Huawei Driver ============= Huawei NAS Driver is a plugin based the OpenStack manila service. The Huawei NAS Driver can be used to provide functions such as the share and snapshot for virtual machines(instances) in OpenStack. Huawei NAS Driver enables the OceanStor V3 series V300R002 storage system to provide only network filesystems for OpenStack. Requirements ------------ - The OceanStor V3 series V300R002 storage system. - The following licenses should be activated on V3 for File: * CIFS * NFS * HyperSnap License (for snapshot) Supported Operations -------------------- The following operations is supported on V3 storage: - Create CIFS/NFS Share - Delete CIFS/NFS Share - Allow CIFS/NFS Share access * IP and USER access types are supported for NFS(ro/rw). * Only USER access type is supported for CIFS(ro/rw). 
- Deny CIFS/NFS Share access - Create snapshot - Delete snapshot - Manage CIFS/NFS share - Support pools in one backend - Extend share - Shrink share - Support multi RestURLs() - Support multi-tenancy - Ensure share - Create share from snapshot - Support QoS Pre-Configurations on Huawei ---------------------------- 1. Create a driver configuration file. The driver configuration file name must be the same as the manila_huawei_conf_file item in the manila_conf configuration file. 2. Configure Product. Product indicates the storage system type. For the OceanStor V3 series V300R002 storage systems, the driver configuration file is as follows: :: V3 x.x.x.x abc;CTE0.A.H1 https://x.x.x.x:8088/deviceManager/rest/; https://x.x.x.x:8088/deviceManager/rest/ xxxxxxxxx xxxxxxxxx xxxxxxxxx xxxxxxxx 3 60 x.x.x.x xxxxxxxxx xxxxxxxxx - `Product` is a type of a storage product. Set it to `V3`. - `LogicalPortIP` is an IP address of the logical port. - `Port` is a port name list of bond port or ETH port, used to create vlan and logical port. Multi Ports can be configured in (separated by ";"). If is not configured, then will choose an online port on the array. - `RestURL` is an access address of the REST interface. Multi RestURLs can be configured in (separated by ";"). When one of the RestURL failed to connect, driver will retry another automatically. - `UserName` is a user name of an administrator. - `UserPassword` is a password of an administrator. - `StoragePool` is a name of a storage pool to be used. - `AllocType` is the file system space allocation type, optional value is "Thick" or "Thin". - `WaitInterval` is the interval time of querying the file system status. - `Timeout` is the timeout period for waiting command execution of a device to complete. - `NFSClient\IP` is the backend IP in admin network to use for mounting NFS share. - `CIFSClient\UserName` is the backend user name in admin network to use for mounting CIFS share. - `CIFSClient\UserPassword` is the backend password in admin network to use for mounting CIFS share. Backend Configuration --------------------- Modify the `manila.conf` manila configuration file and add share_driver and manila_huawei_conf_file items. Example for configuring a storage system: - `share_driver` = manila.share.drivers.huawei.huawei_nas.HuaweiNasDriver - `manila_huawei_conf_file` = /etc/manila/manila_huawei_conf.xml - `driver_handles_share_servers` = True or False .. note:: - If `driver_handles_share_servers` is True, the driver will choose a port in to create vlan and logical port for each tenant network. And the share type with the DHSS extra spec should be set to True when creating shares. - If `driver_handles_share_servers` is False, then will use the IP in . Also the share type with the DHSS extra spec should be set to False when creating shares. Restart of manila-share service is needed for the configuration changes to take effect. Share Types ----------- When creating a share, a share type can be specified to determine where and how the share will be created. If a share type is not specified, the `default_share_type` set in the manila configuration file is used. Manila requires that the share type includes the `driver_handles_share_servers` extra-spec. This ensures that the share will be created on a backend that supports the requested driver_handles_share_servers (share networks) capability. For the Huawei driver, this must be set to False. Another common manila extra-spec used to determine where a share is created is `share_backend_name`. 
When this extra-spec is defined in the share type, the share will be created on a backend with a matching share_backend_name. Manila "share types" may contain qualified extra-specs, -extra-specs that have significance for the backend driver and the CapabilityFilter. This commit makes the Huawei driver report the following boolean capabilities: - capabilities:dedupe - capabilities:compression - capabilities:thin_provisioning - capabilities:huawei_smartcache * huawei_smartcache:cachename - capabilities:huawei_smartpartition * huawei_smartpartition:partitionname - capabilities:qos * qos:maxIOPS * qos:minIOPS * qos:minbandwidth * qos:maxbandwidth * qos:latency * qos:iotype The scheduler will choose a host that supports the needed capability when the CapabilityFilter is used and a share type uses one or more of the following extra-specs: - capabilities:dedupe=' True' or ' False' - capabilities:compression=' True' or ' False' - capabilities:thin_provisioning=' True' or ' False' - capabilities:huawei_smartcache=' True' or ' False' * huawei_smartcache:cachename=test_cache_name - capabilities:huawei_smartpartition=' True' or ' False' * huawei_smartpartition:partitionname=test_partition_name - capabilities:qos=' True' or ' False' * qos:maxIOPS=100 * qos:minIOPS=10 * qos:maxbandwidth=100 * qos:minbandwidth=10 * qos:latency=10 * qos:iotype=0 `thin_provisioning` will be reported as [True, False] for Huawei backends. `dedupe` will be reported as [True, False] for Huawei backends. `compression` will be reported as [True, False] for Huawei backends. `huawei_smartcache` will be reported as [True, False] for Huawei backends. Adds SSDs into a high-speed cache pool and divides the pool into multiple cache partitions to cache hotspot data in random and small read I/Os. `huawei_smartpartition` will be reported as [True, False] for Huawei backends. Add share to the smartpartition named 'test_partition_name'. Allocates cache resources based on service characteristics, ensuring the quality of critical services. `qos` will be reported as True for backends that use QoS (Quality of Service) specification. Restrictions ------------ The Huawei driver has the following restrictions: - IP and USER access types are supported for NFS. - Only LDAP domain is supported for NFS. - Only USER access type is supported for CIFS. - Only AD domain is supported for CIFS. The :mod:`manila.share.drivers.huawei.huawei_nas` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.huawei.huawei_nas :noindex: :members: :undoc-members: :show-inheritance: manila-2.0.0/doc/source/devref/gerrit.rst0000664000567000056710000000102412701407107021455 0ustar jenkinsjenkins00000000000000Code Reviews with Gerrit ======================== Manila uses the `Gerrit`_ tool to review proposed code changes. The review site is http://review.openstack.org. Gerrit is a complete replacement for Github pull requests. `All Github pull requests to the manila repository will be ignored`. See the `Development Workflow`_ for more detailed documentation on how to work with Gerrit. .. _Gerrit: http://code.google.com/p/gerrit .. _Development Workflow: http://docs.openstack.org/infra/manual/developers.html#development-workflow manila-2.0.0/doc/source/devref/addmethod.openstackapi.rst0000664000567000056710000000524512701407107024603 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 OpenStack LLC All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Adding a Method to the OpenStack API ==================================== The interface is a mostly RESTful API. REST stands for Representational State Transfer and provides an architecture "style" for distributed systems using HTTP for transport. Figure out a way to express your request and response in terms of resources that are being created, modified, read, or destroyed. Routing ------- To map URLs to controllers+actions, OpenStack uses the Routes package, a clone of Rails routes for Python implementations. See http://routes.groovie.org/ for more information. URLs are mapped to "action" methods on "controller" classes in ``manila/api/v1/router.py``. See http://routes.groovie.org/manual.html for all syntax, but you'll probably just need these two: - mapper.connect() lets you map a single URL to a single action on a controller. - mapper.resource() connects many standard URLs to actions on a controller. Controllers and actions ----------------------- Controllers live in ``manila/api/v1`` and ``manila/api/contrib``. See ``manila/api/v1/shares.py`` for an example. Action methods take parameters that are sucked out of the URL by mapper.connect() or .resource(). The first two parameters are self and the WebOb request, from which you can get the req.environ, req.body, req.headers, etc. Serialization ------------- Actions return a dictionary, and wsgi.Controller serializes that to JSON or XML based on the request's content-type. If you define a new controller, you'll need to define a ``_serialization_metadata`` attribute on the class, to tell wsgi.Controller how to convert your dictionary to XML. It needs to know the singular form of any list tag (e.g. ```` list contains ```` tags) and which dictionary keys are to be XML attributes as opposed to subtags (e.g. ```` instead of ``4``). See `manila/api/v1/shares.py` for an example. Faults ------ If you need to return a non-200, you should return faults.Fault(webob.exc.HTTPNotFound()) replacing the exception as appropriate. manila-2.0.0/doc/source/devref/export_location_metadata.rst0000664000567000056710000000343312701407107025240 0ustar jenkinsjenkins00000000000000Export Location Metadata ======================== Manila shares can have one or more export locations. The exact number depends on the driver and the storage controller, and there is no preference for more or fewer export locations. Usually drivers create an export location for each physical network interface through which the share can be accessed. Because not all export locations have the same qualities, Manila allows drivers to add additional keys to the dict returned for each export location when a share is created. The share manager stores these extra keys and values in the database and they are available to the API service, which may expose them through the REST API or use them for filtering. Metadata Keys ============= Only keys defined in this document are valid. Arbitrary driver-defined keys are not allowed. 
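As a rough illustration of how a driver attaches such keys (which are
described next), the sketch below shows export locations returned as a list
of dicts instead of plain path strings. The exact structure accepted by the
share manager is defined by the driver interface -- in particular, whether a
key lives at the top level or under a nested ``metadata`` dict -- so treat
this layout as an assumption to verify against the code.

::

    # Hedged sketch of a driver returning export locations with extra keys.
    # Nesting 'preferred' under 'metadata' is an assumption based on common
    # driver practice; 'is_admin_only' is shown at the top level.
    def create_share(self, context, share, share_server=None):
        # ... provision the share on the backend ...
        return [
            {
                'path': '10.254.0.5:/shares/share-fake-id',     # tenant network
                'is_admin_only': False,
                'metadata': {'preferred': True},
            },
            {
                'path': '192.168.100.5:/shares/share-fake-id',  # admin network
                'is_admin_only': True,
                'metadata': {'preferred': False},
            },
        ]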
The following keys are defined: * `is_admin_only` - May be True or False. Defaults to False. Indicates that the export location exists for administrative purposes. If is_admin_only=True, then the export location is hidden from non-admin users calling the REST API. Also, these export locations are assumed to be reachable directly from the admin network, which is important for drivers that support share servers and which have some export locations only accessible to tenants. * `preferred` - May be True or False. Defaults to False. Indicates that clients should prefer to mount this export location over other export locations that are not preferred. This may be used by drivers which have fast/slow paths to indicate to clients which paths are faster. It could be used to indicate a path is preferred for another reason, as long as the reason isn't one that changes over the life of the manila-share service. This key is always visible through the REST API. manila-2.0.0/doc/source/devref/emc_isilon_driver.rst0000664000567000056710000000601412701407107023661 0ustar jenkinsjenkins00000000000000.. Copyright (c) 2015 EMC Corporation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Isilon Driver ============= The EMC manila driver framework (EMCShareDriver) utilizes EMC storage products to provide shared filesystems to OpenStack. The EMC manila driver is a plugin based driver which is designed to use different plugins to manage different EMC storage products. The Isilon manila driver is a plugin for the EMC manila driver framework which allows manila to interface with an Isilon backend to provide a shared filesystem. The EMC driver framework with the Isilon plugin is referred to as the "Isilon Driver" in this document. This Isilon Driver interfaces with an Isilon cluster via the REST Isilon Platform API (PAPI) and the RESTful Access to Namespace API (RAN). Requirements ------------ - Isilon cluster running OneFS 7.2 or higher Supported Operations -------------------- The following operations are supported on an Isilon cluster: * Create CIFS/NFS Share * Delete CIFS/NFS Share * Allow CIFS/NFS Share access * Only IP access type is supported for NFS and CIFS * Only RW access supported * Deny CIFS/NFS Share access * Create snapshot * Delete snapshot * Create share from snapshot * Extend share Backend Configuration --------------------- The following parameters need to be configured in the manila configuration file for the Isilon driver: * share_driver = manila.share.drivers.emc.driver.EMCShareDriver * driver_handles_share_servers = False * emc_share_backend = isilon * emc_nas_server = * emc_nas_server_port = * emc_nas_login = * emc_nas_password = * emc_nas_root_dir = Restart of :term:`manila-share` service is needed for the configuration changes to take effect. Restrictions ------------ The Isilon driver has the following restrictions: - Only IP access type is supported for NFS and CIFS. - Only FLAT network is supported. The :mod:`manila.share.drivers.emc.driver` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
automodule:: manila.share.drivers.emc.driver :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.share.drivers.emc.plugins.isilon.isilon` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.emc.plugins.isilon.isilon :noindex: :members: :undoc-members: :show-inheritance: manila-2.0.0/doc/source/devref/zfs_on_linux_driver.rst0000664000567000056710000001334712701407112024260 0ustar jenkinsjenkins00000000000000.. Copyright (c) 2016 Mirantis Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ZFS (on Linux) Driver ===================== Manila ZFSonLinux share driver uses ZFS filesystem for exporting NFS shares. Written and tested using Linux version of ZFS. Requirements ------------ * 'NFS' daemon that can be handled via "exportfs" app. * 'ZFS' filesystem packages, either Kernel or FUSE versions. * ZFS zpools that are going to be used by Manila should exist and be configured as desired. Manila will not change zpool configuration. * For remote ZFS hosts according to manila-share service host SSH should be installed. * For ZFS hosts that support replication: * SSH access for each other should be passwordless. * IP used for share exports should be available by ZFS hosts for each other. * Username should be the same for accessing each of ZFS hosts. Supported Operations -------------------- The following operations are supported: * Create NFS Share * Delete NFS Share * Allow NFS Share access * Only IP access type is supported for NFS * Both access levels are supported - 'RW' and 'RO' * Deny NFS Share access * Create snapshot * Delete snapshot * Create share from snapshot * Extend share * Shrink share * Replication (experimental): * Create/update/delete/promote replica operations are supported Possibilities ------------- * Any amount of ZFS zpools can be used by share driver. * Allowed to configure default options for ZFS datasets that are used for share creation. * Any amount of nested datasets is allowed to be used. * All share replicas are read-only, only active one is RW. * All share replicas are synchronized periodically, not continuously. So, status 'in_sync' means latest sync was successful. Time range between syncs equals to value of config global opt 'replica_state_update_interval'. Restrictions ------------ The ZFSonLinux share driver has the following restrictions: * Only IP access type is supported for NFS. * Only FLAT network is supported. * 'Promote share replica' operation will switch roles of current 'secondary' replica and 'active'. It does not make more than one active replica available. * 'Manage share' operation is not yet implemented. * 'SaMBa' based sharing is not yet implemented. Known problems -------------- * Better to avoid usage of Neutron on the same node where ZFS is installed. It leads to bug - https://bugs.launchpad.net/neutron/+bug/1546723 The ZFSonLinux share driver has workaround for it and requires 'nsenter' be installed on the system where ZFS is installed. 
* 'Promote share replica' operation will make ZFS filesystem that became secondary as RO only on NFS level. On ZFS level system will stay mounted as was - RW. Backend Configuration --------------------- The following parameters need to be configured in the manila configuration file for the ZFSonLinux driver: * share_driver = manila.share.drivers.zfsonlinux.driver.ZFSonLinuxShareDriver * driver_handles_share_servers = False * replication_domain = custom_str_value_as_domain_name * if empty, then replication will be disabled * if set then will be able to be used as replication peer for other backend with same value. * zfs_share_export_ip = * zfs_service_ip = * zfs_zpool_list = zpoolname1,zpoolname2/nested_dataset_for_zpool2 * can be one or more zpools * can contain nested datasets * zfs_dataset_creation_options = * readonly,quota,sharenfs and sharesmb options will be ignored * zfs_dataset_name_prefix = * Prefix to be used in each dataset name. * zfs_dataset_snapshot_name_prefix = * Prefix to be used in each dataset snapshot name. * zfs_use_ssh = * set 'False' if ZFS located on the same host as 'manila-share' service * set 'True' if 'manila-share' service should use SSH for ZFS configuration * zfs_ssh_username = * required for replication operations * required for SSH'ing to ZFS host if 'zfs_use_ssh' is set to 'True' * zfs_ssh_user_password = * password for 'zfs_ssh_username' of ZFS host. * used only if 'zfs_use_ssh' is set to 'True' * zfs_ssh_private_key_path = * used only if 'zfs_use_ssh' is set to 'True' * zfs_share_helpers = NFS=manila.share.drivers.zfsonlinux.utils.NFSviaZFSHelper * Approach for setting up helpers is similar to various other share driver * At least one helper should be used. * zfs_replica_snapshot_prefix = * Prefix to be used in dataset snapshot names that are created by 'update replica' operation. Restart of :term:`manila-share` service is needed for the configuration changes to take effect. The :mod:`manila.share.drivers.zfsonlinux.driver` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.zfsonlinux.driver :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.share.drivers.zfsonlinux.utils` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.zfsonlinux.utils :noindex: :members: :undoc-members: :show-inheritance: manila-2.0.0/doc/source/devref/scheduler.rst0000664000567000056710000000303412701407107022142 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Scheduler ========= The :mod:`manila.scheduler.manager` Module ------------------------------------------ .. automodule:: manila.scheduler.manager :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.scheduler.driver` Module ----------------------------------------- .. 
automodule:: manila.scheduler.driver :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.scheduler.simple` Driver ----------------------------------------- .. automodule:: manila.scheduler.simple :noindex: :members: :undoc-members: :show-inheritance: Tests ----- The :mod:`scheduler` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.tests.scheduler :noindex: :members: :undoc-members: :show-inheritance: manila-2.0.0/doc/source/devref/architecture.rst0000664000567000056710000000613012701407107022646 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. Copyright 2014 Mirantis, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Manila System Architecture ========================== The Shared File Systems service is intended to be ran on one or more nodes. Manila uses a sql-based central database that is shared by all manila services in the system. The amount and depth of the data fits into a sql database quite well. For small deployments this seems like an optimal solution. For larger deployments, and especially if security is a concern, manila will be moving towards multiple data stores with some kind of aggregation system. Components ---------- Below you will a brief explanation of the different components. :: /- ( LDAP ) [ Auth Manager ] --- | \- ( DB ) | | | [ Web Dashboard ]- manilaclient -[ manila-api ] -- < AMQP > -- [ manila-scheduler ] -- [ manila-share ] -- ( shared filesystem ) | | | | | < REST > * DB: sql database for data storage. Used by all components (LINKS NOT SHOWN) * Web Dashboard: external component that talks to the api. Beta extended Horizon available here: https://github.com/NetApp/horizon/tree/manila * :term:`manila-api` * Auth Manager: component responsible for users/projects/and roles. Can backend to DB or LDAP. This is not a separate binary, but rather a python class that is used by most components in the system. * :term:`manila-scheduler` * :term:`manila-share` Further Challenges ------------------ * More efficient share/snapshot size calculation * Create a notion of "attached" shares with automation of mount operations * Support for Nova-network as an alternative to Neutron * Support for standalone operation (no dependency on Neutron/Nova-network) * Allow admin-created share-servers and share-networks to be used by multiple tenants * Support creation of new subnets for share servers (to connect VLANs with VXLAN/GRE/etc) * Gateway mediated networking model with NFS-Ganesha * Add support for more backends manila-2.0.0/doc/source/devref/netapp_cluster_mode_driver.rst0000664000567000056710000000651112701407107025576 0ustar jenkinsjenkins00000000000000.. Copyright 2014 Mirantis Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. NetApp Clustered Data ONTAP =========================== The Shared File Systems service can be configured to use NetApp Clustered Data ONTAP (cDOT) version 8.2 and later. Supported Operations -------------------- The following operations are supported on Clustered Data ONTAP: - Create CIFS/NFS Share - Delete CIFS/NFS Share - Allow NFS Share access * IP access type is supported for NFS. * Read/write and read-only access are supported for NFS. - Allow CIFS Share access * User access type is supported for CIFS. * Read/write access is supported for CIFS. - Deny CIFS/NFS Share access - Create snapshot - Delete snapshot - Create share from snapshot - Extend share - Shrink share - Manage share - Unmanage share - Create consistency group - Delete consistency group - Create consistency group from CG snapshot - Create CG snapshot - Delete CG snapshot Supported Operating Modes ------------------------- The cDOT driver supports both 'driver_handles_share_servers' (:term:`DHSS`) modes. If 'driver_handles_share_servers' is True, the driver will create a storage virtual machine (SVM, previously known as vServers) for each unique tenant network and provision each of a tenant's shares into that SVM. This requires the user to specify both a share network as well as a share type with the DHSS extra spec set to True when creating shares. If 'driver_handles_share_servers' is False, the manila admin must configure a single SVM, along with associated LIFs and protocol services, that will be used for provisioning shares. The SVM is specified in the manila config file. Network approach ---------------- L3 connectivity between the storage cluster and manila host must exist, and VLAN segmentation may be configured. All of manila's network plug-ins are supported with the cDOT driver. Supported shared filesystems ---------------------------- - NFS (access by IP address or subnet) - CIFS (authentication by user) Required licenses ----------------- - NFS - CIFS - FlexClone Known restrictions ------------------ - For CIFS shares an external Active Directory (AD) service is required. The AD details should be provided via a manila security service that is attached to the specified share network. - Share access rules for CIFS shares may be created only for existing users in Active Directory. - The time on external security services and storage must be synchronized. The maximum allowed clock skew is 5 minutes. - cDOT supports only flat and VLAN network segmentation types. The :mod:`manila.share.drivers.netapp.common.py` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.netapp.common :noindex: :members: :undoc-members: :show-inheritance: manila-2.0.0/doc/source/devref/emc_vnx_driver.rst0000664000567000056710000002334412701407107023204 0ustar jenkinsjenkins00000000000000.. Copyright (c) 2014 EMC Corporation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. VNX Driver ========== EMC manila driver framework (EMCShareDriver) utilizes the EMC storage products to provide the shared filesystems to OpenStack. The EMC manila driver is a plugin based driver which is designed to use different plugins to manage different EMC storage products. VNX plugin is the plugin which manages the VNX to provide shared filesystems. EMC driver framework with VNX plugin is referred to as VNX driver in this document. This driver performs the operations on VNX by XMLAPI and the File command line. Each backend manages one Data Mover of VNX. Multiple manila backends need to be configured to manage multiple Data Movers. Requirements ------------ - VNX OE for File version 7.1 or higher. - VNX Unified, File only, or Gateway system with single storage backend. - The following licenses should be activated on VNX for File: * CIFS * NFS * SnapSure (for snapshot) * ReplicationV2 (for create share from snapshot) Supported Operations -------------------- The following operations will be supported on VNX array: - Create CIFS/NFS Share - Delete CIFS/NFS Share - Allow CIFS/NFS Share access * Only IP access type is supported for NFS. * Only user access type is supported for CIFS. - Deny CIFS/NFS Share access - Create snapshot - Delete snapshot - Create share from snapshot While the generic driver creates shared filesystems based on Cinder volumes attached to Nova VMs, the VNX driver performs similar operations using the Data Movers on the array. Pre-Configurations on VNX ------------------------- 1. Enable Unicode on Data mover VNX driver requires that the Unicode is enabled on Data Mover. CAUTION: After enabling Unicode, you cannot disable it. If there are some filesystems created before Unicode is enabled on the VNX, consult the storage administrator before enabling Unicode. To check the Unicode status on Data Mover, use the following VNX File command on VNX control station: server_cifs | head where: mover_name = Check the value of `I18N mode` field. UNICODE mode is shown as `I18N mode = UNICODE` To enable the Unicode for Data Mover: uc_config -on -mover where: mover_name = Refer to the document `Using International Character Sets on VNX for File` on [EMC support site](http://support.emc.com) for more information. 2. Enable CIFS service on Data Mover Ensure the CIFS service is enabled on the Data Mover which is going to be managed by VNX driver. To start the CIFS service, use the following command: server_setup -Protocol cifs -option start [=] where: = [=] = Note: If there is 1 GB of memory on the Data Mover, the default is 96 threads; however, if there is over 1 GB of memory, the default number of threads is 256. To check the CIFS service status, use this command: server_cifs | head where: = The command output will show the number of CIFS threads started. 3. NTP settings on Data Mover VNX driver only supports CIFS share creation with share network which has an Active Directory security-service associated. Creating CIFS share requires that the time on the Data Mover is in sync with the Active Directory domain so that the CIFS server can join the domain. 
Otherwise, the domain join will fail when creating share with this security service. There is a limitation that the time of the domains used by security-services even for different tenants and different share networks should be in sync. Time difference should be less than 10 minutes. It is recommended to set the NTP server to the same public NTP server on both the Data Mover and domains used in security services to ensure the time is in sync everywhere. Check the date and time on Data Mover: server_date where: mover_name = Set the NTP server for Data Mover: server_date timesvc start ntp [ ...] where: mover_name = host = Note: The host must be running the NTP protocol. Only 4 host entries are allowed. 4. Configure User Mapping on the Data Mover Before creating CIFS share using VNX driver, you must select a method of mapping Windows SIDs to UIDs and GIDs. EMC recommends using usermapper in single protocol (CIFS) environment which is enabled on VNX by default. To check usermapper status, use this command syntax: server_usermapper where: = If usermapper is not started, the following command can be used to start the usermapper: server_usermapper -enable where: = For multiple protocol environment, refer to `Configuring VNX User Mapping` on [EMC support site](http://support.emc.com) for additional information. 5. Network Connection In the current release, the share created by VNX driver uses the first network device (physical port on NIC) of Data Mover to access the network. Go to Unisphere to check the device list: Settings -> Network -> Settings for File (Unified system only) -> Device. Backend Configuration --------------------- The following parameters need to be configured in `/etc/manila/manila.conf` for the VNX driver: emc_share_backend = vnx emc_nas_server = emc_nas_password = emc_nas_login = emc_nas_server_container = emc_nas_pool_name = share_driver = manila.share.drivers.emc.driver.EMCShareDriver - `emc_share_backend` is the plugin name. Set it to `vnx` for the VNX driver. - `emc_nas_server` is the control station IP address of the VNX system to be managed. - `emc_nas_password` and `emc_nas_login` fields are used to provide credentials to the VNX system. Only local users of VNX File is supported. - `emc_nas_server_container` field is the name of the Data Mover to serve the share service. - `emc_nas_pool_name` is the pool name user wants to create volume from. The pools can be created using Unisphere for VNX. Restart of :term:`manila-share` service is needed for the configuration changes to take effect. Restrictions ------------ The VNX driver has the following restrictions: - Only IP access type is supported for NFS. - Only user access type is supported for CIFS. - Only FLAT network and VLAN network are supported. - VLAN network is supported with limitations. The Neutron subnets in different VLANs that are used to create share networks cannot have overlapped address spaces. Otherwise, VNX may have a problem to communicate with the hosts in the VLANs. To create shares for different VLANs with same subnet address, use different Data Movers. - The 'Active Directory' security service is the only supported security service type and it is required to create CIFS shares. - Only one security service can be configured for each share network. - Active Directory domain name of the 'active_directory' security service should be unique even for different tenants. 
- The time on Data Mover and the Active Directory domains used in security services should be in sync (time difference should be less than 10 minutes). It is recommended to use same NTP server on both the Data Mover and Active Directory domains. - On VNX the snapshot is stored in the SavVols. VNX system allows the space used by SavVol to be created and extended until the sum of the space consumed by all SavVols on the system exceeds the default 20% of the total space available on the system. If the 20% threshold value is reached, an alert will be generated on VNX. Continuing to create snapshot will cause the old snapshot to be inactivated (and the snapshot data to be abandoned). The limit percentage value can be changed manually by storage administrator based on the storage needs. Administrator is recommended to configure the notification on the SavVol usage. Refer to `Using VNX SnapSure` document on [EMC support site](http://support.emc.com) for more information. - VNX has limitations on the overall numbers of Virtual Data Movers, filesystems, shares, checkpoints, and etc. Virtual Data Mover(VDM) is created by the VNX driver on the VNX to serve as the manila share server. Similarly, filesystem is created, mounted, and exported from the VDM over CIFS or NFS protocol to serve as the manila share. The VNX checkpoint serves as the manila share snapshot. Refer to the `NAS Support Matrix` document on [EMC support site](http://support.emc.com) for the limitations and configure the quotas accordingly. The :mod:`manila.share.drivers.emc.driver` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.emc.driver :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.share.drivers.emc.plugins.vnx.connection` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.emc.plugins.vnx.connection :noindex: :members: :undoc-members: :show-inheritance: manila-2.0.0/doc/source/devref/fakes.rst0000664000567000056710000000351612701407107021262 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Fake Drivers ============ .. todo:: document general info about fakes When the real thing isn't available and you have some development to do these fake implementations of various drivers let you get on with your day. The :mod:`fake_compute` Module ------------------------------ .. automodule:: manila.tests.fake_compute :noindex: :members: :undoc-members: :show-inheritance: The :mod:`fake_driver` Module ----------------------------- .. automodule:: manila.tests.fake_driver :noindex: :members: :undoc-members: :show-inheritance: The :mod:`fake_network` Module ------------------------------ .. automodule:: manila.tests.fake_service_instance :noindex: :members: :undoc-members: :show-inheritance: The :mod:`fake_utils` Module ---------------------------- .. 
automodule:: manila.tests.fake_utils :noindex: :members: :undoc-members: :show-inheritance: The :mod:`fake_volume` Module ------------------------------ .. automodule:: manila.tests.fake_volume :noindex: :members: :undoc-members: :show-inheritance: manila-2.0.0/doc/source/devref/api_microversion_dev.rst0000664000567000056710000002536112701407107024401 0ustar jenkinsjenkins00000000000000API Microversions ================= Background ---------- Manila uses a framework we called 'API Microversions' for allowing changes to the API while preserving backward compatibility. The basic idea is that a user has to explicitly ask for their request to be treated with a particular version of the API. So breaking changes can be added to the API without breaking users who don't specifically ask for it. This is done with an HTTP header ``X-OpenStack-Manila-API-Version`` which is a monotonically increasing semantic version number starting from ``1.0``. If a user makes a request without specifying a version, they will get the ``DEFAULT_API_VERSION`` as defined in ``manila/api/openstack/api_version_request.py``. This value is currently ``2.0`` and is expected to remain so for quite a long time. The Nova project was the first to implement microversions. For full details please read Nova's `Kilo spec for microversions `_ When do I need a new Microversion? ---------------------------------- A microversion is needed when the contract to the user is changed. The user contract covers many kinds of information such as: - the Request - the list of resource urls which exist on the server Example: adding a new shares/{ID}/foo which didn't exist in a previous version of the code - the list of query parameters that are valid on urls Example: adding a new parameter ``is_yellow`` servers/{ID}?is_yellow=True - the list of query parameter values for non free form fields Example: parameter filter_by takes a small set of constants/enums "A", "B", "C". Adding support for new enum "D". - new headers accepted on a request - the Response - the list of attributes and data structures returned Example: adding a new attribute 'locked': True/False to the output of shares/{ID} - the allowed values of non free form fields Example: adding a new allowed ``status`` to shares/{ID} - the list of status codes allowed for a particular request Example: an API previously could return 200, 400, 403, 404 and the change would make the API now also be allowed to return 409. - changing a status code on a particular response Example: changing the return code of an API from 501 to 400. - new headers returned on a response The following flow chart attempts to walk through the process of "do we need a microversion". .. graphviz:: digraph states { label="Do I need a microversion?" 
silent_fail[shape="diamond", style="", label="Did we silently fail to do what is asked?"]; ret_500[shape="diamond", style="", label="Did we return a 500 before?"]; new_error[shape="diamond", style="", label="Are we changing what status code is returned?"]; new_attr[shape="diamond", style="", label="Did we add or remove an attribute to a payload?"]; new_param[shape="diamond", style="", label="Did we add or remove an accepted query string parameter or value?"]; new_resource[shape="diamond", style="", label="Did we add or remove a resource url?"]; no[shape="box", style=rounded, label="No microversion needed"]; yes[shape="box", style=rounded, label="Yes, you need a microversion"]; no2[shape="box", style=rounded, label="No microversion needed, it's a bug"]; silent_fail -> ret_500[label="no"]; silent_fail -> no2[label="yes"]; ret_500 -> no2[label="yes [1]"]; ret_500 -> new_error[label="no"]; new_error -> new_attr[label="no"]; new_error -> yes[label="yes"]; new_attr -> new_param[label="no"]; new_attr -> yes[label="yes"]; new_param -> new_resource[label="no"]; new_param -> yes[label="yes"]; new_resource -> no[label="no"]; new_resource -> yes[label="yes"]; {rank=same; yes new_attr} {rank=same; no2 ret_500} {rank=min; silent_fail} } **Footnotes** [1] - When fixing 500 errors that previously caused stack traces, try to map the new error into the existing set of errors that API call could previously return (400 if nothing else is appropriate). Changing the set of allowed status codes from a request is changing the contract, and should be part of a microversion. The reason why we are so strict on contract is that we'd like application writers to be able to know, for sure, what the contract is at every microversion in manila. If they do not, they will need to write conditional code in their application to handle ambiguities. When in doubt, consider application authors. If it would work with no client side changes on both manila versions, you probably don't need a microversion. If, on the other hand, there is any ambiguity, a microversion is probably needed. In Code ------- In ``manila/api/openstack/wsgi.py`` we define an ``@api_version`` decorator which is intended to be used on top-level Controller methods. It is not appropriate for lower-level methods. Some examples: Adding a new API method ~~~~~~~~~~~~~~~~~~~~~~~ In the controller class:: @wsgi.Controller.api_version("2.4") def my_api_method(self, req, id): .... This method would only be available if the caller had specified an ``X-OpenStack-Manila-API-Version`` of >= ``2.4``. If they had specified a lower version (or not specified it and received the default of ``2.1``) the server would respond with ``HTTP/404``. Removing an API method ~~~~~~~~~~~~~~~~~~~~~~ In the controller class:: @wsgi.Controller.api_version("2.1", "2.4") def my_api_method(self, req, id): .... This method would only be available if the caller had specified an ``X-OpenStack-Manila-API-Version`` of <= ``2.4``. If ``2.5`` or later is specified the server will respond with ``HTTP/404``. Changing a method's behaviour ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the controller class:: @wsgi.Controller.api_version("2.1", "2.3") def my_api_method(self, req, id): .... method_1 ... @wsgi.Controller.api_version("2.4") # noqa def my_api_method(self, req, id): .... method_2 ... If a caller specified ``2.1``, ``2.2`` or ``2.3`` (or received the default of ``2.1``) they would see the result from ``method_1``, ``2.4`` or later ``method_2``. 
It is vital that the two methods have the same name, so the second of them will need ``# noqa`` to avoid failing flake8's ``F811`` rule. The two methods may be different in any kind of semantics (schema validation, return values, response codes, etc) A method with only small changes between versions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A method may have only small changes between microversions, in which case you can decorate a private method:: @api_version("2.1", "2.4") def _version_specific_func(self, req, arg1): pass @api_version(min_version="2.5") # noqa def _version_specific_func(self, req, arg1): pass def show(self, req, id): .... common stuff .... self._version_specific_func(req, "foo") .... common stuff .... A change in schema only ~~~~~~~~~~~~~~~~~~~~~~~ If there is no change to the method, only to the schema that is used for validation, you can add a version range to the ``validation.schema`` decorator:: @wsgi.Controller.api_version("2.1") @validation.schema(dummy_schema.dummy, "2.3", "2.8") @validation.schema(dummy_schema.dummy2, "2.9") def update(self, req, id, body): .... This method will be available from version ``2.1``, validated according to ``dummy_schema.dummy`` from ``2.3`` to ``2.8``, and validated according to ``dummy_schema.dummy2`` from ``2.9`` onward. When not using decorators ~~~~~~~~~~~~~~~~~~~~~~~~~ When you don't want to use the ``@api_version`` decorator on a method or you want to change behaviour within a method (say it leads to simpler or simply a lot less code) you can directly test for the requested version with a method as long as you have access to the api request object (commonly called ``req``). Every API method has an api_version_request object attached to the req object and that can be used to modify behaviour based on its value:: def index(self, req): req_version = req.api_version_request if req_version.matches("2.1", "2.5"): ....stuff.... elif req_version.matches("2.6", "2.10"): ....other stuff.... elif req_version > api_version_request.APIVersionRequest("2.10"): ....more stuff..... The first argument to the matches method is the minimum acceptable version and the second is maximum acceptable version. A specified version can be null:: null_version = APIVersionRequest() If the minimum version specified is null then there is no restriction on the minimum version, and likewise if the maximum version is null there is no restriction the maximum version. Alternatively a one sided comparison can be used as in the example above. Other necessary changes ----------------------- If you are adding a patch which adds a new microversion, it is necessary to add changes to other places which describe your change: * Update ``REST_API_VERSION_HISTORY`` in ``manila/api/openstack/api_version_request.py`` * Update ``_MAX_API_VERSION`` in ``manila/api/openstack/api_version_request.py`` * Add a verbose description to ``manila/api/openstack/rest_api_version_history.rst``. There should be enough information that it could be used by the docs team for release notes. * Update the expected versions in affected tests. Allocating a microversion ------------------------- If you are adding a patch which adds a new microversion, it is necessary to allocate the next microversion number. Except under extremely unusual circumstances and this would have been mentioned in the blueprint for the change, the minor number of ``_MAX_API_VERSION`` will be incremented. This will also be the new microversion number for the API change. 
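For illustration only, a patch that introduces a hypothetical new microversion ``2.16`` would bump the constants named above in ``manila/api/openstack/api_version_request.py`` roughly as in the following sketch (the version numbers and the history entry are invented for this example and are not the current values)::

    # manila/api/openstack/api_version_request.py (sketch, hypothetical values)
    REST_API_VERSION_HISTORY = """
        REST API Version History:

        ...
        * 2.15 - <previous change>
        * 2.16 - <one-line description of the new change>
    """

    # Incremented from "2.15"; "2.16" becomes the microversion used by the patch.
    _MAX_API_VERSION = "2.16"

The verbose description intended for the release notes still goes into ``manila/api/openstack/rest_api_version_history.rst``, as listed in the previous section.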
It is possible that multiple microversion patches would be proposed in parallel and the microversions would conflict between patches. This will cause a merge conflict. We don't reserve a microversion for each patch in advance as we don't know the final merge order. Developers may need over time to rebase their patch calculating a new version number as above based on the updated value of ``_MAX_API_VERSION``. Testing Microversioned API Methods ---------------------------------- Testing a microversioned API method is very similar to a normal controller method test, you just need to add the ``X-OpenStack-Manila-API-Version`` header, for example:: req = fakes.HTTPRequest.blank('/testable/url/endpoint') req.headers = {'X-OpenStack-Manila-API-Version': '2.2'} req.api_version_request = api_version.APIVersionRequest('2.6') controller = controller.TestableController() res = controller.index(req) ... assertions about the response ... manila-2.0.0/doc/source/devref/database.rst0000664000567000056710000000275412701407107021740 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. The Database Layer ================== The :mod:`manila.db.api` Module ------------------------------- .. automodule:: manila.db.api :noindex: :members: :undoc-members: :show-inheritance: The Sqlalchemy Driver --------------------- The :mod:`manila.db.sqlalchemy.api` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.db.sqlalchemy.api :noindex: The :mod:`manila.db.sqlalchemy.models` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.db.sqlalchemy.models :noindex: :members: :undoc-members: :show-inheritance: Tests ----- Tests are lacking for the db api layer and for the sqlalchemy driver. Failures in the drivers would be detected in other test cases, though. manila-2.0.0/doc/source/devref/launchpad.rst0000664000567000056710000000321512701407107022124 0ustar jenkinsjenkins00000000000000Project hosting with Launchpad ============================== `Launchpad`_ hosts the manila project. The manila project homepage on Launchpad is http://launchpad.net/manila. Launchpad credentials --------------------- Creating a login on Launchpad is important even if you don't use the Launchpad site itself, since Launchpad credentials are used for logging in on several OpenStack-related sites. These sites include: * `Wiki`_ * Gerrit (see :doc:`gerrit`) * Jenkins (see :doc:`jenkins`) Mailing list ------------ The mailing list email is ``openstack@lists.launchpad.net``. This is a common mailing list across the OpenStack projects. To participate in the mailing list: #. Join the `Manila Team`_ on Launchpad. #. Subscribe to the list on the `OpenStack Team`_ page on Launchpad. The mailing list archives are at https://lists.launchpad.net/openstack. 
Bug tracking ------------ Report manila bugs at https://bugs.launchpad.net/manila Feature requests (Blueprints) ----------------------------- Manila uses Launchpad Blueprints to track feature requests. Blueprints are at https://blueprints.launchpad.net/manila. Technical support (Answers) --------------------------- Manila uses Launchpad Answers to track manila technical support questions. The manila Answers page is at https://answers.launchpad.net/manila. Note that the `OpenStack Forums`_ (which are not hosted on Launchpad) can also be used for technical support requests. .. _Launchpad: http://launchpad.net .. _Wiki: http://wiki.openstack.org .. _Manila Team: https://launchpad.net/~manila .. _OpenStack Team: https://launchpad.net/~openstack .. _OpenStack Forums: http://forums.openstack.org/ manila-2.0.0/doc/source/devref/intro.rst0000664000567000056710000000352712701407107021326 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Introduction to the Shared File Systems service =========================================================== Manila is the file share service project for OpenStack. Manila provides the management of file shares for example, NFS and CIFS as a core service to OpenStack. Manila currently works with NetApp, Red Hat storage (GlusterFS) and EMC VNX, as well as on a base Linux NFS or Samba server. There are a number of concepts that will help in better understanding of the solutions provided by manila. One aspect can be to explore the different service possibilities provided by manila. Manila, depending on the driver, requires the user by default to create a share network using neutron-net-id and neutron-subnet-id (GlusterFS native driver does not require it). After creation of the share network, the user can proceed to create the shares. Users in manila can configure multiple back-ends just like Cinder. Manila has a share server assigned to every tenant. This is the solution for all back-ends except for GlusterFS. The customer in this scenario is prompted to create a share server using neutron net-id and subnet-id before even trying to create a share. The current low-level services available in manila are: - :term:`manila-api` - :term:`manila-scheduler` - :term:`manila-share` manila-2.0.0/doc/source/devref/hdfs_native_driver.rst0000664000567000056710000000610612701407107024034 0ustar jenkinsjenkins00000000000000.. Copyright 2015 Intel, Corp. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
HDFS native driver ================== HDFS native driver is a plugin based on the OpenStack manila service, which uses Hadoop distributed file system (HDFS), a distributed file system designed to hold very large amounts of data, and provide high-throughput access to the data. A manila share in this driver is a subdirectory in hdfs root directory. Instances talk directly to the HDFS storage backend with 'hdfs' protocol. And access to each share is allowed by user based access type, which is aligned with HDFS ACLs to support access control of multiple users and groups. Network configuration --------------------- The storage backend and manila hosts should be in a flat network, otherwise, the L3 connectivity between them should exist. Supported shared filesystems ---------------------------- - HDFS (authentication by user) Supported Operations -------------------- - Create HDFS share - Delete HDFS share - Allow HDFS Share access * Only support user access type * Support level of access (ro/rw) - Deny HDFS Share access - Create snapshot - Delete snapshot - Create share from snapshot - Extend share Requirements ------------ - Install HDFS package, version >= 2.4.x, on the storage backend - To enable access control, the HDFS file system must have ACLs enabled - Establish network connection between the manila host and storage backend Manila driver configuration --------------------------- - `share_driver` = manila.share.drivers.hdfs.hdfs_native.HDFSNativeShareDriver - `hdfs_namenode_ip` = the IP address of the HDFS namenode, and only single namenode is supported now - `hdfs_namenode_port` = the port of the HDFS namenode service - `hdfs_ssh_port` = HDFS namenode SSH port - `hdfs_ssh_name` = HDFS namenode SSH login name - `hdfs_ssh_pw` = HDFS namenode SSH login password, this parameter is not necessary, if the following `hdfs_ssh_private_key` is configured - `hdfs_ssh_private_key` = Path to the HDFS namenode private key to ssh login Known Restrictions ------------------ - This driver does not support network segmented multi-tenancy model. Instead multi-tenancy is supported by the tenant specific user authentication - Only support for single HDFS namenode in Kilo release The :mod:`manila.share.drivers.hdfs.hdfs_native` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.hdfs.hdfs_native :noindex: :members: :undoc-members: :show-inheritance: manila-2.0.0/doc/source/devref/i18n.rst0000664000567000056710000000230212701407107020740 0ustar jenkinsjenkins00000000000000Internationalization ==================== Manila uses `gettext `_ so that user-facing strings such as log messages appear in the appropriate language in different locales. To use gettext, make sure that the strings passed to the logger are wrapped in a ``_()`` function call. For example:: LOG.info(_("block_device_mapping %s"), block_device_mapping) Do not use ``locals()`` for formatting messages because: 1. It is not as clear as using explicit dicts. 2. It could produce hidden errors during refactoring. 3. Changing the name of a variable causes a change in the message. 4. It creates a lot of otherwise unused variables. If you do not follow the project conventions, your code may cause the LocalizationTestCase.test_multiple_positional_format_placeholders test to fail in manila/tests/test_localization.py. 
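As a minimal sketch of that guidance (the message text and variable names below are purely illustrative), prefer an explicit dict of parameters over ``locals()`` when formatting translated messages::

    # Discouraged: relies on locals(), hides which variables the message
    # actually uses, and silently breaks if a variable is renamed.
    LOG.info(_("Mounted share %(share_id)s on host %(host)s") % locals())

    # Preferred: an explicit dict keeps the placeholders visible and the
    # translated string correct after refactoring.
    LOG.info(_("Mounted share %(share_id)s on host %(host)s"),
             {'share_id': share_id, 'host': host})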
The ``_()`` function is brought into the global scope by doing:: from manila.openstack.common import gettextutils gettextutils.install("manila") These lines are needed in any toplevel script before any manila modules are imported. If this code is missing, it may result in an error that looks like:: NameError: name '_' is not defined manila-2.0.0/doc/source/devref/share_hooks.rst0000664000567000056710000000625512701407107022501 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Manila share driver hooks ========================= Manila share driver hooks are designed to provide additional possibilities for each :term:`manila-share` service; such as any kind of notification and additional actions before and after share driver calls. Possibilities ------------- - Perform actions before some share driver method calls. - Perform actions after some share driver method calls with results of driver call and preceding hook call. - Call additional 'periodic' hook each 'N' ticks. - Possibility to update results of driver's action by post-running hook. Features -------- - Errors in hook execution can be suppressed. - Any hook can be disabled. - Any amount of hook instances can be run at once for each manila-share service. Limitations ----------- - Hooks approach is not asynchronous. That is, if we run hooks, and especially, more than one hook instance, then all of them will be executed in one thread. Implementation in share drivers ------------------------------- Share drivers can [re]define method `get_periodic_hook_data` that runs with each execution of 'periodic' hook and receives list of shares (as parameter) with existing access rules. So, each share driver, for each of its shares can add/update some information that will be used then in the periodic hook. What is required for writing new 'hook' implementation? ------------------------------------------------------- All implementations of 'hook' interface are expected to be in 'manila/share/hooks'. Each implementation should inherit class 'manila.share.hook:HookBase' and redefine its abstract methods. How to use 'hook' implementations? ---------------------------------- Just set config option 'hook_drivers' in driver's config group. For example:: [MY_DRIVER] hook_drivers=path.to:FooClass,path.to:BarClass Then all classes defined above will be initialized. In the same config group, any config option of hook modules can be redefined too. .. note:: More info about common config options for hooks can be found in module `manila.share.hook` Driver methods that are wrapped with hooks ------------------------------------------ - allow_access - create_share_instance - create_snapshot - delete_share_instance - delete_share_server - delete_snapshot - deny_access - extend_share - init_host - manage_share - publish_service_capabilities - shrink_share - unmanage_share Above list with wrapped methods can be extended in future. The :mod:`manila.share.hook.py` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
automodule:: manila.share.hook :noindex: :members: :undoc-members: :show-inheritance: manila-2.0.0/doc/source/devref/share.rst0000664000567000056710000000230712701407107021270 0ustar jenkinsjenkins00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Shared Filesystems ================== .. todo:: rework The :mod:`manila.share.manager` Module -------------------------------------- .. automodule:: manila.share.manager :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.share.driver` Module ------------------------------------- .. automodule:: manila.share.driver :noindex: :members: :undoc-members: :show-inheritance: :exclude-members: FakeAOEDriver manila-2.0.0/doc/source/devref/share_back_ends_feature_support_mapping.rst0000664000567000056710000004705712701407112030312 0ustar jenkinsjenkins00000000000000.. Copyright 2015 Mirantis Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Manila share features support mapping ===================================== Here we provide information on support of different share features by different share drivers. Column values contain the OpenStack release letter when a feature was added to the driver. Column value "?" means that this field requires an update with current information. Column value "-" means that this feature is not currently supported. 
Mapping of share drivers and share features support --------------------------------------------------- +----------------------------------------+-----------------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+ | Driver name | create/delete share | manage/unmanage share | extend share | shrink share | create/delete snapshot | create share from snapshot | manage/unmanage snapshot | +========================================+=============================+=======================+==============+==============+========================+============================+==========================+ | ZFSonLinux | DHSS = False (M) | \- | M | M | M | M | \- | +----------------------------------------+-----------------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+ | Generic (Cinder as back-end) | DHSS = True (J) & False (K) | K | L | L | J | J | DHSS = False (M) | +----------------------------------------+-----------------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+ | NetApp Clustered Data ONTAP | DHSS = True (J) & False (K) | L | L | L | J | J | \- | +----------------------------------------+-----------------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+ | EMC VNX | DHSS = True (J) | \- | \- | \- | J | J | \- | +----------------------------------------+-----------------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+ | EMC Isilon | DHSS = False (K) | \- | M | \- | K | K | \- | +----------------------------------------+-----------------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+ | Red Hat GlusterFS | DHSS = False (J) | \- | \- | \- | volume layout (L) | volume layout (L) | \- | +----------------------------------------+-----------------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+ | Red Hat GlusterFS-Native | DHSS = False (J) | \- | \- | \- | K | L | \- | +----------------------------------------+-----------------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+ | HDFS | DHSS = False (K) | \- | M | \- | K | K | \- | +----------------------------------------+-----------------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+ | Hitachi HNAS | DHSS = False (L) | L | L | M | L | L | \- | +----------------------------------------+-----------------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+ | HPE 3PAR | DHSS = True (L) & False (K) | \- | \- | \- | K | K | \- | +----------------------------------------+-----------------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+ | Huawei | DHSS = True (M) & False(K) | L | L | L 
| K | M | \- | +----------------------------------------+-----------------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+ | IBM GPFS | DHSS = False(K) | \- | L | \- | K | K | \- | +----------------------------------------+-----------------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+ | LVM | DHSS = False (M) | \- | M | \- | M | M | \- | +----------------------------------------+-----------------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+ | Quobyte | DHSS = False (K) | \- | M | M | \- | \- | \- | +----------------------------------------+-----------------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+ | Windows SMB | DHSS = True (L) & False (L) | L | L | L | L | L | \- | +----------------------------------------+-----------------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+ | Oracle ZFSSA | DHSS = False (K) | \- | M | M | K | K | \- | +----------------------------------------+-----------------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+ | CephFS Native | DHSS = False (M) | \- | M | M | M | \- | \- | +----------------------------------------+-----------------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+ | Tegile | DHSS = False (M) | \- | M | M | M | M | \- | +----------------------------------------+-----------------------------+-----------------------+--------------+--------------+------------------------+----------------------------+--------------------------+ .. 
note:: See details for :term:`DHSS` Mapping of share drivers and share access rules support ------------------------------------------------------- +----------------------------------------+-----------------------------------------------------------+---------------------------------------------------------+ | | Read & Write | Read Only | + Driver name +--------------+----------------+------------+--------------+--------------+----------------+------------+------------+ | | IP | USER | Cert | CephX | IP | USER | Cert | CephX | +========================================+==============+================+============+==============+==============+================+============+============+ | ZFSonLinux | NFS (M) | \- | \- | \- | NFS (M) | \- | \- | \- | +----------------------------------------+--------------+----------------+------------+--------------+--------------+----------------+------------+------------+ | Generic (Cinder as back-end) | NFS,CIFS (J) | \- | \- | \- | NFS (K) | \- | \- | \- | +----------------------------------------+--------------+----------------+------------+--------------+--------------+----------------+------------+------------+ | NetApp Clustered Data ONTAP | NFS (J) | CIFS (J) | \- | \- | NFS (K) | CIFS (M) | \- | \- | +----------------------------------------+--------------+----------------+------------+--------------+--------------+----------------+------------+------------+ | EMC VNX | NFS (J) | CIFS (J) | \- | \- | NFS (L) | CIFS (L) | \- | \- | +----------------------------------------+--------------+----------------+------------+--------------+--------------+----------------+------------+------------+ | EMC Isilon | NFS,CIFS (K) | CIFS (M) | \- | \- | NFS (M) | CIFS (M) | \- | \- | +----------------------------------------+--------------+----------------+------------+--------------+--------------+----------------+------------+------------+ | Red Hat GlusterFS | NFS (J) | \- | \- | \- | \- | \- | \- | \- | +----------------------------------------+--------------+----------------+------------+--------------+--------------+----------------+------------+------------+ | Red Hat GlusterFS-Native | \- | \- | J | \- | \- | \- | \- | \- | +----------------------------------------+--------------+----------------+------------+--------------+--------------+----------------+------------+------------+ | HDFS | \- | HDFS(K) | \- | \- | \- | HDFS(K) | \- | \- | +----------------------------------------+--------------+----------------+------------+--------------+--------------+----------------+------------+------------+ | Hitachi HNAS | NFS (L) | \- | \- | \- | NFS (L) | \- | \- | \- | +----------------------------------------+--------------+----------------+------------+--------------+--------------+----------------+------------+------------+ | HPE 3PAR | NFS,CIFS (K) | CIFS (K) | \- | \- | \- | \- | \- | \- | +----------------------------------------+--------------+----------------+------------+--------------+--------------+----------------+------------+------------+ | Huawei | NFS (K) |NFS (M),CIFS (K)| \- | \- | NFS (K) |NFS (M),CIFS (K)| \- | \- | +----------------------------------------+--------------+----------------+------------+--------------+--------------+----------------+------------+------------+ | LVM | NFS (M) | CIFS (M) | \- | \- | NFS (M) | CIFS (M) | \- | \- | +----------------------------------------+--------------+----------------+------------+--------------+--------------+----------------+------------+------------+ | Quobyte | NFS (K) | \- | \- | \- | NFS 
(K) | \- | \- | \- | +----------------------------------------+--------------+----------------+------------+--------------+--------------+----------------+------------+------------+ | Windows SMB | \- | CIFS (L) | \- | \- | \- | CIFS (L) | \- | \- | +----------------------------------------+--------------+----------------+------------+--------------+--------------+----------------+------------+------------+ | IBM GPFS | NFS (K) | \- | \- | \- | NFS (K) | \- | \- | \- | +----------------------------------------+--------------+----------------+------------+--------------+--------------+----------------+------------+------------+ | Oracle ZFSSA | NFS,CIFS(K) | \- | \- | \- | \- | \- | \- | \- | +----------------------------------------+--------------+----------------+------------+--------------+--------------+----------------+------------+------------+ | CephFS Native | \- | \- | \- | CEPH(M) | \- | \- | \- | \- | +----------------------------------------+--------------+----------------+------------+--------------+--------------+----------------+------------+------------+ | Tegile | NFS (M) |NFS (M),CIFS (M)| \- | \- | NFS (M) |NFS (M),CIFS (M)| \- | \- | +----------------------------------------+--------------+----------------+------------+--------------+--------------+----------------+------------+------------+ Mapping of share drivers and security services support ------------------------------------------------------ +----------------------------------------+------------------+-----------------+------------------+ | Driver name | Active Directory | LDAP | Kerberos | +========================================+==================+=================+==================+ | ZFSonLinux | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | Generic (Cinder as back-end) | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | NetApp Clustered Data ONTAP | J | J | J | +----------------------------------------+------------------+-----------------+------------------+ | EMC VNX | J | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | EMC Isilon | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | Red Hat GlusterFS | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | Red Hat GlusterFS-Native | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | HDFS | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | Hitachi HNAS | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | HPE 3PAR | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | Huawei | M | M | \- | +----------------------------------------+------------------+-----------------+------------------+ | LVM | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | Quobyte | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | Windows SMB | L | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | IBM GPFS | \- | \- | \- | 
+----------------------------------------+------------------+-----------------+------------------+ | Oracle ZFSSA | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | CephFS Native | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | Tegile | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ manila-2.0.0/doc/Makefile0000664000567000056710000000637212701407107016327 0ustar jenkinsjenkins00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SPHINXSOURCE = source PAPER = BUILDDIR = build # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SPHINXSOURCE) .PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest .DEFAULT_GOAL = html help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* -rm -rf manila.sqlite if [ -f .autogenerated ] ; then \ cat .autogenerated | xargs rm ; \ rm .autogenerated ; \ fi html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/manila.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/manila.qhc" latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ "run these through (pdf)latex." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." 
doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." manila-2.0.0/doc/ext/0000775000567000056710000000000012701407265015464 5ustar jenkinsjenkins00000000000000manila-2.0.0/doc/ext/__init__.py0000664000567000056710000000000012701407107017556 0ustar jenkinsjenkins00000000000000manila-2.0.0/doc/ext/manila_autodoc.py0000664000567000056710000000042612701407107021012 0ustar jenkinsjenkins00000000000000from __future__ import print_function import gettext import os gettext.install('manila') from manila import utils def setup(app): print("**Autodocumenting from %s" % os.path.abspath(os.curdir)) rv = utils.execute('./doc/generate_autodoc_index.sh') print(rv[0]) manila-2.0.0/doc/ext/manila_todo.py0000664000567000056710000000647112701407107020327 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # This is a hack of the builtin todo extension, to make the todo_list # more user friendly from six import moves from sphinx.ext.todo import * import re def _(s): return s def process_todo_nodes(app, doctree, fromdocname): if not app.config['todo_include_todos']: for node in doctree.traverse(todo_node): node.parent.remove(node) # Replace all todolist nodes with a list of the collected todos. # Augment each todo with a backlink to the original location. env = app.builder.env if not hasattr(env, 'todo_all_todos'): env.todo_all_todos = [] # remove the item that was added in the constructor, since I'm tired of # reading through docutils for the proper way to construct an empty list lists = [] for i in moves.range(5): lists.append(nodes.bullet_list("", nodes.Text('', ''))) lists[i].remove(lists[i][0]) lists[i]['classes'].append('todo_list') for node in doctree.traverse(todolist): if not app.config['todo_include_todos']: node.replace_self([]) continue for todo_info in env.todo_all_todos: para = nodes.paragraph() filename = env.doc2path(todo_info['docname'], base=None) # Create a reference newnode = nodes.reference('', '') line_info = todo_info['lineno'] link = _('%(filename)s, line %(line_info)d') % locals() innernode = nodes.emphasis(link, link) newnode['refdocname'] = todo_info['docname'] try: newnode['refuri'] = app.builder.get_relative_uri( fromdocname, todo_info['docname']) newnode['refuri'] += '#' + todo_info['target']['refid'] except NoUri: # ignore if no URI can be determined, e.g. 
for LaTeX output pass newnode.append(innernode) para += newnode para['classes'].append('todo_link') todo_entry = todo_info['todo'] env.resolve_references(todo_entry, todo_info['docname'], app.builder) item = nodes.list_item('', para) todo_entry[1]['classes'].append('details') comment = todo_entry[1] m = re.match(r"^P(\d)", comment.astext()) priority = 5 if m: priority = int(m.group(1)) if priority < 0: priority = 1 if priority > 5: priority = 5 item['classes'].append('todo_p' + str(priority)) todo_entry['classes'].append('todo_p' + str(priority)) item.append(comment) lists[priority - 1].insert(0, item) node.replace_self(lists) def setup(app): app.add_config_value('todo_include_todos', False, False) app.add_node(todolist) app.add_node(todo_node, html=(visit_todo_node, depart_todo_node), latex=(visit_todo_node, depart_todo_node), text=(visit_todo_node, depart_todo_node)) app.add_directive('todo', Todo) app.add_directive('todolist', TodoList) app.connect('doctree-read', process_todos) app.connect('doctree-resolved', process_todo_nodes) app.connect('env-purge-doc', purge_todos) manila-2.0.0/doc/generate_autodoc_index.sh0000775000567000056710000000177112701407107021723 0ustar jenkinsjenkins00000000000000#!/bin/sh SOURCEDIR=doc/source/api if [ ! -d ${SOURCEDIR} ] ; then mkdir -p ${SOURCEDIR} fi for x in `./find_autodoc_modules.sh`; do echo "Generating ${SOURCEDIR}/${x}.rst" echo "${SOURCEDIR}/${x}.rst" >> .autogenerated heading="The :mod:\`${x}\` Module" # Figure out how long the heading is # and make sure to emit that many '=' under # it to avoid heading format errors # in Sphinx. heading_len=$(echo "$heading" | wc -c) underline=$(head -c $heading_len < /dev/zero | tr '\0' '=') ( cat < ${SOURCEDIR}/${x}.rst done if [ ! -f ${SOURCEDIR}/autoindex.rst ] ; then cat > ${SOURCEDIR}/autoindex.rst <> ${SOURCEDIR}/autoindex.rst done echo ${SOURCEDIR}/autoindex.rst >> .autogenerated fi manila-2.0.0/doc/README.rst0000664000567000056710000000164612701407107016355 0ustar jenkinsjenkins00000000000000================= Building the docs ================= Dependencies ============ Sphinx_ You'll need sphinx (the python one) and if you are using the virtualenv you'll need to install it in the virtualenv specifically so that it can load the manila modules. :: pip install Sphinx Graphviz_ Some of the diagrams are generated using the ``dot`` language from Graphviz. :: sudo apt-get install graphviz .. _Sphinx: http://sphinx.pocoo.org .. _Graphviz: http://www.graphviz.org/ Use `make` ========== Just type make:: % make Look in the Makefile for more targets. Manually ======== 1. Generate the code.rst file so that Sphinx will pull in our docstrings:: % ./generate_autodoc_index.sh > source/code.rst 2. Run `sphinx_build`:: % sphinx-build -b html source build/html The docs have been built ======================== Check out the `build` directory to find them. Yay! manila-2.0.0/doc/.gitignore0000664000567000056710000000004512701407107016646 0ustar jenkinsjenkins00000000000000_build/* source/api/* .autogenerated manila-2.0.0/LICENSE0000664000567000056710000002363712701407107015132 0ustar jenkinsjenkins00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. 
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. manila-2.0.0/run_tests.sh0000775000567000056710000001735112701407107016506 0ustar jenkinsjenkins00000000000000#!/bin/bash set -eu function usage { echo "Usage: $0 [OPTION]..." echo "Run Manila's test suite(s)" echo "" echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment" echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)." echo " -n, --no-recreate-db Don't recreate the test database." echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added." echo " -u, --update Update the virtual environment with any newer package versions" echo " -p, --pep8 Just run PEP8 and HACKING compliance check" echo " -P, --no-pep8 Don't run static code checks" echo " -c, --coverage Generate coverage report" echo " -d, --debug Run tests with testtools instead of testr. This allows you to use the debugger." echo " -h, --help Print this usage message" echo " --hide-elapsed Don't print the elapsed time for each test along with slow test list" echo " --virtual-env-path Location of the virtualenv directory." 
echo " Default: \$(pwd)" echo " --virtual-env-name Name of the virtualenv directory." echo " Default: .venv" echo " --tools-path

    Location of the tools directory." echo " Default: \$(pwd)" echo " --concurrency How many processes to use when running the tests." echo " A value of 0 autodetects concurrency from your CPU count." echo " Default: 1" echo "" echo "Note: with no options specified, the script will try to run the tests in a virtual environment," echo " If no virtualenv is found, the script will ask if you would like to create one. If you " echo " prefer to run tests NOT in a virtual environment, simply pass the -N option." exit } function process_options { i=1 while [ $i -le $# ]; do case "${!i}" in -h|--help) usage;; -V|--virtual-env) always_venv=1; never_venv=0;; -N|--no-virtual-env) always_venv=0; never_venv=1;; -s|--no-site-packages) no_site_packages=1;; -r|--recreate-db) recreate_db=1;; -n|--no-recreate-db) recreate_db=0;; -f|--force) force=1;; -u|--update) update=1;; -p|--pep8) just_pep8=1;; -P|--no-pep8) no_pep8=1;; -c|--coverage) coverage=1;; -d|--debug) debug=1;; --virtual-env-path) (( i++ )) venv_path=${!i} ;; --virtual-env-name) (( i++ )) venv_dir=${!i} ;; --tools-path) (( i++ )) tools_path=${!i} ;; --concurrency) (( i++ )) concurrency=${!i} ;; -*) testropts="$testropts ${!i}";; *) testrargs="$testrargs ${!i}" esac (( i++ )) done } tool_path=${tools_path:-$(pwd)} venv_path=${venv_path:-$(pwd)} venv_dir=${venv_name:-.venv} with_venv=tools/with_venv.sh always_venv=0 never_venv=0 force=0 no_site_packages=0 installvenvopts= testrargs= testropts= wrapper="" just_pep8=0 no_pep8=0 coverage=0 debug=0 recreate_db=1 update=0 concurrency=1 process_options $@ # Make our paths available to other scripts we call export venv_path export venv_dir export venv_name export tools_dir export venv=${venv_path}/${venv_dir} if [ $no_site_packages -eq 1 ]; then installvenvopts="--no-site-packages" fi function init_testr { if [ ! -d .testrepository ]; then ${wrapper} testr init fi } function run_tests { # Cleanup *pyc ${wrapper} find . -type f -name "*.pyc" -delete if [ $debug -eq 1 ]; then if [ "$testropts" = "" ] && [ "$testrargs" = "" ]; then # Default to running all tests if specific test is not # provided. testrargs="discover ./manila/tests" fi ${wrapper} python -m testtools.run $testropts $testrargs # Short circuit because all of the testr and coverage stuff # below does not make sense when running testtools.run for # debugging purposes. return $? fi if [ $coverage -eq 1 ]; then TESTRTESTS="$TESTRTESTS --coverage" else TESTRTESTS="$TESTRTESTS" fi # Just run the test suites in current environment set +e testrargs=`echo "$testrargs" | sed -e's/^\s*\(.*\)\s*$/\1/'` TESTRTESTS="$TESTRTESTS --testr-args='--subunit --concurrency $concurrency $testropts $testrargs'" if [ setup.cfg -nt manila.egg-info/entry_points.txt ]; then ${wrapper} python setup.py egg_info fi echo "Running \`${wrapper} $TESTRTESTS\`" if ${wrapper} which subunit-2to1 2>&1 > /dev/null; then # subunit-2to1 is present, testr subunit stream should be in version 2 # format. Convert to version one before colorizing. bash -c "${wrapper} $TESTRTESTS | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py" else bash -c "${wrapper} $TESTRTESTS | ${wrapper} tools/colorizer.py" fi RESULT=$? 
set -e copy_subunit_log if [ $coverage -eq 1 ]; then echo "Generating coverage report in covhtml/" # Don't compute coverage for common code, which is tested elsewhere ${wrapper} coverage combine ${wrapper} coverage html --include='manila/*' --omit='manila/openstack/common/*' -d covhtml -i fi return $RESULT } function copy_subunit_log { LOGNAME=`cat .testrepository/next-stream` LOGNAME=$(($LOGNAME - 1)) LOGNAME=".testrepository/${LOGNAME}" cp $LOGNAME subunit.log } function run_pep8 { echo "Running flake8..." bash -c "${wrapper} flake8" } TESTRTESTS="python setup.py testr" if [ $never_venv -eq 0 ]; then # Remove the virtual environment if --force used if [ $force -eq 1 ]; then echo "Cleaning virtualenv..." rm -rf ${venv} fi if [ $update -eq 1 ]; then echo "Updating virtualenv..." python tools/install_venv.py $installvenvopts fi if [ -e ${venv} ]; then wrapper="${with_venv}" else if [ $always_venv -eq 1 ]; then # Automatically install the virtualenv python tools/install_venv.py $installvenvopts wrapper="${with_venv}" else echo -e "No virtual environment found...create one? (Y/n) \c" read use_ve if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then # Install the virtualenv and run the test suite in it python tools/install_venv.py $installvenvopts wrapper=${with_venv} fi fi fi fi # Delete old coverage data from previous runs if [ $coverage -eq 1 ]; then ${wrapper} coverage erase fi if [ $just_pep8 -eq 1 ]; then run_pep8 exit fi if [ $recreate_db -eq 1 ]; then rm -f tests.sqlite fi init_testr run_tests # NOTE(sirp): we only want to run pep8 when we're running the full-test suite, # not when we're running tests individually. To handle this, we need to # distinguish between options (testropts), which begin with a '-', and # arguments (testrargs). 
if [ -z "$testrargs" ]; then if [ $no_pep8 -eq 0 ]; then run_pep8 fi fi manila-2.0.0/ChangeLog0000664000567000056710000017637112701407264015707 0ustar jenkinsjenkins00000000000000CHANGES ======= 2.0.0 ----- * Fix call of clients in post_test_hook.sh * Add tests to ensure snapshots across replicas * NetApp cDOT: Handle replicated snapshots * Data Replication: Ensure Snapshots across replicas * Fix update_access concurrency issue * Fix manage API ignoring type extra specs * Make ZFSonLinux driver handle snapshots of replicated shares properly * Fix keystone v3 issues for all clients * Fix for incorrect LVMMixin exception message * NetApp cDOT: Fix status updates for replicas * NetApp cDOT: Raise ShareResourceNotFound in update_access * Fix generic and LVM driver access rules for CIDRs * Fix report of ZFSonLinux driver capabilities * Fix the scheduler choose a disable share service * Fix typos * Fix error logged for wrong HPE 3par client * 3PAR remove file tree on delete when using nested shares * HDS-HNAS: Fix exception in update_access not found * Revert "LXC/LXD driver" * Fix Hitachi HNAS driver version * service instance: also recognize instance name * Fix update of access rules in ZFSonLinux driver * Check share-network in 'share create' API * glusterfs volume layout: take care of deletion of DOA shares * Fix delete when share not found in update_access * Remove default values for update_access() * NetApp cDOT driver should not split clones * Fix handling of share server details after error * HDS-HNAS: fixed exception when export not found * Fix lock decorator usage for LVM and Generic drivers * Fix HNAS snapshot creation on deleted shares * Move iso8601 from requirements to test-requirements * Fix typos * glusterfs.common: GlusterManager.gluster_call error report fix * glusterfs.GlusterNFSVolHelper: remove __init__ * Add tempest tests for Share Replication * register the config generator default hook with the right name * Windows driver: fix share access actions * Collapse common os_region_name option * Disallow scheduling multiple replicas on a given pool * update quota of origin user on share extend/shrink * Update quota of proper user on resource delete * Fix Share Migration access rule mapping * Fix unstable DB migration tests * Fix Share Migration KeyError on dict.pop * NetApp cDOT APIs may get too little data * HNAS: Enable no_root_squash option when allowing access to a share * Fix HNAS driver crash with unmounted filesystems * Fix compatibility with Tempest * Set proper image name for tempest * Remove nsenter dependency * Fix ZFSonLinux driver share replica SSHing * Fix ZFSonLinux access rules for CIDRs * Fix HNAS driver thin_provisioning support * Fix pylxd hard dependencies * Squash consequent DB calls in create_share_instance * Fix slow unit test * Run ZfsOnLinux gate tests with SSH enabled * Fix status update for replicas * Set TCP keepalive options * Fix manila devstack plugin for keystone v3 usage * Add /usr/local/{sbin,bin} to rootwrap exec_dirs * Updated from global requirements * Use official location for service image * Allow devstack plugin to work without Cinder * Download service image only when needed * glusterManager instantiation regexp validation 2.0.0.0b3 --------- * Moved CORS middleware configuration into oslo-config-generator * Move Share Migration code to Data Service * Remove unintended exposure of private attribute * Add share driver for Tegile IntelliFlash Arrays * Update tempest commit and switch to tempest.lib * LXC/LXD driver * Update export location 
retrieval APIs * Huawei driver improve support of StandaloneNetworkPlugin * Add Ceph Native driver * Introduced Data Service * Implement admin network in generic driver * NetApp: Add Replication support in cDOT * Fix NFS helper root squashing in RW access level * Add ZFSonLinux share driver * glusterfs.common: move the numreduct function to toplevel * glusterfs_native: relocate module under glusterfs * Huawei driver code review * Add QoS description in Huawei * glusterfs/ganesha: add symbolic access-id to export location * Add share resize support to Oracle ZFSSA driver * Implement update_access() method in huawei driver * Update Huawei driver doc for Mitaka * Remove unused pngmath Sphinx extension * Implement update_access() in generic driver + LVM * Add doc for export location metadata * gluster*: clean up volume option querying * Admin networks in NetApp cDOT multi-SVM driver * Support export location metadata in NetApp cDOT drivers * Change sudo to run_as_root in LVM driver * Huawei driver: change CIFS rw to full control * Updated from global requirements * Fix NetApp cDOT driver update_access negative test * Define context.roles with base class * Subclass context from oslo_context base class * Add Replication admin APIs and driver i/f changes * glusterfs/common: don't suppress vol set errors * Improve exception msg when attaching/detaching volumes * Use assertIsNone instead of assertEqual(None, ***) * Scheduler enhancements for Share Replication * Fix typo in comment message * Remove aggressive assert from share server test * Fix scenario tests * EMC Isilon Driver Support For CIFS Read-Only Share * Add update_access() interface to Quobyte driver * Check for device node availability before mkfs * Replace TENANT => PROJECT for manila plugin * Validate qos during share creation * Fix doc string in driver interface * Fix neutron port concurrency in generic driver * Add additional documentation on extra spec operations * Implement update_access() method in Hitachi HNAS driver * Fix share migration tests in gate * Update help text for some service instance config opts * Three ways to set Thin/Thick Type in Huawei driver * Squash E006 bashate warnings * Implement update_access() in NetApp cDOT drivers * Add tox fast8 option * Use ostestr to run unit test * Make consistency group timeout exception message more robust * Manage and unmanage snapshot * Stop proxying share_server_id through share in share.manager * Remove deprecated share attribute usage from manila.share.api * Get host from share['instance'] in share RPC API * Cleanup deprecation warnings from using share proxy properties in API * Add possibility to skip quota tests in Tempest * Remove default=None from config options * Add space to message in manila_tempest_tests/tests/api/test_shares.py * Fix rpcapi identifiers for better readability * Add admin network for DHSS=True share drivers * Allow DHSS=False tests to override Tempest concurrency * Remove `None` as a redundant argument to dict.get() * gluster*: add proper getter/setters for volume options * Unify usage of project name in doc to 'manila' * Removed ignored checks from tox.ini and fixed pep8 issues * Updated from global requirements * Fix tempest test for export locations API * Support devstack install without nova * EMC Isilon Driver Support For NFS Read-Only Share * replace string format arguments with function parameters * Converted MultiStrOpt to ListOpt * Fix Hitachi HNAS Driver default helper * Use existing "insecure" options when creating nova/cinder clients * Fix 
Share Replica details in the API * Share Replication API and Scheduler Support * Fixed Hitachi HNAS slow test * Replace 'stack' with $STACK_USER in devstack plugin * Replace deprecated oslo_messaging _impl_messaging * Avoid KeyError on instance_id in ensure_service_instance * Hitachi HNAS driver share shrink * LVM driver: Pass '--units g' to vgs invocation * Updated from global requirements * Fix scheduling with instance properties * Add update_access() method to driver interface * Fix issue in hacking with underscore imports * Added Keystone and RequestID headers to CORS middleware * Ext. exception handling for httplib and socket errors in Quobyte driver * Huawei: Create share from snapshot support in Huawei driver * Don't convert share object to dict on create * Fix Cinder's NoValidHostFound errors * Remove outdated pot files * Fix Devstack and Manila-ui interaction * Fix devstack function call recreate db * tempest: wait for deletion of cert rule * Bump tempest version * Fix params order in assertEqual * Removed unnecessary string conversions on Hitachi HNAS Driver * Add feature support information of Oracle ZFSSA Manila driver * extra-specs should work with string True/False * Fix db shim layer mismatches with implementation * TrivialFix: Remove deprecated option 'DEFAULT/verbose' * isoformat instead of deprecated timeutils.isotime 2.0.0.0b2 --------- * Return appropriate data on share create * Hitachi HNAS driver refactoring * Trivial Fix: fix missing import * Remove unused server_get() method * QoS support for Huawei Driver * Add LVM driver * Fix release of resources created by Tempest * Fix access rules tempest v2 client * Huawei: Ensure that share is exported * Using dict.items() is better than six.iteritems(dict) * Updated from global requirements * gluster*: refactor gluster_call * Fix pep8 failure * Fix Mutable default argument * Fix devstack in non-neutron environments * Fix usage of standlone_network_plugin * Implement export location metadata feature * Doc: Remove prerequisite: Ubuntu * Hide snapshots with no instances from listing * QoS support for shares * Huawei: Add share server support * Isilon Driver: Update Share Backends Feature Doc * Clean up removed hacking rule from [flake8] ignore lists * Fix Manila tempest tests * Adds extend_share for Quobyte shares * Update NetApp driver support matrix line * Fix response code for various NotFound exceptions * Huawei driver report pool capabilities [True, False] * Fix 'extend' API for 2.7+ microversions * Replace assertEqual(None, *) with assertIsNone in tests * Delete Share Instance of unmanaged share * Add debug testenv in tox * A tempest test in services API using unsafe assert * Cannot return a value from __init__ * Make Manila UI be installed after Horizon * Use new approach for setting up CI jobs * Add doc for share driver hooks * Add more documentation to share/driver * Fix grammatical mistake, Changed character from "an" to "a" * Huawei: Add manage share with share type in Huawei driver * Refactor share metadata tests to use DB * Replace deprecated [logger/LOG].warn with warning * Add snap reserve config option to NetApp cDOT driver * Updated from global requirements * Fix tempest case "test_delete_ss_from_sn_used_by_share_server" * Fix CI Tempest jobs * glusterfs/vol layout: remove manila-created vols upon delete_share * Use constants instead of literals in Huawei Driver * Fix unit test of ShareSnapshotNotFound * Fix handling of Novaclient exceptions * Drop MANIFEST.in - it's not needed with PBR * Replace deprecated 
library function os.popen() with subprocess * Change assertTrue(isinstance()) by optimal assert * EMC Isilon Driver Doc Update for Extend Share * [docs] Fix table elements view on page with list of supported features * Trivial: Remove unused logging import * Set timeout for parmiko ssh connection * Fix wrong flake8 exception and pep8 violations * Remove unused oslo-incubator _i18n.py from Manila * Deprecated tox -downloadcache option removed * Keep py3.X compatibility for urllib * EMC VNX: Fix the interface garbage in VNX backend * EMC Isilon Driver Support For Extend Share * HPE3PAR finds CIFS share with either prefix * Improve tempest tests for shares listing APIs * Updated from global requirements * Support standard Manila capability flags in NetApp cDOT driver * Mock out service availability check in unit test * Capability lists in Manila scheduler * HPE3PAR support for share extend and shrink * Pop off user/tenant kwargs in RequestContext init * Move the config environment variables into devstack/settings file * glusterfs: document Gluster NFS misbehavior * Change instance service default path for private key to None * Use isoformat() instead of timeutils.strtime() * EMC VNX: Add multi-pools support * Add space to message in manila/consistency_group/api.py * Remove duplicate keys from dictionary * Fix Tempest microversion comparison approach * Prevent removal of share server used by CG * HPE3PAR support for access-level (ro,rw) * Performance: leverage dict comprehension in PEP-0274 * Updated from global requirements * Document correction in quick_start.rst * glusterfs_native: fix parsing of the dynamic-auth option * Fix wrong check message * NetApp cDOT driver should support read-only CIFS shares * Do not allow to modify access for public share type * EMC VNX: Add share extend support * Allow to set share visibility using "manage" API * Remove version per M-1 release instructions * Updated from global requirements * [CI] Speed up Tempest jobs * Avoid service_instance neutron port clash in HA 2.0.0.0b1 --------- * EMC: Fix bugs when domain controller is not available * Put py34 first in the env order of tox * Move API module 'share_instances' under v2 dir * Change manila_tempest_tests to use credentials_factory * timeutils.total_seconds() is deprecated * Reorganize scheduler and merge code from Oslo incubator * glusterfs: add missing i18n import * Fix Share status precedence based on instances * doc: document the non-standard export semantics of Ganesha * Liberty doc updates for GlusterFS drivers * Add new URLs for APIs ported from extensions * Updated from global requirements * NetApp cDOT multi-SVM driver can't handle duplicate addresses * Remove mention of isilon_share_root_dir * Add share-networks validation * Simplify ping usage for service VM check in CI * Improve Tempest tests for consistency groups * Add sleep to CI hooks to avoid races * add Red Hat GlusterFS drivers feature support info * Add reno for release notes management * Delete python bytecode before every test run * Updated from global requirements * Add support of 'network_type' to standalone network plugin * Fix import of devstack functions for common CI script * Last sync to Manila from oslo-incubator * glusterfs/volume layout: indicate volume usage on volumes themselves * glusterfs/volume layout: fix incorrect usage of export_location * Refactor authorize() method in wsgi.py * Implements ensure_share() in Quobyte driver * Prevent Share operations during share migration * Fix typo on quota limit error message * 
Refactor HP 3PAR share driver to now be HPE * OpenStack typo * Added driver minimum requirements and features doc * Remove httplib2 useless requirement * Added CONTRIBUTING file in .rst format * HPE3PAR create share from snapshot fails * Updated from global requirements * EMC VNX Manila Driver Refactoring * Updated from global requirements * Port share type extensions to core API * Port admin actions extension to core API * Use oslo_config new type PortOpt for port options * Added CORS support to Manila * Split common logic of CI hooks to separate file * Port share actions to core API * Port quotas to core API * Port services to core API * remove default=None for config options * Add mount automation example based on Zaqar * Make setup.py install Manila Tempest plugin * Sync Manila Tempest plugin with latest Tempest * Port manage/unmanage extensions to core API * Updated from global requirements * Rephrase comments for Share create API * Use assertTrue/False instead of assertEqual(T/F) * Fix no-share-servers CI job * Use default Keystone API version in Devstack * Updated from global requirements * Port availability zones to core API * Generic driver: wait for common server during setup * Port used limits to core API * Updated from global requirements * Add IBM GPFS Manila driver * Fix list-availability-zones API for PostgreSQL * Fix share type model scalability for get request 1.0.0 ----- * Fix usage of dependencies * Fix usage of dependencies * Use 'False' as default value for "compression" common capability * Stop using deprecated tempest options * Make share service understand driver init failure * Fix broken unit tests * Enable extend_share in HDFS driver * Verify common server in Generic driver on startup * Updated from global requirements * Improve Manila HDS HNAS Driver Manual * Fix order of arguments in assertEqual * Fix order of arguments in assertEqual * Fix order of arguments in assertEqual * Update feature support matrix for Windows SMB 1.0.0.0rc2 ---------- * Share manager: catch exception raised by driver's setup() * Fix display of availability-zone for manila-manage command * glusterfs_native: use dynamic-auth option if available * Fix setting of "snapshot_support" extra spec for tempest * Fix deletion of error state access rules * Fix response data for API access-allow * Fix display of availability-zone for manila-manage command * glusterfs: check nfs.export-volumes with Gluster NFS + vol layout * glusterfs: manage nfs.rpc-auth-allow not being set * glusterfs vol layout: start volume cloned from snapshot * glusterfs_native: use dynamic-auth option if available * NetApp cDOT driver isn't reentrant * Can't create shares on drivers that don't support snapshots * Revert netapp_lib dependency in NetApp cDOT Manila drivers * Set defaultbranch to stable/liberty in .gitreview * Feature support matrix update for HP 3PAR * Fix `test_trans_add` for Python 3.4.3 * Remove misleading snapshot methods from Quobyte driver * Fix response data for API access-allow * Improve logging of calls in ShareManager * Use random IPs in security service tests * EMC Isilon Manila Driver Feature Support * Fix deletion of error state access rules * Fix order of arguments in assertEqual * glusterfs vol layout: start volume cloned from snapshot * Fix order of arguments in assertEqual * NetApp cDOT driver isn't reentrant * Fix mentioned DEFAULT_API_VERSION in doc * Revert netapp_lib dependency in NetApp cDOT Manila drivers * Fix `test_trans_add` for Python 3.4.3 * Adds Quobyte share backend feature support 
mapping data * Remove language about future features from driver doc * Remove LegacyFormatter from logging_sample.conf * Fix setting of "snapshot_support" extra spec for tempest * Fix some spelling typo in manual and error message * glusterfs: check nfs.export-volumes with Gluster NFS + vol layout * glusterfs: manage nfs.rpc-auth-allow not being set * Can't create shares on drivers that don't support snapshots * Add Huawei driver details in doc * Add Hitachi HNAS driver documentation * Open Mitaka development 1.0.0.0rc1 ---------- * glusterfs*: fix ssh credential options * Make Quobyte shares actually read-only when requested * Fixes a Quobyte backend call issue with a wrong field name * Fix error response when denying snapshot creation * Fix 'cover' tox job * glusterfs: fix gluster-nfs export for volume mapped layout * Updated from global requirements * Fix experimental=True for view in microversion 2.5 * glusterfs_native: Hardwire Manila Host CN pattern * Fix HDS HNAS manage incorrect share size * glusterfs*: amend export location * Fix HDS HNAS Create from snapshot ignoring Size * Fix pool_list filter tests to match pools exactly * Non-admin user can perform 'extra-specs-list' * Fix improper handling of extending error * Update feature support mapping doc for NetApp cDOT * Remove IBM GPFS driver due to lack of CI * Add 'snapshot_support' attr to share details * Fix get_stats to return real used space in HNAS * Add new features description in Huawei doc * Fix API version history in Huawei driver * Fix task_state field shown on API < 2.5 * glusterfs: Fix use of ShareSnapshotInstance object * NetApp cDOT driver should prefer aggregate-local LIFs * Fix HDS HNAS snapshot creation tracking * Return share_type UUID instead of name in Share API * doc: turn ascii art tables into proper reST grid tables * Make scenario tests able to run with DHSS=False * Fix missing value types for log message * glusterfs_native: Fix typo for protocol compatibility * Fix typo in test_hook * Fix Share Migration tempest tests * Remove support for 'latest' microversion * Adds retry function to HNAS driver * Corrects capabilities returned by Quobyte Manila driver * Fix create snapshot API in Huawei driver * Check the snapshot directory before copy it * Remove HDS SOP driver due to lack of CI * Missing check in ShareManager::manage_existing() * Add v2 Manila API path as base for microversions * Huawei driver: fix reports reduplicate pools * Enhance base driver checking if a method was implemented * Updated from global requirements * Allow service image download to be skipped * Use 'False' as default value for "dedupe" common capability * Capacity filter should check free space if total space is unknown * Fix usage of novaclient * NetApp cDOT driver with vserver creds can't create shares * Fix unstable unit test 'test_get_all_host_states_share' * Fix concurrency issue in tempest test * Fix description in Huawei driver * Replaces xrange() with range() for py 2/3 compatibility * Updated from global requirements * Consistency groups in NetApp cDOT drivers * Fix keypair creation * Add functional tests for Manila consistency groups * Place tempest microversions test module in proper place * Consistency Group Support for the Generic Driver * Add Share Migration tempest functional tests * Share Migration support in generic driver * Add Share Migration feature * glusterfs: directory mapped share layout * glusterfs: volume mapped share layout * glusterfs/layout: add layout base classes * Add Consistency Groups API * Scheduler 
changes for consistency groups * Add DB changes for consistency-groups * Use Tempest plugin interface * Make devstack plugin independent from default Identity API version * glusterfs-native: cut back on redundancy * glusterfs/common: refactor GlusterManager * glusterfs*: factor out common parts * Add share hooks * Add possibility to setup password for generic driver * Use devstack functions for registering Manila * devstack plug-in to reflect new manila-ui plug-in * HP 3PAR extra-spec prefix needs to be hp3par * Fix the typo "version" * Updated from global requirements 1.0.0.0b3 --------- * Add attributes 'name' and 'share_name' to ShareSnapshotInstance * Fix data copying issue in DB migration 1f0bd302c1a6 * HP 3PAR driver handles shares servers * Updated from global requirements * Fix failing Quobyte unit test * Remove instances of "infinite" capacity from Manila * Replace thin/thick capabilities with thin_provisioning * Add Share instances Admin API * Add Windows SMB share driver * Remove ununsed dependency: discover * Implement snapshot tracking in HDS HNAS driver * Use Share Instance ID in 'name' property * Ignore git backup merge files * Tempest: wrong assertion on the number of shares created * Ignore unavailable volumes when deleting a share * Updated from global requirements * New Manila HDS HNAS Driver * Tempest: wait for access rule to be deleted * Fix Tempest tests targeting user based access rules * glusterfs_native: Add create share from snapshot * Generic driver:Create Cinder volume in correct AZ * Reduce dependency to tempest: exceptions * Add possibility to filter back ends by snapshot support * Add tempest tests for "cert" based access type * Clean up admin_actions API extension unit tests * Use service availability_zone for Share Server VM * Add availability zones support * Add methods for share instances in Share API * Add compression in common capabilities doc * HP 3PAR add more info to the share comment * Add tempest tests for REST API microversions * Huawei driver support smartcache and smartpartition * Manila experimental REST APIs * Fix compatibility with sqlalchemy 0.9.7 * Updated from global requirements * Fix incorrect use of snapshot instances * HP 3PAR reports capabilities * Lazy Load Services * Replace assertEqual(None, *) with assertIsNone in tests * Updated from global requirements * Fix incorrect variable name in some exception class * Update NetApp cDOT Manila drivers to use netapp_lib * Add manage/unmanage support to NetApp cDOT driver * Service Instance: Add instance reboot method * Add WinRM helper * Common capabilities documentation * Fix Neutron config setting in pre_test_hook * Add share instances and snapshot instances * Fix extend share API in Huawei driver * Huawei driver support dedup, compression, thin and thick * Fix the log level in scheduler manage * Enable Tempest tests for glusterfs/hdfs protocols * Support shrink_share in NetApp cDOT drivers * Fix sample config file generation * Change huawei driver send REST command serially * Support extend_share in NetApp cDOT drivers * Fix for Isilon driver failing to connect * Updated from global requirements * Fix bug to locate hdfs command in HDFS native driver * Fix AttributeError without share type provided * Implement Manila REST API microversions * Add retry logic when delete a NFS share in VNX * Cleanup shares created by Tempest * Add py34 to test environment to tox.ini * Allow Tempest to skip snapshot tests * Add retries for deadlock-vulnerable DB methods * Adding extend share support in IBM 
GPFS Driver * Make QuobyteHttpsConnectionWithCaVerification py3 compatible * Add SSL middleware to fix incorrect version host_url * Updated from global requirements * Fix HTTP headers case for API unit tests * Fix bug to run command as root in HDFS driver * Fix typos in neutron_network_plugin.py * Remove incorrect URLs from jenkins.rst * Remove ordering attempts of 'unorderable types' * Fix 'hacking' unit tests for py3 compatibility * Skip unit tests for SSL + py3 * Fix string/binary conversions for py34 compatibility * Make 'utils.monkey_patch' py3 compatible * Decouple some of the Service Instance logic * Wrap iterators and 'dict_items' for py34 compatibitity * Update Documents to use HDFS Driver * Fix two typos on documentation and one typo on CLI help * Stop using deprecated contextlib.nested * Fix imports for py34 compatibility * Fix exceptions handling for py34 compatibility * Rename from il8n.rst to i18n.rst * Remove copyright from empty file * Fix HP3PAR extra-specs scoping prefix bug * Updated from global requirements * Support manage_existing in Huawei driver * Fix HP3PAR SMB extra-specs for ABE and CA * Generic: add service instance mgr set up method * Fix Generic driver share extend * Replace py2 xrange with six.moves.range * Fix integer/float conversions for py34 compatibility * Fix dictionary initialization for Python 3 compatibility * Replace (int, long) with six.integer_types * Fix list creation * Replace dict.iteritems() with six.iteritems() * Add doc share features mapping * Replace 'types.StringTypes' with 'six.string_types' * Replace '__metaclass__' with '@six.add_metaclass' * Fix ZFSSA driver for py34 compatibility * Listen on :: instead of 0.0.0.0 by default 1.0.0.0b2 --------- * Fix slow unit tests * Remove Cinder leftover unit tests * Eventlet green threads not released back to pool * Add client_socket_timeout option to manila.wsgi.Server * Catch error_deleting state for more resources than just shares * Updated from global requirements * Make coverage tox job fail when test coverage was reduced * Add test coverage for periodic tasks * Change _LE to _LW (at manila/share/manager.py) * Fix 'extend_share' in generic driver * Fix unit tests for quobyte * Support shrink_share in Huawei driver * GlusterFS: fix retrieval of management address of GlusterFS volumes * Explicit backend connect call in Quobyte RPCs * Enable multi-process for API service * Updated from global requirements * Make config opt 'enabled_share_protocols' verification case insensitive * glusterfs_native: prefix GlusterFS snap names with "manila-" * glusterfs_native: delete_snapshot(): find out real GlusterFS snap name * glusterfs_native: fix delete share * Reuse 'periodic_task' from oslo_service * Implement shrink_share() method in Generic driver * doc: fix typo s/virutalenv/virtualenv/ * Cleanup DB API unit tests * Add negative tests for admin-only API * Updated from global requirements * HP 3PAR uses scoped extra-specs to influence share creation options * Retry _unmount_device in generic driver * Add 'retry' wrapper to manila/utils.py * Huawei driver support storage pools * Updated from global requirements * Modify confusing name in Huawei driver * Use all types of migrations in devstack installation * Close DB migration sessions explicitly for compatibility with PyMySQL * Delete redundant period in ManilaException messages * Use soft_delete() methods in DB api * Use uuidutils to generate id's in DB api * Add license header to migrations template * Remove models usage from migrations * Huawei manila 
driver support multi RestURLs * EMC VNX: Fix the total capacity for dynamic Pool * Updated from global requirements * Updated from global requirements * Add access-level support in VNX Manila driver * Enable Manila multi-SVM driver on NetApp cDOT 8.3 * Support for oversubscription in thin provisioning * Fix for SchedulerStatsAdminTest fails on timestamp * Print devstack command traces before executing command * Fix unit tests for compatibility with new mock==1.1.0 * Change "volume" to "share" in filter and weigher * Updated from global requirements * Remove unneeded OS_TEST_DBAPI_ADMIN_CONNECTION * Remove duplicated options in manila/opts.py * More Manila cDOT qualified specs * Add PoolWeigher for Manila scheduler * Remove unused manila/openstack/common/eventlet_backdoor.py * Updated from global requirements 1.0.0.0b1 --------- * Use loopingcall from oslo.service * Updated from global requirements * Use new manila-service-image with public-key auth * Allow drivers to ask for additional share_servers * HP 3PAR driver config has unused username/password * Huawei manila driver support Read-Only share * Override opportunistic database tests to PyMySQL * Support share-server-to-pool mapping in NetApp cDOT driver * Remove unused files from oslo-incubator * Update version for Liberty 1.0.0a0 ------- * Support extend_share in Huawei driver * Fix incompatiblity issue in VNX manila driver * Updated from global requirements * Updated from global requirements * Reduce amount of tempest threads for no-share-servers jobs * Add retry on volume attach error in Generic driver * HP 3PAR Add version checking and logging * Bump supported tempest version * Share_server-pool mapping * Replace it.next() with next(it) for py3 compat * Fix tempest ShareUserRules* tests * Updated from global requirements * Stop using deprecated 'oslo' namespace * Use oslo.utils to get host IP address * Remove deprecated WritableLogger * Make required function arguments explicit * Remove unused contrib/ci files * Fix docstrings in tempest plugin * Updated from global requirements * Add share shrink API * Implement tempest tests for share extend API * Implement extend_share() method in Generic driver * Huawei manila driver code refactoring * Transform share and share servers statuses to lowercase * Updated from global requirements * Fix policy check for API 'security service update' * Remove unused attr status from models * Drop incubating theme from docs * Make devstack install manila-ui if horizon is enabled * glusterfs: Edit doc and comments * Simplify generic driver with private data storage API * Provide private data storage API for drivers * Remove usage of utils.test_utils * Remove ServiceClient from share_client * Switch from MySQL-python to PyMySQL * Add share extend API * Export custom Share model properties with _extra_keys * Release Neutron ports after share server deletion using generic driver * Make generic driver use only ipv4 addresses from service instances * Fix share-server resources cleanup in generic driver * ganesha: Add doc * Update Quickstart guide * NetApp cDOT driver fails Tempest cleanup on clone workflows * Updated from global requirements * Add doc for network plugins * Fix 'AllocType' read failure in Huawei driver * Sync tempest plugin with latest tempest * Updated from global requirements * Improve ShareServer DB model * Updated from global requirements * Add multi vm scenario test * Imported Translations from Transifex * Drop use of 'oslo' namespace package * Updated from global requirements * EMC: 
Remove unnecessary parameter emc_share_driver * Add doc with basic deployment steps * Move to the oslo.middleware library * Clean up redundant code and nits from EMC VNX driver * Remove unused oslo-incubator modules * EMC VNX Manila Driver Feature Support * Allow overriding the manila test regex * Updated from global requirements 2015.1.0 -------- * NetApp cDOT driver clones NFS export policy * Add config_group_name for NeutronNetworkHelper * Remove ping check from basic scenario test * Sync contrib/tempest to newer state * Fix for the deletion of an error share server * NetApp cDOT driver clones NFS export policy * Sync oslo-incubator code * EMC VNX Driver: Fix typo issues * Remove passing DB reference to drivers in Share Manager * Use oslo_policy lib instead of oslo-incubator code * Use oslo_log instead of oslo-incubator code * Use lib lxml for handling of XML request * Updated from global requirements * Remove direct DB calls from glusterfs_native driver * Release Import of Translations from Transifex * Remove maniladir() and debug() function from utils * Use identity_uri for keystone_authtoken in devstack * Switch to new style policy for test policy * Add mount/umount in scenario tests * update .gitreview for stable/kilo * Update doc-strings for snapshot methods in Share Driver * Use openstackclient in devstack plugin * Remove direct DB usage from NetApp driver * Move response code verification to share client * Use entry_points for manila scripts * Switch to new style policy language 2015.1.0rc1 ----------- * Remove Limited XML API Support from Manila * Prevent hanging share server in 'creating' state * More flexible matching in SSL error test * Imported Translations from Transifex * Mock out base share driver __init__ in EMC driver * Add object caching in manila REST API requests * glusterfs_native: Fix Gluster command call * glusterfs, glusterfs_native: perform version checks * Open Liberty development * Add Glossary with basic Manila terms * Restrict access only to vm ip * NetApp cDOT driver is too strict in delete workflows * Adding configuration instructions in huawei_nas_driver.rst * Update openstack-common reference in openstack/common/README * Prevent share server creation with unsupported network types with cDOT * Fix log/error message formatting * Updated from global requirements * Add segmentation ID checks for different segmentation types * glusterfs_native: make {allow,deny}_access non-destructive * glusterfs_native: negotiate volumes with glusterd * NetApp cDOT driver uses deprecated APIs for NFS exports * Automatic cleanup of share_servers * Fix fields 'deleted' in various DB models for PostgreSQL compatibility * Add tempest coverage for share type access operations * Enable developers to see pylint output * Allow overwriting some Manila tempest settings in CI jobs * Set share-type on share created from snapshot * cDOT multi-SVM driver may choose unsuitable physical port for LIFs * cDOT driver should split clone from snapshot after creation * Replace SQL code for ORM analog in DB migration scripts * Delete skipped tempest tests that won't be enabled * NetApp cDOT drivers should not start without aggregates * IBM GPFS Manila Driver Docs - update * Switch to v2 version of novaclient * Backslashify CIFS share export paths for Generic * NetApp cDOT multi-SVM driver should work with non-VLAN networks * NetApp cDOT multi-SVM driver should not start with cDOT 8.3 * Fix CIFS export format in EMC VNX driver * Forbid unmanage operation for shares with snapshots * Fix deletion of 
export locations * Add initial scenario test for Manila * Fix setting of share name and description with manage API * HP 3PAR driver documentation * Fix setting of extra specs for share types * Huawei NAS driver returns CIFS export locations in wrong format * IBM GPFS Manila Driver Docs * Fix common misspellings * Add share state verification for API 'unmanage' * Updated from global requirements * Sync tempest plugin with latest tempest * Make generic driver update export location after manage operation * Deal with PEP-0476 certificate chaining checking * Fix manage operation in generic driver * Imported Translations from Transifex 2015.1.0b3 ---------- * Implement manage/unmanage support in generic driver * cDOT driver should report all share export locations * Enable bashate during pep8 run * Allow updates to export locations * NFS based driver for Quobyte file storage system * glusterfs_native: partially implement snapshot * Fix issues with get_pool scheduler API * Use SoftDeleteMixin from oslo.db * Imported Translations from Transifex * Fix cleanup order for tempest test * Enable downgrade migrations in unit tests * Allow shares to have multiple export locations * Add basic manage/unmanage share functionality * Set proper attr "deleted" for ShareTypes model * Imported Translations from Transifex * EMC Isilon Manila Driver Docs * HP3PAR driver log the SHA1 for driver and mediator correctly * Add public attr for shares * Imported Translations from Transifex * Add ro level of access support to generic driver * Remove CLI tests from tempest plugin * Manila Scheduler should read full driver capabilities * NetApp cDOT driver should not create useless export-policy rule * Manila cDOT driver should use loopingcall for ASUP report timing * EMC Isilon Manila driver * Implement private share_types * Updated from global requirements * Always allow delete share-network when no shares exist * Imported Translations from Transifex * Add nova network plugin * Manila cDOT qualified specs * Make extra spec driver_handles_share_servers required * Failed to load xml configure file * Updated from global requirements * Allow tempest to skip RO access level tests * Manila cDOT netapp:thin_provisioned qualified extra spec * Replace TEMPEST_CONCURRENCY with Manila-specific var * doc: Add glusterfs_native driver developer doc * Fix example style in admin doc * Imported Translations from Transifex * Improve error handling in GPFS driver * Updated from global requirements * Add doc for hdfs_native driver * Remove copypasted export_location field from snapshots * HP 3PAR use one filestore per tenant * Single-SVM Manila driver for NetApp Clustered Data ONTAP * Remove hacking exception for oslo.messaging import * Remove Python 2.6 classifier * Remove obsolete option: enabled_backends * Manila access-allow API doesn't accept backslash * Add temporary workaround to scheduler * Add doc for Dynamic Storage Pools for Manila scheduler * Fix config opts description for class NeutronSingleNetworkPlugin * Add snapshot gigabytes quota * Use devstack plugin in CI hooks * HP 3PAR driver fix for delete snapshot * Add Nova-network support to service_instance module * Updated from global requirements * Sync tempest plugin * Manila cDOT storage service catalog * Add devstack plugin * Generic Driver image supported protocols * Updated from global requirements * glusterfs: add NFS-Ganesha based service backend * ganesha utils: allow remote execution as root * Remove left-over modules from Cinder * Add share_type_default() method to 
API * Add support of default share type * Support Manila pools in NetApp Clustered Data ONTAP driver * Move definition of couple of config opts to proper module * Add support of nova network for share-networks API and DB * Make listing of networks compatible for neutron and nova in devstack * ganesha: fix execute call using invalid argument * Imported Translations from Transifex * Rename volume_type to share_type * Imported Translations from Transifex * Add possibility to enable/disable some share protocols * Add standalone network plugin * Add possibility to define driver mode within pre_test_hook for CI * Skip multisvm tempest tests for singlesvm setup * Correct the share server's db info after its deletion * Add support for HDFS native protocol driver * Fix cinderclient compatibility of list filtering by name * Fix spelling mistake * Fixed spelling mistake in tests * Manila NetApp cDOT driver refactoring * glusterfs: Add doc * Imported Translations from Transifex * fix case sensitivity * Fix generation of config sample * Use oslo_log lib * unify some messages * HP 3PAR Driver for Manila * Do not instantiate network plugin when not used by driver 2015.1.0b2 ---------- * Pool-aware Scheduler Support * Implement additional test for db migrations * Updated from global requirements * Add share driver for HDS NAS Scale-out Platform * Replace legacy StubOutForTesting class * Add unit test for volume types * Add CI job support for second mode of Generic driver * Implement additional driver mode for Generic driver * ganesha: fix resetting of exports * Remove workaround for Nova VM boot bug * Add tracing facility to NetApp cDOT driver * Remove startswith for share_proto check * Remove copy-pasted code for fake-share * driver: Fix ganesha config option registry * Workaround Nova VM boot bug * Add access levels for shares * Imported Translations from Transifex * Add factory for NetApp drivers * Updated from global requirements * Search snapshot by ID instead of name in Huawei driver * Fix documentation for some Ganesha config variables * Add Neutron single network plugin * Add unit test for quota remains functionality * Switch to using oslo_* instead of oslo.* * utils: Allow discovery of private key in ~/.ssh * Updated from global requirements * Do not use router for service instance with direct connect * Port cinder EMS and ASUP support to manila * Adapt readme to usual structure * glusterfs: add infrastructure to accommodate NAS helpers * Fix tempest pep8 failures * Release resources in tempest test properly * Replace string driver modes with boolean value * Adding required rootwrap filters for GPFS driver * Add doc for Huawei driver * Fix pep8 error E265 in wsgi * fix typo in config.py * fix typo in nova.py helpline * fix typo in rpc.rst * Fix typo "authogenerate" in manila-manage * Updated from global requirements * Fix searching mechanism of share-networks within tempest * Fix small typo in 70-manila.sh * Change default migration in "manila-manage db downgrade" command * Add manila.conf.sample to .gitignore * Fix deletion of share-server within Generic driver * Fix devstack compatibility * Reuse network resources in share-server creation test * Updated from global requirements * Add share driver for Huawei V3 Storage * Make Tempest tests use networks only from same project * Refactor tempest test 'test_create_share_with_size_bigger_than_quota' * Sync tempest plugin with latest Tempest * Update message for exception ShareNetworkNotFound * Update documentation for tempest integration * Add error 
suppressing to isolated creds cleanup in Tempest plugin * Updated from global requirements * Fix handling of share-networks with single_svm drivers * Set pbr 'warnerrors' option for doc build * Fix nit in tempest naming * Fix documentation build * Imported Translations from Transifex * Fix TypeError in tempest retry functionality * Fix using anyjson in fake_notifier * Fix typo in db migration test function name * Use Cinder v2 API within Generic driver * Add driver mode attr definition for all drivers * Fix concurrency problem in getting share network in Tempest * Make it possible to update tempest conf in all CI Tempest jobs * Use oslotest.base.BaseTestCase as test base class * Add possibility to create lots of shares in parallel for tempest * Add service id to information provided by API * Raise error immediately for undeletable share in tempest * py3: use function next() instead of next() method on iterator objects * Allow deleting share with invalid share server in generic driver * Rename share driver stats update method * Remove unsed python modules from requirements * Remove unused conf option 'fake_tests' * Make tempest cleanup errors be suppressed in all CI jobs * Add retries for share creation within Tempest plugin * Remove unused sslutils module * Improve share driver mode setting * py3: use six.moves.range instead of xrange * py3: use six.moves.urllib.parse instead of urlparse * Use lockutils from "oslo concurrency" lib * Remove non-active host from host_state_map * Strip exec_dirs prefix from rootwrap filters * Add possibility to suppress errors in Tempest plugin cleanup * Make Tempest repo stable for Manila * Use uuidutils from oslo.utils * Cleanup manila/utils.py * Remove configs sql_connection and sql_connection_debug * Remove unused configs pybasedir and bindir * Remove unused connection_type config * Fix tempest test with share server listing with no filters * Improve tempest share server filtering * Increase quotas and number of threads for tempest * Use oslo.context lib * Imported Translations from Transifex * Add missing imports for sample config generation * Fix tempest compatibility for network client * Fix driver mode opt definition * Adds Oracle ZFSSA driver for Manila 2015.1.0b1 ---------- * ganesha: NFS-Ganesha instrumentation * Add driver mode interface * Updated from global requirements * Updated from global requirements * Move networking from share manager to driver interface * Workflow documentation is now in infra-manual * Fix error message in share delete method * glusterfs: create share of specific size * Fix metadata validation in share api * Fix devstack plugin custom config opt setting * Enhance devstack plugin * Update EMC Manila driver framework using stevedore * Alternative way to import emc.plugins.registry * Fix wrong mock assertions in unit tests * Release network resources properly * Updated from global requirements * Imported Translations from Transifex * Add support for volume types with Generic driver * Fix H302 rule after release of oslo.concurrency 0.3.0 * Fix for debugging m-shr in PyCharm * Updated from global requirements * Fix tempest compatibility for cli tests * Fix context.elevated * Updated from global requirements * Updated from global requirements * Remove obsolete methods from tempest service client * Switch to oslo.concurrency for processutils * Updated from global requirements * Use oslo.utils.netutils function to set tcp_keepalive * Fix couple of nit picks * Use keystonemiddleware and manila.conf for config * Imported 
Translations from Transifex * Updated from global requirements * Fix share manager to save data after driver error * Adding GPFS Manila driver * Remove object in wsgi LOG.info * Fix share network id in tempest test * Convert files to use _LE and friends * Imported Translations from Transifex * Fix concurrency issue in security-service tempest test * Sync Tempest plugin with latest Tempest changes * Improve share-network list API filtering * Updated from global requirements * Don't translate LOG messages in testsuite * Add admin doc for multiple backends configuration * Remove gettextutils * Use proper value for osap_share_extension * Refactor shares client init in Tempest plugin * Delete unused versionutils module * Sync with oslo-incubator * Updated from global requirements * Use oslo.utils - remove importutils usage * Switch to oslo.config * Use oslo.serialization * Use oslo.utils * Silence tox warning * Add manila specific hacking checks * Remove extra flake8 args * Sync with global requirements * Improve share snapshots list API filtering * Use oslo.i18n * Use six instead of str for exceptions * Add info to cDOT driver doc * Fix tempest compatibility * Add new search options for security service * Fix doc build * Add Admin doc for an Introduction to Manila * Add share server id field in shares detail info * Improve share list API filtering * Fix doc build warnings so docs build clean * Remove extraneous vim editor configuration comments * Add share network id field in share server info * Fix tempest compatibility * Use 'generate_request_id' func from common code * Remove vim headers * Add info to generic driver doc * Open Kilo development * Add doc for EMC VNX driver 2014.2 ------ * Fix creation of share from snapshot * Specify the correct Samba share path * Fixes several typos (Manila) * Fix KeyError while creating share from snapshot * Fix references in jenkins.rst * Update translation information * Mention Samba in intro.rst * Add doc for an Introduction to Manila 2014.2.rc1 ---------- * Add support for working with multiple glusterfs volumes * Minor Manila doc change * Make copyrights in docs as comments instead of page content * Update challenges in the developer docs * Update naming from clustered mode to cDOT * Fix doc build errors in db/sqlalchemy/models.py * Improve documentation build * Add doc for netapp cluster mode driver * Add doc for generic driver * Fix using key for ssh * Fix getting ssh key if ssh path is not set * Rename stackforge to openstack in docs * Move from stackforge to openstack * Fix two functional tests within tempest_plugin * glusterfs: edit config option specifying volume * Change exception thrown by db method * Fix some LOG.debug invocations * Fix Invalid pathname for netapp cmode driver * Make block devices mounts permanent within service instances * Stop using intersphinx * Increase share-network default quota * Don't allow security service to be updated if used * Move db related unittests to proper places * Fix update of backend details in cmode driver * Update shares and snapshot create to show details * Use oslosphinx and remove local copy of doc theme * Move driver unittest modules to proper place * Move unittests related to manila/share/*.py modules to proper place * Make NFS exports in generic driver permanent * Fix ssh connection recreation in generic driver * Drop a forgotten fragment * warn against sorting requirements * Fix version number to Juno 2014.2.b3 --------- * Add support for glusterfs native protocol driver * Fix some LOG 
invocations and messages * EMC VNX Manila Plugin * Add support for cert based access type * Make m-shr more stable on start up * Fix scheduled share creation with generic driver * Add "." at end of exceptions * py3: Use six module for StringIO imports * Update share_network obj after db update * Transform Exception args to strings when exceptions * Fix string concatenation * glusterfs: Fix docstring * Fix concurrent policy issue in unittest * Remove redundant glance config options * Improve help strings * Remove hash seed dependency for unittests * Updated usage of locks * Fix creation of cifs entry in cmode driver * Flake8: Fix and enable H405 * Forbid to attach security services with same type to share network * Flake8: Fix H501 * Flake8: Fix and enable H404 * Flake8: Fix E128 * Fix device mount/umount methods in generic driver * Change service VM connectivity * Use Alembic instead of Sqlalchemy-migrate in Manila * Flake8: Fix H302 * Remove NetApp 7-mode driver as obsolete * Flake8: Fix F841 * Remove bin/manila-rpc-zmq-receiver * Cmode, CIFS shares, fix allowed share access type * Fix obtaining of service VM ip * EMC Manila driver * Add specific docs build option to tox * Flake8: Fix some occurences of F841 * Flake8: Fix E126 and E127 * Flake8: Fix F401 * pep8: Enable H303 and F403 * Sync requirements with global requirements * Remove extra setenv from tox.ini * Enable E121,E122,E123,E124,E125,E129 flake8 tests * Refactor NetApp Cmode driver * Use opportunistic migrations * Add config option for share volume fs type * Fix failing of unittests in one thread * Fix H402 hacking rules * Fix pep8 issues in manila/tests * Clean up devstack plugin after LVM driver removal * Remove LVM driver * Fix pep8 failures in manila/{db,volume} * Handle missing config options for tests gracefully * Add oslo.utils and oslo.i18n libs to requirements * Issue one SQL statement per execute() call * Further pep8 fixes * Fix pep8 F811 and F812 * Rename 'sid' to 'user' in access rules and sec services * Decrease amount of threads for Tempest tests * Flake8 in bin/* * Remove manila-clear-rabbit-queues * Sync scripts with oslo-incubator * Replace utils.config_find with CONF.find_file * Use common code within manila.policy module * Fix bad indentation in manila * Refactor cifs helper for generic driver * Fix share status waiter within tempest * Fix update of share with share-server-id * Use common config generator * Add config module from oslo-incubator * Remove dangerous arguments default * Remove unused imports * Fix F402 pep8 * Make flake8 ignore list more fine granular * Sync common modules from Oslo * Add share_server_id filter option to 'get_all' share API method * Fix tempest compatibility * Fix pep8 F821 * Update requirements file matching global requ * glusterfs: Edit comments and docstrings * glusterfs: Modify interface methods * Fix setting up security-services in Cmode * Update pep8 testing * Added calculating capacity info in Cmode * Added calculating capacity info to 7mode driver * Adds undocumented policies and defaults in policy.json * Add check on eventlet bug #105 (ipv6 support) * Remove reference to 'in-use' state in share manager * Enable check for H237 * Use oslo.rootwrap library instead of local copy * py3.x: Use six.text_type() instead of unicode() * py3: use six.string_types instead of basestring * Use oslo.db in manila * Fix compatibility with tempest project * README merge * Refactor test framework * Add interprocess locks to net interfaces handlers * Fix obtaining of service instance ip * 
Setup for translation * Enabled hacking checks H305 and H307 * Fix service subnet capacity within service_instance module * Fix metaclasses assignment * Enable hacking check H236 * Add share-server-delete API * Change get_client_with_isolated_creads() to *_creds() * Sync with global requirements * Fix E112 expected an indented block * Fix E713 test for membership should be 'not in' * Fix E131 continuation line unaligned for hanging indent * Address H104 File contains nothing but comments * Fix E251 unexpected spaces around keyword / parameter equals * Fix E265 block comment should start with '# ' * Fix usage of ProcessExecutionError exception * Enabled hacking check H403 * py33: use six.iteritems for item iterations (part2) * Cleanup manila.utils module (part1) * glusterfs: Implement methods to update share stats * glusterfs: Fix issues in backend instrumentation * Enabled hacking check H401 * Use ssh_execute function from common code * Use execute() and trycmd() functions from common code * Use looping calls for running services from common code * Fix typo in error message for share_export_ip * py33: use six.iteritems for item iterations (part1) * Change logging level AUDIT to INFO * Teardown/setup server enhancements * Removed custom synchronized in service_instance * Migrate to oslo.messaging instead of commom/rpc * Removed redundant methods from singletenant drivers * Replace python print operator with print function (pep H233, py33) * share.manager: Modify allow_access method call * Delete skipped quota tests as invalid * Add CLI tests for share-server-list API * Added retrieving vserver name from backend details * Update ci scripts * service_instance: Add lock to creation of security_group * Enable skipped tests from test_capacity_weigher.py * Add using share-server backend details in Generic driver * Fixed passing share_server to teardown_network * Fix create_share_from_snapshot method * Added tempest tests * Cleaned up exception module and added unittests * Check share net ids when creating share from snapshot * Update manila's docs * Replace usage of unittest module with manila.test * Fix tempest test's rare concurrent issue * Improved share_servers db api * Fixed passing share_server to ensure_share * Rewrited mox tests to mock (part 2) * Fix lvm driver to be compatible with share manager * Rewrited mox tests to mock (part 1) * Replace json with jsonutils from common code * Removed redundant code for glance * Use testtools module instead unittest module * Cleanup resources with tempest more reliably * Added service_instance_locks directory to .gitignore * Added force-delete action to admin actions * Update contrib/ci bash scripts * devstack: strip obsolete part of m-shr instumentation * Sync common modules from Oslo * Several fixies to tempest plugin * Moved exports needed for tempest into post_test_hook * Fix some cosmetic issues in README.rst * Fixed ci bash scripts * Remove explicit dependency on amqplib * Added share server api * Removed redundant dependency of hp3parclient * Add multibackend test suite for tempest plugin * Added bash scripts for ci jobs * Added multibackendency to devstack plugin * Switch to Hacking 0.8.x * Use Python 3.x compatible except construct * assertEquals is deprecated, use assertEqual * Share server details * Added locks into service_instance module * Removed redundant option from devstack plugin * Separated locks for cifs and server operations * Share servers implementation * Made safe get of security_groups with nova's response * Made 
service_instance consider driver's config * Set locks for shared resources in generic driver's cifs helper * change assertEquals to assertEqual * change assert_ to assertTrue * Added handling of secgroup for service_instance module * set default auth_strategy to keystone * Enabled ip rules tests for cifs in tempest * Increase default quota for share networks from 3 to 5 * debug level logs should not be translated * tempest plugin update * Fixed tempest plugin compatibility * Fixed possibility to have more than 25 shares with generic driver * Retrieve share_backend name from config on get_share_stats * Fixed retrieving export ip address in Cmode drv * Made template for service VM unique using generic driver * Fixed usage of config option in generic driver * Replaced manila.conf.sample with README.manila.conf * Added API to manage volume types * Fixed rise of Duplicate exception for DB * Added volume_types to DB * Removed unused module from unittests * Raise max header size to accommodate large tokens * Added cli tests for service-list request * Allowed devstack not fail if couldn't stop smb service * Removed redundant keystone token usage * Refactored service-list filters * Fixed tempest plugin compatibility with master * Checking security service is not used while deleting * Added creation of secgroup for service vms in devstack plugin * Removed unique constraint for share networks * Added type field to security services index list * Update tempest plugin for latest changes of manila * Made max limit name for snapshots unique * Made limits usages names unique * Fixed ownership for service volumes * Fixed quotas for share-networks * Fixes bug with share network deactivation * Added extension that provides used resources in absolute limits * Fixed detail list for shares * Added quota for share-networks * Teardown share network in Netapp Cmode driver * Fixed detail list for security-services * Fix venv installation for run_tests.sh * Updated generic_driver and service_instance with activation * Added Cmode driver * Fixed race condition in tempest plugin * Fixes bug with simultaneous network modification * Fixes bug with keypair creating * Update tempest plugin, make it more stable * Add exception to tempest plugin * Splits service_instance module from generic driver * Make functions in manila uniquenamed * Fixed creation of cinder's volumes * Add share network activate and deactivate * Separate action and creation tests in tempest * Add handling of share-networks to tempest plugin * Fix sequence of called functions in devstack plugin * Update policy.json * Enforce function declaration format in bash8 * Switched devstack plugin to use generic driver * DevStack plugin: make source dirs configurable * Fixes bug with getting hostname * Fix DevStack plugin's source collection issue * Let DevStack plugin get python executable path * Removed swiftclient from dependencies * Use uuid instead of uuidutils * Update plugin for tempest * Add detail filter for share-network-list * Add function cidr_to_netmask to utils * Fixes bug with path to ssh keys * Fixed detail list for security-services * Removed cinder artifacts in devstack plugin * Added to devstack plugin passwords for services * Generic driver * Fix devstack plugin's usage of RECLONE option * Removes use of timeutils.set_time_override * Adds modules for managing network interfaces for generic driver * Extends neutron api with methods needed for generic driver * Adds nova api needed for generic driver implementation * Adds cinder api needed for 
generic driver implementation * Squash all migrations into one * Add network id verification on share creation * Add policy checks in share networks API * Fix policy.py * Updated from global requirements * Fix bad calls to model_query() * Change manila DB to have working unique constraint * Change 'deleted' to Boolean in project_user_quotas * Fixes handling of duplicate share access rule creation * Fixes empty network_info for share * Use actual rootwrap option in manila.conf instead deprecated one * Fix xml response for create/update security service * Add 'password' field to the security service * Adds network creation to ShareManager * Checking if access rule exists in share api * Add share's networks API * Add share's networks DB model, API and neutron support * Fix manila's devstack plugin for using Fedora/CentOS/RHEL distro * Add manila's tempest-plugin * Security service API * Add security service DB model and API * Remove redundant options in devstack plugin * Fix bug with full access to reset-state * glusterfs: Add GlusterFS driver * Fix manila's devstack plugin * Adds an ability to reset snapshot state * Adds validation of access rules * Adds admin actions extension to provide reset-state command * Refactoring driver interfaces * Move NetAppApiClient to separate module * Moved netapp.py from drivers to drivers/netapp * Insert validation of losetup duplicates * Remove redundant options for manila * Place devstack files to proper dirs * Fixes inappropriate size of metadata value * Adds 'metadata' key to list of options for xml responses * Adds an ability to manage share metadata * Added Neutron API module * Add consume_from_share method to HostState class * Add devstack integration * Update requirements.txt for keystoneclient * Support building wheels (PEP-427) * Update openstack/common/lockutils * Remove unused manila.compute.aggregate_states * Remove obsolete redhat-eventlet.patch * Added per user-tenant quota support * Change wording of short description * Removing deprecated using of flags module from project * Fixed share size validation while creating from snapshot * Fixed xml response for share snapshot * Added share size checking if creating from snapshot * Fixed values passed to share_rpcapi.create_share * Remove d2to1 dependency * Update functionality implementation for manila api * Fixed policy check for manila api * Added XML serialization for access actions * Check policy implementation for shares api * Update README with relevant Manila information * Fix xml response content for share list/show * Add .gitreview file * Unittests failure fix * Fixed snapshot_id None for share * Quota releasing on snapshot deleting bug fixed * Fixed absolute limits * fixed pep8 * Stubed driver do_setup in start_service * Quota tests fixed * removed egg-info * modified conf sample * modified docs * docs * snapshot view, size added * quotas for snapshot * fixed api error * snapshot size * fixed TYPO * Access create empty boy fix * User cannot delete snapshot fix * Can not delete share with error status fixed * response status for share with snapshot delete request - fixed * fixed null value validation for snapshot id * fixed share temaplate name * fixed share snapshots * pep8 fix * License flake8 error fixed * Fixed flake8 errors * Api share-snapshots to snapshots * Removed unused imports * Fixed api tests * Removed v2 api. Moved shares and snapshots from contrib to v1 * quotas exception fix * Quotas fix * Deleted api v2 * Quotas fixed. 
quotas unittests fixed * Removed ubused unittests * fixed fake flags * Removed volume specific tests * merge * Mass replace osapi_volume to osapi_share Removed locale * Update connfig.sample scripts * Update connfig.sample scripts * Removed unused opts from flags.py * removed some volume occurances * removed block specific exceptions * osapi_volume to osapi_share * removed volumes from bin scripts * Added help to smb_config_path conf * modified fake flags * deleted brick * fixed manila manage * api-paste.ini: osapi_volume to osapi-share * Replaced cinder with manila * Renamed service api config opts. Set default port to 8786 * removed volumes from scheduler * deleteted .idea, added .gitignore * volume api removed * fixed keystone context * api fix * Removed backups * DB cleaned * Removed SM models and migrations * Modified models * Modified migrations * Removed block-specific from DB api * Deleted manila.volume * Renamed cinder to manila. Fixed setup.py, fixed bin scripts * Initialize from cinder * Initial commit manila-2.0.0/PKG-INFO0000664000567000056710000000302112701407265015210 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: manila Version: 2.0.0 Summary: Shared Storage for OpenStack Home-page: http://www.openstack.org/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: ====== MANILA ====== You have come across an OpenStack shared file system service. It has identified itself as "Manila." It was abstracted from the Cinder project. * Wiki: https://wiki.openstack.org/Manila * Developer docs: http://docs.openstack.org/developer/manila Getting Started --------------- If you'd like to run from the master branch, you can clone the git repo: git clone https://github.com/openstack/manila.git For developer information please see `HACKING.rst `_ You can raise bugs here http://bugs.launchpad.net/manila Python client ------------- https://github.com/openstack/python-manilaclient.git Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 manila-2.0.0/tox.ini0000664000567000056710000000356312701407112015430 0ustar jenkinsjenkins00000000000000[tox] minversion = 1.6 skipsdist = True envlist = py34,py27,pep8 [testenv] setenv = VIRTUAL_ENV={envdir} usedevelop = True install_command = pip install {opts} {packages} whitelist_externals = find deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = find . -type f -name "*.pyc" -delete ostestr {posargs} [testenv:releasenotes] commands = sphinx-build -a -E -W -d releasenotes/build/doctrees \ -b html releasenotes/source releasenotes/build/html [testenv:debug] commands = oslo_debug_helper {posargs} [testenv:debug-py27] basepython = python2.7 commands = oslo_debug_helper {posargs} [testenv:debug-py34] basepython = python3.4 commands = oslo_debug_helper {posargs} [testenv:pep8] commands = flake8 {posargs} # Run bashate during pep8 runs to ensure violations are caught by # the check and gate queues. 
bashate -i E006 \ tools/enable-pre-commit-hook.sh \ contrib/ci/pre_test_hook.sh \ contrib/ci/post_test_hook.sh \ devstack/plugin.sh \ tools/cover.sh \ run_tests.sh [testenv:genconfig] whitelist_externals = bash commands = oslo-config-generator --config-file etc/oslo-config-generator/manila.conf [testenv:venv] commands = {posargs} [testenv:docs] commands = python setup.py build_sphinx [testenv:cover] commands = {toxinidir}/tools/cover.sh {posargs} [testenv:fast8] commands = {toxinidir}/tools/fast8.sh [testenv:pylint] deps = -r{toxinidir}/requirements.txt pylint==0.26.0 whitelist_externals = bash commands = bash tools/lintstack.sh [testenv:lint] deps = -r{toxinidir}/requirements.txt pylint==0.26.0 commands = python tools/lintstack.py check [flake8] # Following checks are ignored on purpose: ignore = builtins = _ exclude = .venv,.tox,dist,doc,*egg [hacking] import_exceptions = manila.i18n local-check-factory = manila.hacking.checks.factory manila-2.0.0/contrib/0000775000567000056710000000000012701407265015557 5ustar jenkinsjenkins00000000000000manila-2.0.0/contrib/share_driver_hooks/0000775000567000056710000000000012701407265021437 5ustar jenkinsjenkins00000000000000manila-2.0.0/contrib/share_driver_hooks/zaqar_notification_example_consumer.py0000775000567000056710000001666612701407107031340 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # # Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import print_function import os import pprint import signal import sys import time import netaddr from oslo_concurrency import processutils from oslo_config import cfg from oslo_utils import timeutils import six opts = [ cfg.IntOpt( "consume_interval", default=5, deprecated_name="sleep_between_consume_attempts", help=("Time that script will sleep between requests for consuming " "Zaqar messages in seconds."), ), cfg.StrOpt( "mount_dir", default="/tmp", help="Directory that will contain all mounted shares." ), cfg.ListOpt( "expected_ip_addresses", default=[], help=("List of IP addresses that are expected to be found in access " "rules to trigger [un]mount operation for a share.") ), ] CONF = cfg.CONF def print_with_time(data): time = six.text_type(timeutils.utcnow()) print(time + " " + six.text_type(data)) def print_pretty_dict(d): pprint.pprint(d) def pop_zaqar_messages(client, queues_names): if not isinstance(queues_names, (list, set, tuple)): queues_names = (queues_names, ) try: user = client.conf['auth_opts']['options']['os_username'] project = client.conf['auth_opts']['options']['os_project_name'] messages = [] for queue_name in queues_names: queue = client.queue(queue_name) messages.extend([six.text_type(m.body) for m in queue.pop()]) print_with_time( "Received %(len)s message[s] from '%(q)s' " "queue using '%(u)s' user and '%(p)s' project." 
% { 'len': len(messages), 'q': queue_name, 'u': user, 'p': project, } ) return messages except Exception as e: print_with_time("Caught exception - %s" % e) return [] def signal_handler(signal, frame): print("") print_with_time("Ctrl+C was pressed. Shutting down consumer.") sys.exit(0) def parse_str_to_dict(string): if not isinstance(string, six.string_types): return string result = eval(string) return result def handle_message(data): """Handles consumed message. Expected structure of a message is following: {'data': { 'access_rules': [ { 'access_id': u'b28268b9-36c6-40d3-a485-22534077328f', 'access_instance_id': u'd137b2cb-f549-4141-9dd7-36b2789fb973', 'access_level': u'rw', 'access_state': u'active', 'access_to': u'7.7.7.7', 'access_type': u'ip', } ], 'availability_zone': u'nova', 'export_locations': [u'127.0.0.1:/path/to/nfs/share'], 'is_allow_operation': True, 'share_id': u'053eae9a-726f-4f7e-8502-49d7b1adf290', 'share_instance_id': u'dc33e554-e0b9-40f5-9046-c198716d73a0', 'share_proto': u'NFS' }} """ if 'data' in data.keys(): data = data['data'] valid_access = ( 'access_rules' in data and len(data['access_rules']) == 1 and data['access_rules'][0].get('access_type', '?').lower() == 'ip' and data.get('share_proto', '?').lower() == 'nfs' ) if valid_access: is_allow_operation = data['is_allow_operation'] export_location = data['export_locations'][0] if is_allow_operation: mount_share(export_location, data['access_to']) else: unmount_share(export_location, data['access_to']) else: print_with_time('Do nothing with above message.') def execute(cmd): try: print_with_time('Executing following command: \n%s' % cmd) cmd = cmd.split() stdout, stderr = processutils.execute(*cmd) if stderr: print_with_time('Got error: %s' % stderr) return stdout, stderr except Exception as e: print_with_time('Got following error: %s' % e) return False, True def is_share_mounted(mount_point): mounts, stderr = execute('mount') return mount_point in mounts def rule_affects_me(ip_or_cidr): if '/' in ip_or_cidr: net = netaddr.IPNetwork(ip_or_cidr) for my_ip in CONF.zaqar.expected_ip_addresses: if netaddr.IPAddress(my_ip) in net: return True else: for my_ip in CONF.zaqar.expected_ip_addresses: if my_ip == ip_or_cidr: return True return False def mount_share(export_location, access_to): data = { 'mount_point': os.path.join(CONF.zaqar.mount_dir, export_location.split('/')[-1]), 'export_location': export_location, } if (rule_affects_me(access_to) and not is_share_mounted(data['mount_point'])): print_with_time( "Mounting '%(export_location)s' share to %(mount_point)s.") execute('sudo mkdir -p %(mount_point)s' % data) stdout, stderr = execute( 'sudo mount.nfs %(export_location)s %(mount_point)s' % data) if stderr: print_with_time("Mount operation failed.") else: print_with_time("Mount operation went OK.") def unmount_share(export_location, access_to): if rule_affects_me(access_to) and is_share_mounted(export_location): print_with_time("Unmounting '%(export_location)s' share.") stdout, stderr = execute('sudo umount %s' % export_location) if stderr: print_with_time("Unmount operation failed.") else: print_with_time("Unmount operation went OK.") def main(): # Register other local modules cur = os.path.dirname(__file__) pathtest = os.path.join(cur) sys.path.append(pathtest) # Init configuration CONF(sys.argv[1:], project="manila_notifier", version=1.0) CONF.register_opts(opts, group="zaqar") # Import common config and Zaqar client import zaqarclientwrapper # Handle SIGINT signal.signal(signal.SIGINT, signal_handler) # Run consumer 
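    # Descriptive note on the loop that follows: it polls every queue listed in
    # CONF.zaqar.zaqar_queues, pretty-prints each received notification and hands it
    # to handle_message(), which performs the actual NFS [un]mount, then sleeps for
    # CONF.zaqar.consume_interval seconds before the next poll.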
print_with_time("Consumer was successfully run.") while(True): messages = pop_zaqar_messages( zaqarclientwrapper.ZAQARCLIENT, CONF.zaqar.zaqar_queues) if not messages: message = ("No new messages in '%s' queue[s] " "found." % ','.join(CONF.zaqar.zaqar_queues)) else: message = "Got following messages:" print_with_time(message) for message in messages: message = parse_str_to_dict(message) print_pretty_dict(message) handle_message(message) time.sleep(CONF.zaqar.consume_interval) if __name__ == '__main__': main() manila-2.0.0/contrib/share_driver_hooks/zaqarclientwrapper.py0000664000567000056710000000537012701407107025727 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from zaqarclient.queues import client as zaqar zaqar_notification_opts = [ cfg.StrOpt( "zaqar_username", help="Username that should be used for init of zaqar client.", ), cfg.StrOpt( "zaqar_password", secret=True, help="Password for user specified in opt 'zaqar_username'.", ), cfg.StrOpt( "zaqar_project_name", help=("Project/Tenant name that is owns user specified " "in opt 'zaqar_username'."), ), cfg.StrOpt( "zaqar_auth_url", default="http://127.0.0.1:35357/v2.0/", help="Auth url to be used by Zaqar client.", ), cfg.StrOpt( "zaqar_region_name", help="Name of the region that should be used. Optional.", ), cfg.StrOpt( "zaqar_service_type", default="messaging", help="Service type for Zaqar. Optional.", ), cfg.StrOpt( "zaqar_endpoint_type", default="publicURL", help="Type of endpoint to be used for init of Zaqar client. Optional.", ), cfg.FloatOpt( "zaqar_api_version", default=1.1, help="Version of Zaqar API to use. Optional.", ), cfg.ListOpt( "zaqar_queues", default=["manila_notification_qeueue"], help=("List of queues names to be used for sending Manila " "notifications. Optional."), ), ] CONF = cfg.CONF CONF.register_opts(zaqar_notification_opts, group='zaqar') ZAQARCLIENT = zaqar.Client( version=CONF.zaqar.zaqar_api_version, conf={ "auth_opts": { "backend": "keystone", "options": { "os_username": CONF.zaqar.zaqar_username, "os_password": CONF.zaqar.zaqar_password, "os_project_name": CONF.zaqar.zaqar_project_name, "os_auth_url": CONF.zaqar.zaqar_auth_url, "os_region_name": CONF.zaqar.zaqar_region_name, "os_service_type": CONF.zaqar.zaqar_service_type, "os_endpoint_type": CONF.zaqar.zaqar_endpoint_type, "insecure": True, }, }, }, ) manila-2.0.0/contrib/share_driver_hooks/zaqar_notification.py0000664000567000056710000001150012701407107025665 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from oslo_utils import timeutils from manila import exception from manila.share import api from manila.share import hook from manila.share.hooks import zaqarclientwrapper # noqa CONF = zaqarclientwrapper.CONF LOG = log.getLogger(__name__) ZAQARCLIENT = zaqarclientwrapper.ZAQARCLIENT class ZaqarNotification(hook.HookBase): share_api = api.API() def _access_changed_trigger(self, context, func_name, access_rules_ids, share_instance_id): access = [self.db.share_access_get(context, rule_id) for rule_id in access_rules_ids] share_instance = self.db.share_instance_get(context, share_instance_id) share = self.share_api.get(context, share_id=share_instance.share_id) def rules_view(rules): result = [] for rule in rules: access_instance = None for ins in rule.instance_mappings: if ins.share_instance_id == share_instance_id: access_instance = ins break else: raise exception.InstanceNotFound( instance_id=share_instance_id) result.append({ 'access_id': rule.id, 'access_instance_id': access_instance.id, 'access_type': rule.access_type, 'access_to': rule.access_to, 'access_level': rule.access_level, }) return result is_allow_operation = 'allow' in func_name results = { 'share_id': share.share_id, 'share_instance_id': share_instance_id, 'export_locations': [ el.path for el in share_instance.export_locations], 'share_proto': share.share_proto, 'access_rules': rules_view(access), 'is_allow_operation': is_allow_operation, 'availability_zone': share_instance.availability_zone, } LOG.debug(results) return results def _execute_pre_hook(self, context, func_name, *args, **kwargs): LOG.debug("\n PRE zaqar notification has been called for " "method '%s'.\n" % func_name) if func_name == "deny_access": LOG.debug("\nSending notification about denied access.\n") data = self._access_changed_trigger( context, func_name, kwargs.get('access_rules'), kwargs.get('share_instance_id'), ) self._send_notification(data) def _execute_post_hook(self, context, func_name, pre_hook_data, driver_action_results, *args, **kwargs): LOG.debug("\n POST zaqar notification has been called for " "method '%s'.\n" % func_name) if func_name == "allow_access": LOG.debug("\nSending notification about allowed access.\n") data = self._access_changed_trigger( context, func_name, kwargs.get('access_rules'), kwargs.get('share_instance_id'), ) self._send_notification(data) def _send_notification(self, data): for queue_name in CONF.zaqar.zaqar_queues: ZAQARCLIENT.queue_name = queue_name message = { "body": { "example_message": ( "message generated at '%s'" % timeutils.utcnow()), "data": data, } } LOG.debug( "\n Sending message %(m)s to '%(q)s' queue using '%(u)s' user " "and '%(p)s' project." % { 'm': message, 'q': queue_name, 'u': CONF.zaqar.zaqar_username, 'p': CONF.zaqar.zaqar_project_name, } ) queue = ZAQARCLIENT.queue(queue_name) queue.post(message) def _execute_periodic_hook(self, context, periodic_hook_data, *args, **kwargs): LOG.debug("Periodic zaqar notification has been called. 
(Placeholder)") manila-2.0.0/contrib/share_driver_hooks/README.rst0000664000567000056710000000630012701407107023120 0ustar jenkinsjenkins00000000000000Manila mount automation example using share driver hooks feature ================================================================ Manila has feature called 'share driver hooks'. Which allows to perform actions before and after driver actions such as 'create share' or 'access allow', also allows to do custom things on periodic basis. Here, we provide example of mount automation using this feature. This example uses OpenStack Zaqar project for sending notifications when operations 'access allow' and 'access deny' are performed. Server side hook will send notifications about changed access for shares after granting and prior to denying access. Possibilities of the mount automation example (consumer) -------------------------------------------------------- - Supports only 'NFS' protocol. - Supports only 'IP' rules. - Supports both levels of access - 'RW' and 'RO'. - Consume interval can be configured. - Allows to choose parent mount directory. Server side setup and run ------------------------- 1. Place files 'zaqarclientwrapper.py' and 'zaqar_notification.py' to dir %manila_dir%/manila/share/hooks. Then update manila configuration file with following options: :: [share_backend_config_group] hook_drivers = manila.share.hooks.zaqar_notification.ZaqarNotification enable_pre_hooks = True enable_post_hooks = True enable_periodic_hooks = False [zaqar] zaqar_auth_url = http://%ip_of_endpoint_with_keystone%:35357/v2.0/ zaqar_region_name = %name_of_region_optional% zaqar_username = foo_user zaqar_password = foo_tenant zaqar_project_name = foo_password zaqar_queues = manila_notification 2. Restart manila-share service. Consumer side setup and run --------------------------- 1. Place files 'zaqarclientwrapper.py' and 'zaqar_notification_example_consumer.py' to any dir on user machine, but they both should be in the same dir. 2. Make sure that following dependencies are installed: - PIP dependencies: - netaddr - oslo_concurrency - oslo_config - oslo_utils - python-zaqarclient - six - System libs that install 'mount' and 'mount.nfs' apps. 3. Create file with following options: :: [zaqar] # Consumer-related options sleep_between_consume_attempts = 7 mount_dir = "/tmp" expected_ip_addresses = 10.254.0.4 # Common options for consumer and server sides zaqar_auth_url = http://%ip_of_endpoint_with_keystone%:35357/v2.0/ zaqar_region_name = %name_of_region_optional% zaqar_username = foo_user zaqar_password = foo_tenant zaqar_project_name = foo_password zaqar_queues = manila_notification Consumer options descriptions: - 'sleep_between_consume_attempts' - wait interval between consuming notifications from message queue. - 'mount_dir' - parent mount directory that will contain all mounted shares as subdirectories. - 'expected_ip_addresses' - list of IP addresses that are expected to be granted access for. Could be either equal to or be part of a CIDR. Match triggers [un]mount operations. 4. Run consumer with following command: :: $ zaqar_notification_example_consumer.py --config-file path/to/config.conf 5. Now create NFS share and grant IP access to consumer by its IP address. 
manila-2.0.0/contrib/ci/0000775000567000056710000000000012701407265016152 5ustar jenkinsjenkins00000000000000manila-2.0.0/contrib/ci/common.sh0000775000567000056710000000470312701407107020000 0ustar jenkinsjenkins00000000000000# Environment variables export MANILA_TEMPEST_COMMIT="4aaa5493" # 2 Mar, 2016 # ---------------------------------------------- # Functions # Import devstack functions source $BASE/new/devstack/functions function manila_check_service_vm_availability { # First argument is expected to be IP address of a service VM wait_step=10 wait_timeout=300 available='false' while (( wait_timeout > 0 )) ; do if ping -w 1 $1; then available='true' break fi ((wait_timeout-=$wait_step)) sleep $wait_step done if [[ $available == 'true' ]]; then echo "SUCCESS! Service VM $1 is available." else echo "FAILURE! Service VM $1 is not available." exit 1 fi } function manila_wait_for_generic_driver_init { # First argument is expected to be file path to Manila config MANILA_CONF=$1 DRIVER_GROUPS=$(iniget $MANILA_CONF DEFAULT enabled_share_backends) for driver_group in ${DRIVER_GROUPS//,/ }; do SHARE_DRIVER=$(iniget $MANILA_CONF $driver_group share_driver) GENERIC_DRIVER='manila.share.drivers.generic.GenericShareDriver' DHSS=$(iniget $MANILA_CONF $driver_group driver_handles_share_servers) if [[ $SHARE_DRIVER == $GENERIC_DRIVER && $(trueorfalse False DHSS) == False ]]; then # Wait for availability source /opt/stack/new/devstack/openrc admin demo vm_id=$(iniget $MANILA_CONF $driver_group service_instance_name_or_id) vm_ips=$(nova show $vm_id | grep "private network") attempts=0 for vm_ip in ${vm_ips//,/ }; do # Get IPv4 address if [[ $vm_ip =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then # Check availability ((attempts++)) manila_check_service_vm_availability $vm_ip break fi done if [[ (( attempts < 1 )) ]]; then echo "No IPv4 addresses found among private IPs of '$vm_id' for '$GENERIC_DRIVER'. "\ "Reported IPs: '$vm_ips'." exit 1 fi fi done } function manila_wait_for_drivers_init { # First argument is expected to be file path to Manila config manila_wait_for_generic_driver_init $1 # Sleep to make manila-share service notify manila-scheduler about # its capabilities on time. sleep 10 } manila-2.0.0/contrib/ci/pre_test_hook.sh0000775000567000056710000001041412701407112021345 0ustar jenkinsjenkins00000000000000#!/bin/bash -xe # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This script is executed inside pre_test_hook function in devstack gate. # First argument ($1) expects boolean as value where: # 'False' means share driver will not handle share servers # 'True' means it will handle share servers. 
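# Illustrative invocation (hypothetical argument values; in CI the devstack # gate supplies these arguments): # ./pre_test_hook.sh True generic multibackend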
# Import devstack function 'trueorfalse' source $BASE/new/devstack/functions localrc_path=$BASE/new/devstack/localrc echo "DEVSTACK_GATE_TEMPEST_ALLOW_TENANT_ISOLATION=1" >> $localrc_path echo "API_RATE_LIMIT=False" >> $localrc_path echo "TEMPEST_SERVICES+=,manila" >> $localrc_path echo "VOLUME_BACKING_FILE_SIZE=22G" >> $localrc_path echo "CINDER_LVM_TYPE=thin" >> $localrc_path # NOTE(mkoderer): switch to keystone v3 by default echo "IDENTITY_API_VERSION=3" >> $localrc_path # NOTE(vponomaryov): Set oversubscription ratio for Cinder LVM driver # bigger than 1.0, because in CI we do not need such small value. # It will allow us to avoid exceeding real capacity in CI test runs. echo "CINDER_OVERSUBSCRIPTION_RATIO=20.0" >> $localrc_path echo "MANILA_BACKEND1_CONFIG_GROUP_NAME=london" >> $localrc_path echo "MANILA_BACKEND2_CONFIG_GROUP_NAME=paris" >> $localrc_path echo "MANILA_SHARE_BACKEND1_NAME=LONDON" >> $localrc_path echo "MANILA_SHARE_BACKEND2_NAME=PARIS" >> $localrc_path # === Handle script arguments === # First argument is expected to be a boolean-like value for DHSS. DHSS=$1 DHSS=$(trueorfalse True DHSS) # Second argument is expected to have codename of a share driver. DRIVER=$2 # Third argument is expected to contain value equal either to 'singlebackend' # or 'multibackend' that defines how many back-ends should be configured. BACK_END_TYPE=$3 echo "MANILA_OPTGROUP_london_driver_handles_share_servers=$DHSS" >> $localrc_path echo "MANILA_OPTGROUP_paris_driver_handles_share_servers=$DHSS" >> $localrc_path echo "MANILA_USE_SERVICE_INSTANCE_PASSWORD=True" >> $localrc_path echo "MANILA_USE_DOWNGRADE_MIGRATIONS=True" >> $localrc_path if [[ "$BACK_END_TYPE" == "multibackend" ]]; then echo "MANILA_MULTI_BACKEND=True" >> $localrc_path else echo "MANILA_MULTI_BACKEND=False" >> $localrc_path fi MANILA_SERVICE_IMAGE_ENABLED=False if [[ "$DRIVER" == "generic" ]]; then MANILA_SERVICE_IMAGE_ENABLED=True echo "SHARE_DRIVER=manila.share.drivers.generic.GenericShareDriver" >> $localrc_path elif [[ "$DRIVER" == "windows" ]]; then MANILA_SERVICE_IMAGE_ENABLED=True echo "SHARE_DRIVER=manila.share.drivers.windows.windows_smb_driver.WindowsSMBDriver" >> $localrc_path elif [[ "$DRIVER" == "lvm" ]]; then echo "SHARE_DRIVER=manila.share.drivers.lvm.LVMShareDriver" >> $localrc_path echo "SHARE_BACKING_FILE_SIZE=32000M" >> $localrc_path elif [[ "$DRIVER" == "zfsonlinux" ]]; then echo "SHARE_DRIVER=manila.share.drivers.zfsonlinux.driver.ZFSonLinuxShareDriver" >> $localrc_path echo "RUN_MANILA_REPLICATION_TESTS=True" >> $localrc_path # Set the replica_state_update_interval to 60 seconds to make # replication tests run faster. The default is 300, which is greater than # the build timeout for ZFS on the gate. echo "MANILA_REPLICA_STATE_UPDATE_INTERVAL=60" >> $localrc_path echo "MANILA_ZFSONLINUX_USE_SSH=True" >> $localrc_path fi echo "MANILA_SERVICE_IMAGE_ENABLED=$MANILA_SERVICE_IMAGE_ENABLED" >> $localrc_path # Enabling isolated metadata in Neutron is required because # Tempest creates isolated networks and created vm's in scenario tests don't # have access to Nova Metadata service. This leads to unavailability of # created vm's in scenario tests. echo 'ENABLE_ISOLATED_METADATA=True' >> $localrc_path # Go to Tempest dir and checkout stable commit to avoid possible # incompatibilities for plugin stored in Manila repo. 
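# The pinned revision comes from MANILA_TEMPEST_COMMIT, which is exported by # contrib/ci/common.sh sourced below.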
cd $BASE/new/tempest source $BASE/new/manila/contrib/ci/common.sh git checkout $MANILA_TEMPEST_COMMIT # Print current Tempest status git status manila-2.0.0/contrib/ci/post_test_hook.sh0000775000567000056710000002337112701407107021556 0ustar jenkinsjenkins00000000000000#!/bin/bash -xe # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This script is executed inside post_test_hook function in devstack gate. # First argument ($1) expects 'multibackend' as value for setting appropriate # tempest conf opts, all other values will assume singlebackend installation. sudo chown -R jenkins:stack $BASE/new/tempest sudo chown -R jenkins:stack $BASE/data/tempest sudo chmod -R o+rx $BASE/new/devstack/files # Import devstack functions 'iniset', 'iniget' and 'trueorfalse' source $BASE/new/devstack/functions export TEMPEST_CONFIG=$BASE/new/tempest/etc/tempest.conf # === Handle script arguments === # First argument is expected to contain value equal either to 'singlebackend' # or 'multibackend' that defines how many back-ends are used. BACK_END_TYPE=$1 # Second argument is expected to have codename of a share driver. DRIVER=$2 # Third argument is expected to contain either 'api' or 'scenario' values # that define test suites to be run. TEST_TYPE=$3 # Fourth argument is expected to be boolean-like and it should be 'true' # when PostgreSQL DB back-end is used and 'false' when MySQL. POSTGRES_ENABLED=$4 POSTGRES_ENABLED=$(trueorfalse True POSTGRES_ENABLED) if [[ "$BACK_END_TYPE" == "multibackend" ]]; then iniset $TEMPEST_CONFIG share multi_backend True iniset $TEMPEST_CONFIG share run_migration_tests $(trueorfalse True RUN_MANILA_MIGRATION_TESTS) # Set share backends names, they are defined within pre_test_hook export BACKENDS_NAMES="LONDON,PARIS" else export BACKENDS_NAMES="LONDON" fi iniset $TEMPEST_CONFIG share backend_names $BACKENDS_NAMES # Set two retries for CI jobs iniset $TEMPEST_CONFIG share share_creation_retry_number 2 # Suppress errors in cleanup of resources SUPPRESS_ERRORS=${SUPPRESS_ERRORS_IN_CLEANUP:-True} iniset $TEMPEST_CONFIG share suppress_errors_in_cleanup $SUPPRESS_ERRORS USERNAME_FOR_USER_RULES=${USERNAME_FOR_USER_RULES:-"manila"} PASSWORD_FOR_SAMBA_USER=${PASSWORD_FOR_SAMBA_USER:-$USERNAME_FOR_USER_RULES} RUN_MANILA_CG_TESTS=${RUN_MANILA_CG_TESTS:-True} RUN_MANILA_MANAGE_TESTS=${RUN_MANILA_MANAGE_TESTS:-True} RUN_MANILA_MANAGE_SNAPSHOT_TESTS=${RUN_MANILA_MANAGE_SNAPSHOT_TESTS:-False} MANILA_CONF=${MANILA_CONF:-/etc/manila/manila.conf} # Enable replication tests RUN_MANILA_REPLICATION_TESTS=${RUN_MANILA_REPLICATION_TESTS:-False} iniset $TEMPEST_CONFIG share run_replication_tests $RUN_MANILA_REPLICATION_TESTS if [[ -z "$MULTITENANCY_ENABLED" ]]; then # Define whether share drivers handle share servers or not. # Requires defined config option 'driver_handles_share_servers'. 
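# The two counters below tally how many configured back ends run with and # without share server handling; mixed or undefined modes abort the job, # otherwise MULTITENANCY_ENABLED is derived from the common mode.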
NO_SHARE_SERVER_HANDLING_MODES=0 WITH_SHARE_SERVER_HANDLING_MODES=0 # Convert backend names to config groups using lowercase translation CONFIG_GROUPS=${BACKENDS_NAMES,,} for CG in ${CONFIG_GROUPS//,/ }; do DRIVER_HANDLES_SHARE_SERVERS=$(iniget $MANILA_CONF $CG driver_handles_share_servers) if [[ $DRIVER_HANDLES_SHARE_SERVERS == False ]]; then NO_SHARE_SERVER_HANDLING_MODES=$((NO_SHARE_SERVER_HANDLING_MODES+1)) elif [[ $DRIVER_HANDLES_SHARE_SERVERS == True ]]; then WITH_SHARE_SERVER_HANDLING_MODES=$((WITH_SHARE_SERVER_HANDLING_MODES+1)) else echo "Config option 'driver_handles_share_servers' either is not defined or \ defined with improper value - '$DRIVER_HANDLES_SHARE_SERVERS'." exit 1 fi done if [[ $NO_SHARE_SERVER_HANDLING_MODES -ge 1 && $WITH_SHARE_SERVER_HANDLING_MODES -ge 1 || \ $NO_SHARE_SERVER_HANDLING_MODES -eq 0 && $WITH_SHARE_SERVER_HANDLING_MODES -eq 0 ]]; then echo 'Allowed only same driver modes for all backends to be run with Tempest job.' exit 1 elif [[ $NO_SHARE_SERVER_HANDLING_MODES -ge 1 ]]; then MULTITENANCY_ENABLED='False' elif [[ $WITH_SHARE_SERVER_HANDLING_MODES -ge 1 ]]; then MULTITENANCY_ENABLED='True' else echo 'Should never get here unless an error occurred.' exit 1 fi else MULTITENANCY_ENABLED=$(trueorfalse True MULTITENANCY_ENABLED) fi # Set multitenancy configuration for Tempest iniset $TEMPEST_CONFIG share multitenancy_enabled $MULTITENANCY_ENABLED if [[ "$MULTITENANCY_ENABLED" == "False" ]]; then # Using approach without handling of share servers we have bigger load for # volume creation in Cinder using Generic driver. So, reduce amount of # threads to avoid errors for Cinder volume creations that appear # because of lack of free space. MANILA_TEMPEST_CONCURRENCY=${MANILA_TEMPEST_CONCURRENCY:-8} fi # let us control if we die or not set +o errexit cd $BASE/new/tempest export MANILA_TEMPEST_CONCURRENCY=${MANILA_TEMPEST_CONCURRENCY:-20} export MANILA_TESTS=${MANILA_TESTS:-'manila_tempest_tests.tests.api'} if [[ "$TEST_TYPE" == "scenario" ]]; then echo "Set test set to scenario only" MANILA_TESTS='manila_tempest_tests.tests.scenario' elif [[ "$DRIVER" == "generic" ]]; then RUN_MANILA_MANAGE_SNAPSHOT_TESTS=True if [[ "$POSTGRES_ENABLED" == "True" ]]; then # Run only CIFS tests on PostgreSQL DB backend # to reduce amount of tests per job using 'generic' share driver. iniset $TEMPEST_CONFIG share enable_protocols cifs else # Run only NFS tests on MySQL DB backend to reduce amount of tests # per job using 'generic' share driver. iniset $TEMPEST_CONFIG share enable_protocols nfs fi fi if [[ "$DRIVER" == "lvm" ]]; then MANILA_TEMPEST_CONCURRENCY=8 RUN_MANILA_CG_TESTS=False RUN_MANILA_MANAGE_TESTS=False iniset $TEMPEST_CONFIG share run_shrink_tests False iniset $TEMPEST_CONFIG share enable_ip_rules_for_protocols 'nfs' iniset $TEMPEST_CONFIG share enable_user_rules_for_protocols 'cifs' if ! 
        sudo useradd $USERNAME_FOR_USER_RULES
    fi
    (echo $PASSWORD_FOR_SAMBA_USER; echo $PASSWORD_FOR_SAMBA_USER) | sudo smbpasswd -s -a $USERNAME_FOR_USER_RULES
    sudo smbpasswd -e $USERNAME_FOR_USER_RULES
    samba_daemon_name=smbd
    if is_fedora; then
        samba_daemon_name=smb
    fi
    sudo service $samba_daemon_name restart
elif [[ "$DRIVER" == "zfsonlinux" ]]; then
    MANILA_TEMPEST_CONCURRENCY=8
    RUN_MANILA_CG_TESTS=False
    RUN_MANILA_MANAGE_TESTS=False
    iniset $TEMPEST_CONFIG share run_migration_tests False
    iniset $TEMPEST_CONFIG share run_quota_tests True
    iniset $TEMPEST_CONFIG share run_replication_tests True
    iniset $TEMPEST_CONFIG share run_shrink_tests True
    iniset $TEMPEST_CONFIG share enable_ip_rules_for_protocols 'nfs'
    iniset $TEMPEST_CONFIG share enable_user_rules_for_protocols ''
    iniset $TEMPEST_CONFIG share enable_cert_rules_for_protocols ''
    iniset $TEMPEST_CONFIG share enable_ro_access_level_for_protocols 'nfs'
    iniset $TEMPEST_CONFIG share build_timeout 180
    iniset $TEMPEST_CONFIG share share_creation_retry_number 0
    iniset $TEMPEST_CONFIG share capability_storage_protocol 'NFS'
    iniset $TEMPEST_CONFIG share enable_protocols 'nfs'
    iniset $TEMPEST_CONFIG share suppress_errors_in_cleanup False
    iniset $TEMPEST_CONFIG share multitenancy_enabled False
    iniset $TEMPEST_CONFIG share multi_backend True
    iniset $TEMPEST_CONFIG share backend_replication_type 'readable'
fi

# Enable consistency group tests
iniset $TEMPEST_CONFIG share run_consistency_group_tests $RUN_MANILA_CG_TESTS

# Enable manage/unmanage tests
iniset $TEMPEST_CONFIG share run_manage_unmanage_tests $RUN_MANILA_MANAGE_TESTS

# Enable manage/unmanage snapshot tests
iniset $TEMPEST_CONFIG share run_manage_unmanage_snapshot_tests $RUN_MANILA_MANAGE_SNAPSHOT_TESTS

# Check whether the Tempest plugin was installed correctly
echo 'import pkg_resources; print list(pkg_resources.iter_entry_points("tempest.test_plugins"))' | python

# Workaround for Tempest architectural changes
# See bugs:
# 1) https://bugs.launchpad.net/manila/+bug/1531049
# 2) https://bugs.launchpad.net/tempest/+bug/1524717
TEMPEST_CONFIG=$BASE/new/tempest/etc/tempest.conf
ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-"admin"}
ADMIN_DOMAIN_NAME=${ADMIN_DOMAIN_NAME:-"Default"}
ADMIN_PASSWORD=${ADMIN_PASSWORD:-"secretadmin"}
iniset $TEMPEST_CONFIG auth admin_username ${ADMIN_USERNAME:-"admin"}
iniset $TEMPEST_CONFIG auth admin_password $ADMIN_PASSWORD
iniset $TEMPEST_CONFIG auth admin_tenant_name $ADMIN_TENANT_NAME
iniset $TEMPEST_CONFIG auth admin_domain_name $ADMIN_DOMAIN_NAME
iniset $TEMPEST_CONFIG identity username ${TEMPEST_USERNAME:-"demo"}
iniset $TEMPEST_CONFIG identity password $ADMIN_PASSWORD
iniset $TEMPEST_CONFIG identity tenant_name ${TEMPEST_TENANT_NAME:-"demo"}
iniset $TEMPEST_CONFIG identity alt_username ${ALT_USERNAME:-"alt_demo"}
iniset $TEMPEST_CONFIG identity alt_password $ADMIN_PASSWORD
iniset $TEMPEST_CONFIG identity alt_tenant_name ${ALT_TENANT_NAME:-"alt_demo"}
iniset $TEMPEST_CONFIG validation ip_version_for_ssh 4
iniset $TEMPEST_CONFIG validation network_for_ssh ${PRIVATE_NETWORK_NAME:-"private"}

export OS_PROJECT_DOMAIN_NAME=$ADMIN_DOMAIN_NAME
export OS_USER_DOMAIN_NAME=$ADMIN_DOMAIN_NAME

# Also, we should wait until the service VM is available before running
# Tempest tests with the Generic driver in DHSS=False mode.
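#
# For reference only (hypothetical argument values, not part of the upstream
# gate definition), a devstack-gate job invokes this hook roughly as:
#
#     ./post_test_hook.sh multibackend generic api false
#
# i.e. multi-backend Tempest opts, the 'generic' share driver, the API test
# suite, and the MySQL DB back end.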
source $BASE/new/manila/contrib/ci/common.sh
manila_wait_for_drivers_init $MANILA_CONF

echo "Running tempest manila test suites"
sudo -H -u jenkins tox -eall-plugin $MANILA_TESTS -- --concurrency=$MANILA_TEMPEST_CONCURRENCY
manila-2.0.0/pylintrc0000664000567000056710000000232612701407107015704 0ustar jenkinsjenkins00000000000000# The format of this file isn't really documented; just use --generate-rcfile

[Messages Control]
# NOTE(justinsb): We might want to have a 2nd strict pylintrc in future
# C0111: Don't require docstrings on every method
# W0511: TODOs in code comments are fine.
# W0142: *args and **kwargs are fine.
# W0622: Redefining id is fine.
disable=C0111,W0511,W0142,W0622

[Basic]
# Variable names can be 1 to 31 characters long, with lowercase and underscores
variable-rgx=[a-z_][a-z0-9_]{0,30}$

# Argument names can be 2 to 31 characters long, with lowercase and underscores
argument-rgx=[a-z_][a-z0-9_]{1,30}$

# Method names should be at least 3 characters long
# and be lowercased with underscores
method-rgx=([a-z_][a-z0-9_]{2,50}|setUp|tearDown)$

# Module names matching manila-* are ok (files in bin/)
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(manila-[a-z0-9_-]+))$

# Don't require docstrings on tests.
no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$

[Design]
max-public-methods=100
min-public-methods=0
max-args=6

[Variables]
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid defining new builtins when possible.
# _ is used by our localization
additional-builtins=_
manila-2.0.0/.testr.conf0000664000567000056710000000057712701407107016211 0ustar jenkinsjenkins00000000000000[DEFAULT]
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
             OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
             OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
             OS_LOG_CAPTURE=${OS_LOG_CAPTURE:-1} \
             ${PYTHON:-python} -m subunit.run discover -t ./ ./manila/tests $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
manila-2.0.0/AUTHORS0000664000567000056710000001373712701407264015175 0ustar jenkinsjenkins00000000000000119Vik Abhilash Divakaran Akshai Parthasarathy Aleks Chirko Alex Meade Alex O'Rourke Alex O'Rourke Alexey Ovchinnikov Alin Balutoiu Alyson Rosa Andrea Ma Andreas Jaeger Andreas Jaeger Andrei Ta Andrei V.
Ostapenko Andrew Kerr Ankit Agrawal Anthony Lee Atsushi SAKAI Ben Swartzlander Ben Swartzlander Ben Swartzlander Bertrand Lallau Bill Owen Bob Callaway Bob-OpenStack <295988511@qq.com> Béla Vancsics Chandan Kumar Chaozhe.Chen Christian Berendt Clinton Knight Csaba Henk Dan Sneddon Daniel Stelter-Gliese Danny Al-Gaaf David Disseldorp David Sariel Deepak C Shetty Deliang Fan Diem Tran Dirk Mueller Dmitry Bogun Doug Hellmann Dustin Schoenbrun Gaurang Tapase Goutham Pacha Ravi Harshada Mangesh Kakad Hiroyuki Eguchi Igor Malinovskiy James Page Jay Xu Jeremy Stanley Joe Gordon John Spray John Spray Jordan Pittier Jose Falavinha Julia Varlamova Kamil Rykowski Li, Chen Lin Yang LiuNanke Longgeek Lucian Petrut Luis Pabón Lukas Bezdicka Marc Koderer Marc Solanas Tarre Mark McLoughlin Mark Sturdevant Martin Kletzander Marty Turner Masaki Matsushita Matt Riedemann Michael Krotscheck Michael Still Monty Taylor Nicolas Trangez Nilesh Bhosale OTSUKA, Yuanying Ondřej Nový Pete Zaitcev Ponomaryov Valeriy Rafael Rivero Ralf Rantzau Ram Raja Ramana Raja Ramy Asselin Rich Hagarty Rob Esker Rodrigo Barbieri Ronald Bradford Rushil Chugh Ryan Hefner Sam Wan Sascha Peilicke Sergey Vilgelm Shaohui Wang Shaun Edwards Shaun Edwards Shuquan Huang Silvan Kaiser Stephen Gordon Sumit Kumar Sun Jun Surya Ghatty Swapnil Kulkarni (coolsvap) Takashi NATSUME Thierry Carrez Thomas Bechtold Tiago Pasqualini Tin Lam Tom Barron Tom Patzig Valeriy Valeriy Ponomaryov Victor Sergeyev Vijay Bellur Vincent Untz Vladimir Vechkanov Xing Yang Yang Wei Yatin Kumbhare Yogesh Your Name Yulia Portnova Yulia Portnova Yusuke Hayashi Zhao Lei ZhiQiang Fan Zhiteng Huang Zhongyue Luo binean bswartz chen-li daiki kato darkwsh houming-wang hparekh huayue huyang janonymous jason bishop kutner li,chen liucheng marcusvrn mark.sturdevant mark.sturdevant nidhimittalhada peter_wang scottda smcginnis sonu.kumar tclayton ting.wang tpsilva ubu venkatamahesh vponomaryov weiting-chen yangyapeng zhangguoqing zhaohua zhongjun manila-2.0.0/requirements.txt0000664000567000056710000000254512701407107017404 0ustar jenkinsjenkins00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
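# A minimal sketch (not part of the upstream file; the virtualenv name and
# exact commands are arbitrary examples) of how these runtime requirements
# are typically consumed during development:
#
#     virtualenv .venv && source .venv/bin/activate
#     pip install -r requirements.txt
#     pip install -e .
#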
# pbr should be first
pbr>=1.6 # Apache-2.0

alembic>=0.8.0 # MIT
Babel>=1.3 # BSD
eventlet!=0.18.3,>=0.18.2 # MIT
greenlet>=0.3.2 # MIT
lxml>=2.3 # BSD
netaddr!=0.7.16,>=0.7.12 # BSD
oslo.config>=3.7.0 # Apache-2.0
oslo.context>=0.2.0 # Apache-2.0
oslo.db>=4.1.0 # Apache-2.0
oslo.i18n>=2.1.0 # Apache-2.0
oslo.log>=1.14.0 # Apache-2.0
oslo.messaging>=4.0.0 # Apache-2.0
oslo.middleware>=3.0.0 # Apache-2.0
oslo.policy>=0.5.0 # Apache-2.0
oslo.rootwrap>=2.0.0 # Apache-2.0
oslo.serialization>=1.10.0 # Apache-2.0
oslo.service>=1.0.0 # Apache-2.0
oslo.utils>=3.5.0 # Apache-2.0
oslo.concurrency>=3.5.0 # Apache-2.0
paramiko>=1.16.0 # LGPL
Paste # MIT
PasteDeploy>=1.5.0 # MIT
python-neutronclient!=4.1.0,>=2.6.0 # Apache-2.0
keystoneauth1>=2.1.0 # Apache-2.0
keystonemiddleware!=4.1.0,>=4.0.0 # Apache-2.0
requests!=2.9.0,>=2.8.1 # Apache-2.0
retrying!=1.3.0,>=1.2.3 # Apache-2.0
Routes!=2.0,!=2.1,>=1.12.3;python_version=='2.7' # MIT
Routes!=2.0,>=1.12.3;python_version!='2.7' # MIT
six>=1.9.0 # MIT
SQLAlchemy<1.1.0,>=1.0.10 # MIT
stevedore>=1.5.0 # Apache-2.0
python-cinderclient>=1.3.1 # Apache-2.0
python-novaclient!=2.33.0,>=2.29.0 # Apache-2.0
WebOb>=1.2.3 # MIT
manila-2.0.0/HACKING.rst0000664000567000056710000001137612701407112015714 0ustar jenkinsjenkins00000000000000Manila Style Commandments
=========================

- Step 1: Read the OpenStack Style Commandments
  http://docs.openstack.org/developer/hacking/
- Step 2: Read on

Manila Specific Commandments
----------------------------

- [M319] Validate that debug level logs are not translated.
- [M323] Ensure that the _() function is explicitly imported to ensure proper translations.
- [M325] str() cannot be used on an exception. Remove use or use six.text_type()
- [M326] Translated messages cannot be concatenated. String should be included in translated message.
- [M328] LOG.critical messages require translations _LC()!
- [M328] LOG.error and LOG.exception messages require translations _LE()!
- [M329] LOG.info messages require translations _LI()!
- [M330] LOG.warning messages require translations _LW()!
- [M331] Log messages require translations!
- [M333] 'oslo_' should be used instead of 'oslo.'
- [M336] Must use a dict comprehension instead of a dict constructor with a sequence of key-value pairs.

LOG Translations
----------------

LOG.debug messages will not get translated. Use ``_LI()`` for ``LOG.info``,
``_LW()`` for ``LOG.warning``, ``_LE()`` for ``LOG.error`` and
``LOG.exception``, and ``_LC()`` for ``LOG.critical``.

``_()`` is preferred for any user facing message, even if it is also going to
a log file. This ensures that the translated version of the message will be
available to the user.

The log marker functions (``_LI()``, ``_LW()``, ``_LE()``, and ``_LC()``) must
only be used when the message is only sent directly to the log. Anytime that
the message will be passed outside of the current context (for example as part
of an exception) the ``_()`` marker function must be used.

A common pattern is to define a single message object and use it more than
once, for the log call and the exception. In that case, ``_()`` must be used
because the message is going to appear in an exception that may be presented
to the user.

For more details about translations, see
http://docs.openstack.org/developer/oslo.i18n/guidelines.html

Creating Unit Tests
-------------------

For every new feature, unit tests should be created that both test and
(implicitly) document the usage of said feature.
If submitting a patch for a bug that had no unit test, a new passing unit
test should be added. If a submitted bug fix does have a unit test, be sure
to add a new one that fails without the patch and passes with the patch.

For more information on creating unit tests and utilizing the testing
infrastructure in OpenStack Manila, please read manila/testing/README.rst.

openstack-common
----------------

A number of modules from openstack-common are imported into the project.

These modules are "incubating" in openstack-common and are kept in sync
with the help of openstack-common's update.py script. See:
http://wiki.openstack.org/CommonLibrary#Incubation

The copy of the code should never be directly modified here. Please always
update openstack-common first and then run the script to copy the changes
across.

Running Tests
-------------

The testing system is based on a combination of tox and testr. If you just
want to run the whole suite, run `tox` and all will be fine. However, if
you'd like to dig in a bit more, you might want to learn some things about
testr itself. A basic walkthrough for OpenStack can be found at
http://wiki.openstack.org/testr

OpenStack Trademark
-------------------

OpenStack is a registered trademark of OpenStack, LLC, and uses the
following capitalization: OpenStack

Commit Messages
---------------

Using a common format for commit messages will help keep our git history
readable. Follow these guidelines:

First, provide a brief summary (it is recommended to keep the commit title
under 50 chars). The first line of the commit message should provide an
accurate description of the change, not just a reference to a bug or
blueprint. It must be followed by a single blank line.

If the change relates to a specific driver (libvirt, xenapi, qpid, etc...),
begin the first line of the commit message with the driver name, lowercased,
followed by a colon.

Following your brief summary, provide a more detailed description of the
patch, manually wrapping the text at 72 characters. This description should
provide enough detail that one does not have to refer to external resources
to determine its high-level functionality.

Once you use 'git review', two lines will be appended to the commit message:
a blank line followed by a 'Change-Id'. This is important to correlate this
commit with a specific review in Gerrit, and it should not be modified.

For further information on constructing high quality commit messages, and
how to split up commits into a series of changes, consult the project wiki:
http://wiki.openstack.org/GitCommitMessages
manila-2.0.0/CONTRIBUTING.rst0000664000567000056710000000103412701407107016551 0ustar jenkinsjenkins00000000000000If you would like to contribute to the development of OpenStack, you must
follow the steps on this page:

    http://docs.openstack.org/infra/manual/developers.html

Once those steps have been completed, changes to OpenStack should be
submitted for review via the Gerrit tool, following the workflow documented
at:

    http://docs.openstack.org/infra/manual/developers.html#development-workflow

Pull requests submitted through GitHub will be ignored.
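As a rough sketch of that workflow (the developer manual linked above is the
authoritative reference; the branch name below is purely hypothetical)::

    git clone https://github.com/openstack/manila.git
    cd manila
    git checkout -b bug/1234567   # hypothetical topic branch
    # ...make changes, add unit tests, commit...
    git review                    # submit the change to Gerrit for review
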
Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/manila manila-2.0.0/releasenotes/0000775000567000056710000000000012701407265016610 5ustar jenkinsjenkins00000000000000manila-2.0.0/releasenotes/notes/0000775000567000056710000000000012701407265017740 5ustar jenkinsjenkins00000000000000manila-2.0.0/releasenotes/notes/share-replication-81ecf4a32a5c83b6.yaml0000664000567000056710000000020612701407107026520 0ustar jenkinsjenkins00000000000000--- features: - Shares can be replicated. Replicas can be added, listed, queried for detail, promoted to be 'active' or removed.manila-2.0.0/releasenotes/notes/.placeholder0000664000567000056710000000000012701407107022204 0ustar jenkinsjenkins00000000000000manila-2.0.0/releasenotes/notes/add-export-locations-api-6fc6086c6a081faa.yaml0000664000567000056710000000036112701407107027721 0ustar jenkinsjenkins00000000000000--- features: - Added APIs for listing export locations per share and share instances. deprecations: - Removed 'export_location' and 'export_locations' attributes from share and share instance views starting with microversion '2.9'. manila-2.0.0/releasenotes/notes/manage-unmanage-snapshot-bd92164472638f44.yaml0000664000567000056710000000006012701407107027476 0ustar jenkinsjenkins00000000000000--- features: - Manage and unmanage snapshot. manila-2.0.0/releasenotes/notes/add-tegile-driver-1859114513edb13e.yaml0000664000567000056710000000010012701407107026145 0ustar jenkinsjenkins00000000000000--- features: - Added driver for Tegile IntelliFlash arrays. manila-2.0.0/releasenotes/source/0000775000567000056710000000000012701407265020110 5ustar jenkinsjenkins00000000000000manila-2.0.0/releasenotes/source/index.rst0000664000567000056710000000017012701407112021736 0ustar jenkinsjenkins00000000000000 ==================== Manila Release Notes ==================== .. toctree:: :maxdepth: 1 liberty unreleased manila-2.0.0/releasenotes/source/_templates/0000775000567000056710000000000012701407265022245 5ustar jenkinsjenkins00000000000000manila-2.0.0/releasenotes/source/_templates/.placeholder0000664000567000056710000000000012701407107024511 0ustar jenkinsjenkins00000000000000manila-2.0.0/releasenotes/source/unreleased.rst0000664000567000056710000000015312701407107022763 0ustar jenkinsjenkins00000000000000============================ Current Series Release Notes ============================ .. release-notes:: manila-2.0.0/releasenotes/source/liberty.rst0000664000567000056710000000021512701407107022305 0ustar jenkinsjenkins00000000000000============================ Liberty Series Release Notes ============================ .. release-notes:: :branch: origin/stable/liberty manila-2.0.0/releasenotes/source/conf.py0000664000567000056710000002157012701407107021407 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Manila Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. 
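#
# For reference (an assumption about the usual workflow, not text generated
# by sphinx-quickstart): with the 'reno.sphinxext' extension enabled below,
# these notes are normally rendered with Sphinx, for example:
#
#     sphinx-build -b html releasenotes/source releasenotes/build/html
#
# The authoritative invocation lives in the project's tox.ini.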
# # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'oslosphinx', 'reno.sphinxext', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Manila Release Notes' copyright = u'2015, Manila Developers' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. from manila.version import version_info as manila_version # noqa # The full version, including alpha/beta/rc tags. release = manila_version.version_string_with_vcs() # The short X.Y version. version = manila_version.canonical_version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. 
If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'ManilaReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'ManilaReleaseNotes.tex', u'Manila Release Notes Documentation', u'Manila Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. 
# latex_domain_indices = True


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'manilareleasenotes', u'Manila Release Notes Documentation',
     [u'Manila Developers'], 1)
]

# If true, show URL addresses after external links.
# man_show_urls = False


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'ManilaReleaseNotes', u'Manila Release Notes Documentation',
     u'Manila Developers', 'ManilaReleaseNotes',
     'One line description of project.', 'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
# texinfo_appendices = []

# If false, no module index is generated.
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
manila-2.0.0/README.rst0000664000567000056710000000126112701407107015601 0ustar jenkinsjenkins00000000000000======
MANILA
======

You have come across an OpenStack shared file system service. It has
identified itself as "Manila." It was abstracted from the Cinder project.

* Wiki: https://wiki.openstack.org/Manila
* Developer docs: http://docs.openstack.org/developer/manila

Getting Started
---------------

If you'd like to run from the master branch, you can clone the git repo:

    git clone https://github.com/openstack/manila.git

For developer information please see `HACKING.rst `_

You can raise bugs here http://bugs.launchpad.net/manila

Python client
-------------

https://github.com/openstack/python-manilaclient.git
manila-2.0.0/setup.py0000664000567000056710000000200412701407107015620 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools

# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
    import multiprocessing  # noqa
except ImportError:
    pass

setuptools.setup(
    setup_requires=['pbr>=1.8'],
    pbr=True)
manila-2.0.0/test-requirements.txt0000664000567000056710000000142612701407112020352 0ustar jenkinsjenkins00000000000000# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
# hacking should be first
hacking<0.11,>=0.10.2

bashate>=0.2 # Apache-2.0
coverage>=3.6 # Apache-2.0
ddt>=1.0.1 # MIT
fixtures>=1.3.1 # Apache-2.0/BSD
mock>=1.2 # BSD
iso8601>=0.1.9 # MIT
PyMySQL>=0.6.2 # MIT License
oslotest>=1.10.0 # Apache-2.0
oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0
psycopg2>=2.5 # LGPL/ZPL
python-subunit>=0.0.18 # Apache-2.0/BSD
requests-mock>=0.7.0 # Apache-2.0
sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD
os-testr>=0.4.1 # Apache-2.0
testrepository>=0.0.18 # Apache-2.0/BSD
testresources>=0.2.4 # Apache-2.0/BSD
testtools>=1.4.0 # MIT
reno>=0.1.1 # Apache2
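#
# For reference only (a sketch, not consumed by the gate): these pins are
# normally installed and exercised through tox, which reads this file, e.g.
#
#     pip install tox
#     tox           # run the whole suite, as described in HACKING.rst
#     tox -e pep8   # style checks, including the M3xx hacking rules
#
# The 'pep8' environment name is the usual OpenStack convention and is an
# assumption here; check tox.ini for the authoritative list of environments.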